| text (string, lengths 12–1.05M) | repo_name (string, lengths 5–86) | path (string, lengths 4–191) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12–1.05M) | keyword (list, lengths 1–23) | text_hash (string, length 64) |
|---|---|---|---|---|---|---|---|
"""MGEScan-nonLTR: identifying non-ltr in genome sequences
Usage:
nonltr.py all <genome_dir> [--output=<data_dir>]
nonltr.py forward <genome_dir> [--output=<data_dir>]
nonltr.py backward <genome_dir> [--output=<data_dir>]
nonltr.py reverseq <genome_dir> [--output=<data_dir>]
nonltr.py qvalue <genome_dir> [--output=<data_dir>]
nonltr.py gff3 <genome_dir> [--output=<data_dir>]
nonltr.py (-h | --help)
nonltr.py --version
Options:
-h --help Show this screen
--version Show version
--output=<data_dir> Path where results will be saved
"""
from docopt import docopt
from multiprocessing import Process
from subprocess import Popen, PIPE
from mgescan.cmd import MGEScan
from mgescan import utils
from biopython import reverse_complement_fasta, getid
import os
import time
import shutil
class nonLTR(MGEScan):
main_dir = "nonltr"
cmd_hmm = main_dir + "/run_hmm.pl"
cmd_post_process = main_dir + "/post_process.pl"
cmd_validate_q_value = main_dir + "/post_process2.pl"
cmd_togff = main_dir + "/toGFF.py"
processes = set()
max_processes = 5
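    # `processes` holds in-flight run_hmm.pl jobs; forward_strand() and
    # backward_strand() below launch at most `max_processes` at a time and
    # reap finished ones via poll().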
def __init__(self, args):
self.args = args
self.all_enabled = self.args['all']
self.forward_enabled = self.args['forward']
self.backward_enabled = self.args['backward']
self.reverseq_enabled = self.args['reverseq']
self.qvalue_enabled = self.args['qvalue']
self.gff3_enabled = self.args['gff3']
self.set_inputs()
self.set_defaults()
def set_inputs(self):
self.data_dir = utils.get_abspath(self.args['--output'])
self.genome_dir = utils.get_abspath(self.args['<genome_dir>'])
def set_defaults(self):
super(nonLTR, self).set_defaults()
self.plus_dir = self.genome_dir
        if self.reverseq_enabled:
# minus_dir used to be genome_dir + "_b"
self.minus_dir = self.data_dir + "/_reversed/"
else:
self.minus_dir = self.genome_dir + "/_reversed/"
self.plus_out_dir = self.data_dir + "/f/"
self.minus_out_dir = self.data_dir + "/b/"
def run(self):
# Step 1
p1 = Process(target=self.forward_strand)
if (self.all_enabled) or (self.forward_enabled):
p1.start()
# Step 2
if (self.all_enabled) or (self.reverseq_enabled):
# Reverse complement before backward strand
self.reverse_complement()
# Step 3
p2 = Process(target=self.backward_strand)
if (self.all_enabled) or (self.backward_enabled):
p2.start()
if (self.all_enabled) or (self.forward_enabled):
p1.join()
if (self.all_enabled) or (self.backward_enabled):
p2.join()
# Step 4
if (self.all_enabled) or (self.qvalue_enabled):
# validation for q value
self.post_processing2()
# Step 5
if (self.all_enabled) or (self.gff3_enabled):
# convert to gff3
self.toGFF()
def forward_strand(self):
mypath = self.plus_dir
out_dir = self.plus_out_dir
for (dirpath, dirnames, filenames) in os.walk(mypath):
break
for name in filenames:
file_path = utils.get_abspath(dirpath + "/" + name)
# Rename to sequence id
sid = getid(file_path)
new_path = utils.get_abspath(dirpath + "/" + sid)
os.rename(file_path, new_path)
command = self.cmd_hmm + (" --dna=%s --out=%s --hmmerv=%s" %
(new_path, out_dir, self.hmmerv))
command = command.split()
self.processes.add(Popen(command, stdout=PIPE,
stderr=PIPE))
if len(self.processes) >= self.max_processes:
time.sleep(.1)
self.processes.difference_update([p for p in self.processes if
p.poll() is not None])
#print dirpath, dirnames, filenames
for p in self.processes:
if p.poll() is None:
p.wait()
self.post_processing_after_forward_strand()
def backward_strand(self):
mypath = self.minus_dir
out_dir = self.minus_out_dir
for (dirpath, dirnames, filenames) in os.walk(mypath):
break
for name in filenames:
file_path = utils.get_abspath(dirpath + "/" + name)
# Rename to sequence id
sid = getid(file_path)
new_path = utils.get_abspath(dirpath + "/" + sid)
os.rename(file_path, new_path)
command = self.cmd_hmm + (" --dna=%s --out=%s --hmmerv=%s" %
(new_path, out_dir, self.hmmerv))
command = command.split()
self.processes.add(Popen(command, stdout=PIPE,
stderr=PIPE))
if len(self.processes) >= self.max_processes:
time.sleep(.1)
self.processes.difference_update([p for p in self.processes if
p.poll() is not None])
#print dirpath, dirnames, filenames
for p in self.processes:
if p.poll() is None:
p.wait()
self.post_processing_after_reverse_strand()
def post_processing_after_forward_strand(self):
self.post_processing(self.plus_out_dir, self.plus_dir, 0)
def post_processing_after_reverse_strand(self):
self.post_processing(self.minus_out_dir, self.minus_dir, 1)
def post_processing(self, out_dir, dir, reverse_yn):
        utils.silentremove(utils.get_abspath(out_dir + "/out1/aaaaa"))
        utils.silentremove(utils.get_abspath(out_dir + "/out1/bbbbb"))
        utils.silentremove(utils.get_abspath(out_dir + "/out1/ppppp"))
        utils.silentremove(utils.get_abspath(out_dir + "/out1/qqqqq"))
cmd = self.cmd_post_process + (" --dna=%s --out=%s --rev=%s" %
(dir, out_dir, reverse_yn))
self.run_cmd(cmd)
def reverse_complement(self):
mypath = self.genome_dir
for (dirpath, dirnames, filenames) in os.walk(mypath):
break
directory = self.minus_dir
if not os.path.exists(directory):
os.makedirs(directory)
for name in filenames:
file_path = utils.get_abspath(dirpath + "/" + name)
reverse_complement_fasta(file_path, directory)
def post_processing2(self):
if self.qvalue_enabled:
shutil.move(self.genome_dir + "/b", self.data_dir)
shutil.move(self.genome_dir + "/f", self.data_dir)
cmd = self.cmd_validate_q_value + \
" --data_dir=%(data_dir)s --hmmerv=%(hmmerv)s"
self.run_cmd(cmd)
def toGFF(self):
if self.gff3_enabled:
            # Assume info is the only directory in genome_dir
shutil.move(self.genome_dir + "/info", self.data_dir)
# gff3
self.nonltr_out_path = utils.get_abspath(self.data_dir + "/info/full/")
self.nonltr_gff_path = utils.get_abspath(self.data_dir + "/info/nonltr.gff3")
cmd = self.cmd_togff + " %(nonltr_out_path)s %(nonltr_gff_path)s"
res = self.run_cmd(cmd)
def main():
arguments = docopt(__doc__, version="nonltr 0.2")
nonltr = nonLTR(arguments)
nonltr.run()
if __name__ == "__main__":
main()
| MGEScan/mgescan | mgescan/nonltr.py | Python | gpl-3.0 | 7,365 | ["Biopython"] | c00dd03b81e853f2eb351713baf3470e9605a3ae6919130fab01edb4f7dda8ef |
"""I produce the hourly analysis used by IEMRE."""
import datetime
import os
import sys
import pygrib
import numpy as np
import pandas as pd
from metpy.units import masked_array, units
from metpy.calc import wind_components
from metpy.interpolate import inverse_distance_to_grid
from scipy.interpolate import NearestNDInterpolator
from pyiem import iemre
from pyiem.util import get_sqlalchemy_conn, ncopen, utc, logger
# stop RuntimeWarning: invalid value encountered in greater
np.warnings.filterwarnings("ignore")
LOG = logger()
MEMORY = {"ts": datetime.datetime.now()}
def use_rtma(ts, kind):
"""Verbatim copy RTMA, if it exists."""
fn = ts.strftime(
"/mesonet/ARCHIVE/data/%Y/%m/%d/model/rtma/%H/"
"rtma.t%Hz.awp2p5f000.grib2"
)
tasks = {
"wind": [
"10u",
"10v",
],
"tmp": [
"2t",
],
"dwp": [
"2d",
],
}
if not os.path.isfile(fn):
LOG.debug("Failed to find %s", fn)
return None
try:
grbs = pygrib.open(fn)
lats = None
res = []
for task in tasks[kind]:
grb = grbs.select(shortName=task)[0]
if lats is None:
lats, lons = [np.ravel(x) for x in grb.latlons()]
xi, yi = np.meshgrid(iemre.XAXIS, iemre.YAXIS)
nn = NearestNDInterpolator((lons, lats), np.ravel(grb.values))
res.append(nn(xi, yi))
return res
except Exception as exp:
LOG.debug("%s exp:%s", fn, exp)
return None
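# Note: use_rtma() returns one interpolated grid per shortName configured for
# `kind` (e.g. the ["10u", "10v"] pair for kind="wind"), or None when the
# RTMA file is missing or unreadable.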
def grid_wind(df, domain):
"""
Grid winds based on u and v components
@return uwnd, vwnd
"""
# compute components
u = []
v = []
for _station, row in df.iterrows():
(_u, _v) = wind_components(
units("knot") * row["sknt"], units("degree") * row["drct"]
)
u.append(_u.to("meter / second").m)
v.append(_v.to("meter / second").m)
df["u"] = u
df["v"] = v
ugrid = generic_gridder(df, "u", domain, applymask=False)
vgrid = generic_gridder(df, "v", domain, applymask=False)
return ugrid, vgrid
def grid_skyc(df, domain):
"""Hmmmm"""
v = []
for _station, row in df.iterrows():
try:
_v = max(row["max_skyc1"], row["max_skyc2"], row["max_skyc3"])
except TypeError:
continue
v.append(_v)
df["skyc"] = v
return generic_gridder(df, "skyc", domain)
def generic_gridder(df, idx, domain, applymask=True):
"""Generic gridding algorithm for easy variables"""
df2 = df[pd.notnull(df[idx])]
xi, yi = np.meshgrid(iemre.XAXIS, iemre.YAXIS)
res = np.ones(xi.shape) * np.nan
# set a sentinel of where we won't be estimating
if applymask:
res = np.where(domain > 0, res, -9999)
# do our gridding
grid = inverse_distance_to_grid(
df2["lon"].values, df2["lat"].values, df2[idx].values, xi, yi, 1.5
)
# replace nan values in res with whatever now is in grid
res = np.where(np.isnan(res), grid, res)
# Do we still have missing values?
if np.isnan(res).any():
# very aggressive with search radius
grid = inverse_distance_to_grid(
df2["lon"].values, df2["lat"].values, df2[idx].values, xi, yi, 5.5
)
# replace nan values in res with whatever now is in grid
res = np.where(np.isnan(res), grid, res)
    # replace sentinel back to np.nan
res = np.where(res == -9999, np.nan, res)
return np.ma.array(res, mask=np.isnan(res))
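# Usage sketch (column name as used by grid_hour below):
#   tmpf = generic_gridder(df, "max_tmpf", domain)
# returns a masked 2D analysis on the iemre XAXIS/YAXIS grid, with cells
# outside the domain mask left masked.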
def grid_hour(ts):
"""
I proctor the gridding of data on an hourly basis
    @param ts Timestamp of the analysis; we'll consider a 20 minute window
"""
LOG.debug("grid_hour called...")
with ncopen(iemre.get_hourly_ncname(ts.year), "r", timeout=300) as nc:
domain = nc.variables["hasdata"][:, :]
ts0 = ts - datetime.timedelta(minutes=10)
ts1 = ts + datetime.timedelta(minutes=10)
mybuf = 2.0
params = (
iemre.WEST - mybuf,
iemre.SOUTH - mybuf,
iemre.WEST - mybuf,
iemre.NORTH + mybuf,
iemre.EAST + mybuf,
iemre.NORTH + mybuf,
iemre.EAST + mybuf,
iemre.SOUTH - mybuf,
iemre.WEST - mybuf,
iemre.SOUTH - mybuf,
ts0,
ts1,
)
with get_sqlalchemy_conn("asos") as conn:
df = pd.read_sql(
"""SELECT station, ST_x(geom) as lon, st_y(geom) as lat,
max(case when tmpf > -60 and tmpf < 130 THEN tmpf else null end)
as max_tmpf,
max(case when sknt > 0 and sknt < 100 then sknt else 0 end) as max_sknt,
max(getskyc(skyc1)) as max_skyc1,
max(getskyc(skyc2)) as max_skyc2,
max(getskyc(skyc3)) as max_skyc3,
max(case when p01i > 0 and p01i < 1000 then p01i else 0 end) as phour,
max(case when dwpf > -60 and dwpf < 100 THEN dwpf else null end)
as max_dwpf,
max(case when sknt >= 0 then sknt else 0 end) as sknt,
max(case when sknt >= 0 then drct else 0 end) as drct
from alldata a JOIN stations t on (a.station = t.id) WHERE
ST_Contains(
ST_GeomFromEWKT('SRID=4326;POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))'),
geom) and (t.network ~* 'ASOS' or t.network = 'AWOS') and
valid >= %s and valid < %s and report_type = 2
GROUP by station, lon, lat""",
conn,
params=params,
index_col="station",
)
# try first to use RTMA
res = use_rtma(ts, "wind")
if res is not None:
ures, vres = res
else:
if df.empty:
LOG.warning("%s has no entries, FAIL", ts)
return
ures, vres = grid_wind(df, domain)
LOG.info(
"wind is done. max(ures): %s max(vres): %s",
np.max(ures),
np.max(vres),
)
if ures is None:
LOG.warning("Failure for uwnd at %s", ts)
else:
write_grid(ts, "uwnd", ures)
write_grid(ts, "vwnd", vres)
# try first to use RTMA
res = use_rtma(ts, "tmp")
did_gridding = False
if res is not None:
tmpf = masked_array(res[0], data_units="degK").to("degF").m
else:
if df.empty:
LOG.warning("%s has no entries, FAIL", ts)
return
did_gridding = True
tmpf = generic_gridder(df, "max_tmpf", domain)
# try first to use RTMA
res = use_rtma(ts, "dwp")
    # Only use the RTMA dewpoint when the temperature also came from RTMA
if not did_gridding and res is not None:
dwpf = masked_array(res[0], data_units="degK").to("degF").m
else:
if df.empty:
LOG.warning("%s has no entries, FAIL", ts)
return
dwpf = generic_gridder(df, "max_dwpf", domain)
    # require that dwpf <= tmpf (dewpoint cannot exceed temperature)
mask = ~np.isnan(dwpf)
mask[mask] &= dwpf[mask] > tmpf[mask]
dwpf = np.where(mask, tmpf, dwpf)
write_grid(ts, "tmpk", masked_array(tmpf, data_units="degF").to("degK"))
write_grid(ts, "dwpk", masked_array(dwpf, data_units="degF").to("degK"))
res = grid_skyc(df, domain)
LOG.info("grid skyc is done")
if res is None:
LOG.warning("Failure for skyc at %s", ts)
else:
write_grid(ts, "skyc", res)
def write_grid(valid, vname, grid):
"""Atomic write of data to our netcdf storage
This is isolated so that we don't 'lock' up our file while intensive
work is done
"""
offset = iemre.hourly_offset(valid)
with ncopen(iemre.get_hourly_ncname(valid.year), "a", timeout=300) as nc:
LOG.info(
"offset: %s writing %s with min: %s max: %s Ames: %s",
offset,
vname,
np.ma.min(grid),
np.ma.max(grid),
grid[151, 259],
)
nc.variables[vname][offset] = grid
def main(argv):
"""Go Main"""
ts = utc(int(argv[1]), int(argv[2]), int(argv[3]), int(argv[4]))
grid_hour(ts)
if __name__ == "__main__":
main(sys.argv)
| akrherz/iem | scripts/iemre/hourly_analysis.py | Python | mit | 7,939 | ["NetCDF"] | 6422e4fb1cdf12e91f87008d94935bd60b575bc4c0048e218d18df742fb63ddf |
|
# Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import sh
from molecule import logger
from molecule import util
from molecule.dependency import base
LOG = logger.get_logger(__name__)
class Shell(base.Base):
"""
``Shell`` is an alternate dependency manager. It is intended to run a
command in situations where `Ansible Galaxy`_ and `Gilt`_ don't suffice.
The ``command`` to execute is required, and is relative to Molecule's
project directory when referencing a script not in $PATH.
.. note::
Unlike the other dependency managers, ``options`` are ignored and not
passed to `shell`. Additional flags/subcommands should simply be added
to the `command`.
.. code-block:: yaml
dependency:
name: shell
command: path/to/command --flag1 subcommand --flag2
The dependency manager can be disabled by setting ``enabled`` to False.
.. code-block:: yaml
dependency:
name: shell
command: path/to/command --flag1 subcommand --flag2
enabled: False
Environment variables can be passed to the dependency.
.. code-block:: yaml
dependency:
name: shell
command: path/to/command --flag1 subcommand --flag2
env:
FOO: bar
"""
def __init__(self, config):
super(Shell, self).__init__(config)
self._sh_command = None
@property
def command(self):
return self._config.config['dependency']['command']
@property
def default_options(self):
return {}
@property
def default_env(self):
return util.merge_dicts(os.environ.copy(), self._config.env)
def bake(self):
"""
Bake a ``shell`` command so it's ready to execute and returns None.
:return: None
"""
command_list = self.command.split(' ')
command, args = command_list[0], command_list[1:]
self._sh_command = getattr(sh, command)
# Reconstruct command with remaining args.
self._sh_command = self._sh_command.bake(
args, _env=self.env, _out=LOG.out, _err=LOG.error)
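    # Sketch (hypothetical command): with ``command: make deps`` configured,
    # bake() resolves getattr(sh, "make") and bakes it with ["deps"], so
    # execute() effectively runs `make deps` under the merged environment.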
def execute(self):
if not self.enabled:
msg = 'Skipping, dependency is disabled.'
LOG.warn(msg)
return
if self._sh_command is None:
self.bake()
try:
util.run_command(self._sh_command, debug=self._config.debug)
msg = 'Dependency completed successfully.'
LOG.success(msg)
except sh.ErrorReturnCode as e:
util.sysexit(e.exit_code)
def _has_command_configured(self):
return 'command' in self._config.config['dependency']
| metacloud/molecule | molecule/dependency/shell.py | Python | mit | 3,865 | ["Galaxy"] | be2c0e604f096d304fae1cd1cfd4816017dfd52e880a446440f2b969735f5376 |
|
########################################################################
# $HeadURL$
########################################################################
""" DIRAC FileCatalog component representing a directory tree with
enumerated paths
"""
__RCSID__ = "$Id$"
import os
from types import ListType, StringTypes
from DIRAC import S_OK, S_ERROR
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectoryTreeBase import DirectoryTreeBase
MAX_LEVELS = 15
class DirectoryLevelTree(DirectoryTreeBase):
""" Class managing Directory Tree as a simple self-linked structure
with full directory path stored in each node
"""
_tables = {}
_tables["FC_DirectoryLevelTree"] = { "Fields": {
"DirID": "INTEGER AUTO_INCREMENT",
"DirName": "VARCHAR(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL",
"Parent": "INTEGER NOT NULL",
"Level": "INT NOT NULL"
},
"PrimaryKey": "DirID",
"Indexes": {
"Parent": ["Parent"],
"Level": ["Level"]
},
"UniqueIndexes": { "DirName": ["DirName"] }
}
for i in range( 1, MAX_LEVELS+1 ):
_tables["FC_DirectoryLevelTree"]["Fields"]['LPATH%d' % i] = "SMALLINT NOT NULL DEFAULT 0"
def __init__(self,database=None):
DirectoryTreeBase.__init__(self,database)
self.treeTable = 'FC_DirectoryLevelTree'
def getTreeType(self):
return 'Directory'
def findDir(self,path,connection=False):
""" Find directory ID for the given path
"""
dpath = os.path.normpath( path )
req = "SELECT DirID,Level from FC_DirectoryLevelTree WHERE DirName='%s'" % dpath
result = self.db._query(req,connection)
if not result['OK']:
return result
if not result['Value']:
return S_OK('')
res = S_OK(result['Value'][0][0])
res['Level'] = result['Value'][0][1]
return res
def findDirs( self, paths, connection=False ):
""" Find DirIDs for the given path list
"""
dpaths = ','.join( [ "'"+os.path.normpath( path )+"'" for path in paths ] )
req = "SELECT DirName,DirID from FC_DirectoryLevelTree WHERE DirName in (%s)" % dpaths
result = self.db._query(req,connection)
if not result['OK']:
return result
dirDict = {}
for dirName, dirID in result['Value']:
dirDict[dirName] = dirID
return S_OK( dirDict )
def removeDir(self,path):
""" Remove directory
"""
result = self.findDir(path)
if not result['OK']:
return result
if not result['Value']:
res = S_OK()
res["DirID"] = 0
return res
dirID = result['Value']
req = "DELETE FROM FC_DirectoryLevelTree WHERE DirID=%d" % dirID
result = self.db._update(req)
result['DirID'] = dirID
return result
def __getNumericPath(self,dirID,connection=False):
""" Get the enumerated path of the given directory
"""
epathString = ','.join( [ 'LPATH%d' % (i+1) for i in range( MAX_LEVELS ) ] )
req = 'SELECT LEVEL,%s FROM FC_DirectoryLevelTree WHERE DirID=%d' % (epathString,dirID)
result = self.db._query(req,connection)
if not result['OK']:
return result
if not result['Value']:
return S_OK([])
row = result['Value'][0]
level = row[0]
epathList = []
for i in range(level):
epathList.append(row[i+1])
result = S_OK(epathList)
result['Level'] = level
return result
def makeDir(self,path):
""" Create a new directory entry
"""
result = self.findDir(path)
if not result['OK']:
return result
dirID = result['Value']
if dirID:
result = S_OK(dirID)
result['NewDirectory'] = False
return result
dpath = path
if path == '/':
level = 0
elements = []
parentDirID = 0
else:
if path[0] == "/":
dpath = path[1:]
elements = dpath.split('/')
level = len(elements)
if level > MAX_LEVELS:
return S_ERROR('Too many directory levels: %d' % level)
result = self.getParent(path)
if not result['OK']:
return result
parentDirID = result['Value']
epathList = []
if parentDirID:
result = self.__getNumericPath(parentDirID)
if not result['OK']:
return result
epathList = result['Value']
names = ['DirName','Level','Parent']
values = [path,level,parentDirID]
if path != '/':
for i in range(1,level,1):
names.append('LPATH%d' % i)
values.append(epathList[i-1])
result = self.db._getConnection()
conn = result['Value']
#result = self.db._query("LOCK TABLES FC_DirectoryLevelTree WRITE; ",conn)
result = self.db._insert('FC_DirectoryLevelTree',names,values,conn)
if not result['OK']:
#resUnlock = self.db._query("UNLOCK TABLES;",conn)
if result['Message'].find('Duplicate') != -1:
#The directory is already added
resFind = self.findDir(path)
if not resFind['OK']:
return resFind
dirID = resFind['Value']
result = S_OK(dirID)
result['NewDirectory'] = False
return result
else:
return result
dirID = result['lastRowId']
# Update the path number
if parentDirID:
# lPath = "LPATH%d" % (level)
# req = " SELECT @tmpvar:=max(%s)+1 FROM FC_DirectoryLevelTree WHERE Parent=%d; " % (lPath,parentDirID)
# resultLock = self.db._query("LOCK TABLES FC_DirectoryLevelTree WRITE; ",conn)
# result = self.db._query(req,conn)
# req = "UPDATE FC_DirectoryLevelTree SET %s=@tmpvar WHERE DirID=%d; " % (lPath,dirID)
# result = self.db._update(req,conn)
# result = self.db._query("UNLOCK TABLES;",conn)
lPath = "LPATH%d" % (level)
req = " SELECT @tmpvar:=max(%s)+1 FROM FC_DirectoryLevelTree WHERE Parent=%d FOR UPDATE; " % ( lPath, parentDirID )
resultLock = self.db._query( "START TRANSACTION; ", conn )
result = self.db._query(req,conn)
req = "UPDATE FC_DirectoryLevelTree SET %s=@tmpvar WHERE DirID=%d; " % (lPath,dirID)
result = self.db._update(req,conn)
result = self.db._query( "COMMIT;", conn )
if not result['OK']:
return result
else:
result = self.db._query( "ROLLBACK;", conn )
result = S_OK(dirID)
result['NewDirectory'] = True
return result
def existsDir(self,path):
""" Check the existence of a directory at the specified path
"""
result = self.findDir(path)
if not result['OK']:
return result
if not result['Value']:
return S_OK({"Exists":False})
else:
return S_OK({"Exists":True,"DirID":result['Value']})
def getParent(self,path):
""" Get the parent ID of the given directory
"""
parent_dir = os.path.dirname(path)
return self.findDir(parent_dir)
def getParentID(self,dirPathOrID):
""" Get the ID of the parent of a directory specified by ID
"""
dirID = dirPathOrID
if type(dirPathOrID) in StringTypes:
result = self.findDir(dirPathOrID)
if not result['OK']:
return result
dirID = result['Value']
if dirID == 0:
return S_ERROR('Root directory ID given')
req = "SELECT Parent FROM FC_DirectoryLevelTree WHERE DirID=%d" % dirID
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('No parent found')
return S_OK(result['Value'][0][0])
def getDirectoryPath(self,dirID):
""" Get directory name by directory ID
"""
req = "SELECT DirName FROM FC_DirectoryLevelTree WHERE DirID=%d" % int(dirID)
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Directory with id %d not found' % int(dirID) )
return S_OK(result['Value'][0][0])
def getDirectoryPaths(self,dirIDList):
""" Get directory name by directory ID list
"""
dirs = dirIDList
if type(dirIDList) != ListType:
dirs = [dirIDList]
dirListString = ','.join( [ str(dir_) for dir_ in dirs ] )
req = "SELECT DirID,DirName FROM FC_DirectoryLevelTree WHERE DirID in ( %s )" % dirListString
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Directories not found: %s' % dirListString )
resultDict = {}
for row in result['Value']:
resultDict[int(row[0])] = row[1]
return S_OK(resultDict)
def getDirectoryName(self,dirID):
""" Get directory name by directory ID
"""
result = self.getDirectoryPath(dirID)
if not result['OK']:
return result
return S_OK(os.path.basename(result['Value']))
def getPathIDs(self,path):
""" Get IDs of all the directories in the parent hierarchy for a directory
specified by its path
"""
elements = path.split('/')
pelements = []
dPath = ''
for el in elements[1:]:
dPath += '/'+el
pelements.append(dPath)
pelements.append( '/' )
pathString = [ "'"+p+"'" for p in pelements ]
req = "SELECT DirID FROM FC_DirectoryLevelTree WHERE DirName in (%s) ORDER BY DirID" % ','.join(pathString)
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Directory %s not found' % path)
return S_OK([ x[0] for x in result['Value'] ])
def getPathIDsByID_old(self,dirID):
""" Get IDs of all the directories in the parent hierarchy for a directory
specified by its ID
"""
# The method should be rather implemented using enumerated paths
result = self.getDirectoryPath(dirID)
if not result['OK']:
return result
dPath = result['Value']
return self.getPathIDs(dPath)
def getPathIDsByID(self,dirID):
""" Get IDs of all the directories in the parent hierarchy for a directory
specified by its ID
"""
result = self.__getNumericPath( dirID )
if not result['OK']:
return result
level = result['Level']
if level == 0:
return S_OK( [dirID] )
lpaths = result['Value']
lpathSelects = []
for l in range( level ):
sel = ' AND '.join( ["Level=%d" % l] + [ 'LPATH%d=%d' % (ll+1,lpaths[ll]) for ll in range( l ) ] )
lpathSelects.append( sel )
selection = '(' + ') OR ('.join( lpathSelects ) + ')'
req = "SELECT Level,DirID from FC_DirectoryLevelTree WHERE %s ORDER BY Level" % selection
result = self.db._query( req )
if not result['OK']:
return result
if not result['Value']:
return S_ERROR( 'No result for the path of Directory with ID %d' % dirID )
return S_OK([ x[1] for x in result['Value'] ] + [dirID] )
def getChildren(self,path,connection=False):
""" Get child directory IDs for the given directory
"""
if type(path) in StringTypes:
result = self.findDir(path,connection)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Directory does not exist: %s' % path )
dirID = result['Value']
else:
dirID = path
req = "SELECT DirID FROM FC_DirectoryLevelTree WHERE Parent=%d" % dirID
result = self.db._query(req,connection)
if not result['OK']:
return result
if not result['Value']:
return S_OK([])
return S_OK([ x[0] for x in result['Value'] ])
def getSubdirectoriesByID(self,dirID,requestString=False,includeParent=False):
""" Get all the subdirectories of the given directory at a given level
"""
req = "SELECT Level FROM FC_DirectoryLevelTree WHERE DirID=%d" % dirID
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Directory %d not found' % dirID)
level = result['Value'][0][0]
sPaths = []
if requestString:
req = "SELECT DirID FROM FC_DirectoryLevelTree"
else:
req = "SELECT Level,DirID FROM FC_DirectoryLevelTree"
if level > 0:
req += " AS F1"
for i in range(1,level+1):
sPaths.append('LPATH%d' % i)
pathString = ','.join(sPaths)
req += " JOIN (SELECT %s FROM FC_DirectoryLevelTree WHERE DirID=%d) AS F2 ON " % (pathString,dirID)
sPaths = []
for i in range(1,level+1):
sPaths.append('F1.LPATH%d=F2.LPATH%d' % (i,i))
pString = ' AND '.join(sPaths)
if includeParent:
req += "%s AND F1.Level >= %d" % (pString,level)
else:
req += "%s AND F1.Level > %d" % (pString,level)
if requestString:
return S_OK(req)
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_OK({})
resDict = {}
for row in result['Value']:
resDict[row[1]] = row[0]
return S_OK(resDict)
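  # Illustration (hypothetical IDs): for DirID=42 at Level=2 the generated
  # request self-joins the tree on F1.LPATH1=F2.LPATH1 AND F1.LPATH2=F2.LPATH2
  # with F1.Level > 2, selecting every directory in the subtree below 42.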
def countSubdirectories(self, dirId, includeParent = True):
result = self.getSubdirectoriesByID( dirId, requestString = True, includeParent = includeParent )
if not result['OK']:
return result
reqDir = result['Value'].replace( 'SELECT DirID FROM', 'SELECT count(*) FROM' )
result = self.db._query( reqDir )
if not result['OK']:
return result
return S_OK( result['Value'][0][0] )
def getAllSubdirectoriesByID(self,dirList):
""" Get IDs of all the subdirectories of directories in a given list
"""
dirs = dirList
if type(dirList) != ListType:
dirs = [dirList]
resultList = []
parentList = dirs
while parentList:
subResult = []
dirListString = ','.join( [ str(dir_) for dir_ in parentList ] )
req = 'SELECT DirID from FC_DirectoryLevelTree WHERE Parent in ( %s )' % dirListString
result = self.db._query(req)
if not result['OK']:
return result
for row in result['Value']:
subResult.append(row[0])
if subResult:
resultList += subResult
parentList = subResult
return S_OK(resultList)
def getSubdirectories(self,path):
""" Get subdirectories of the given directory
"""
result = self.findDir(path)
if not result['OK']:
return result
if not result['Value']:
return S_OK({})
dirID = result['Value']
result = self.getSubdirectoriesByID(dirID)
return result
def recoverOrphanDirectories( self, credDict ):
""" Recover orphan directories
"""
# Find out orphan directories
treeTable = 'FC_DirectoryLevelTree'
req = "SELECT DirID,Parent FROM %s WHERE Parent NOT IN ( SELECT DirID from %s )" % (treeTable,treeTable)
result = self.db._query( req )
if not result['OK']:
return result
parentDict = {}
for dirID,parentID in result['Value']:
result = self.getDirectoryPath( dirID )
if not result['OK']:
continue
dirPath = result['Value']
parentPath = os.path.dirname( dirPath )
if not dirPath == '/':
parentDict.setdefault( parentPath, {} )
parentDict[parentPath].setdefault( 'DirList', [] )
parentDict[parentPath]['DirList'].append( dirID )
parentDict[parentPath]['OldParentID'] = parentID
for parentPath, dirDict in parentDict.items():
dirIDList = dirDict['DirList']
oldParentID = dirDict['OldParentID']
result = self.findDir( parentPath )
if not result['OK']:
continue
if result['Value']:
# The parent directory was recreated already
parentID = result['Value']
else:
# The parent directory was lost
result = self.makeDirectories( parentPath, credDict )
if not result['OK']:
continue
parentID = result['Value']
# We have created a new directory but let's keep the old ID
req = "UPDATE FC_DirectoryLevelTree SET DirID=%s WHERE DirID=%s" % ( oldParentID, parentID )
result = self.db._update( req )
if not result['OK']:
continue
req = "UPDATE FC_DirectoryInfo SET DirID=%s WHERE DirID=%s" % ( oldParentID, parentID )
result = self.db._update( req )
parentID = oldParentID
# We have to change also the ownership of the new directory to the most likely one
# which is the owner of the containing directory
containerPath = os.path.dirname( parentPath )
result = self.getDirectoryParameters( containerPath )
if result['OK']:
conDict = result['Value']
uid = conDict['UID']
gid = conDict['GID']
result = self._setDirectoryUid(parentID,uid)
result = self._setDirectoryGid(parentID,gid)
dirString = ','.join( [ str(dirID) for dirID in dirIDList ] )
req = "UPDATE FC_DirectoryLevelTree SET Parent=%s WHERE DirID IN (%s)" % ( parentID, dirString )
result = self.db._update( req )
if not result['OK']:
continue
connection = self._getConnection()
result = self.db._query("LOCK TABLES FC_DirectoryLevelTree WRITE", connection )
if not result['OK']:
self.db._query("UNLOCK TABLES", connection )
return result
result = self.__rebuildLevelIndexes( parentID, connection)
self.db._query("UNLOCK TABLES", connection )
return S_OK()
def _getConnection( self, connection=False ):
if connection:
return connection
res = self.db._getConnection()
if res['OK']:
return res['Value']
return connection
def __rebuildLevelIndexes( self, parentID, connection=False ):
""" Rebuild level indexes for all the subdirectories
"""
result = self.__getNumericPath( parentID, connection )
if not result['OK']:
return result
parentIndexList = result['Value']
parentLevel = result['Level']
result = self.getChildren( parentID, connection )
if not result['OK']:
return result
subIDList = result['Value']
indexList = list( parentIndexList )
indexList.append( 0 )
for dirID in subIDList:
indexList[-1] += 1
lpaths = [ 'LPATH%d=%d' % (i+1,indexList[i]) for i in range(parentLevel+1) ]
lpathString = 'SET '+','.join( lpaths )
req = "UPDATE FC_DirectoryLevelTree %s WHERE DirID=%s" % ( lpathString, dirID )
result = self.db._update( req, connection )
if not result['OK']:
return result
result = self.__rebuildLevelIndexes( dirID, connection )
return S_OK()
| Sbalbp/DIRAC | DataManagementSystem/DB/FileCatalogComponents/DirectoryLevelTree.py | Python | gpl-3.0 | 19,028 | ["DIRAC"] | 8a40b8765d7d3fd025fb3152f8f7d655ce243f249e0516f615532329e9b533cc |
|
# Copyright (C) 2010 by CAMd, DTU
# Please see the accompanying LICENSE file for further information.
# This file is taken (almost) verbatim from CMR with D. Landis's agreement
FIELD_SEPARATOR="\\"
PARA_START="\n\n"
PARA_END="\\\\@"
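# Sketch of a Gaussian archive entry as parsed below (values hypothetical):
#   1\1\GINC-HOST\SP\RHF\STO-3G\H2O1\JDOE\01-Jan-2010\...\\@
# Fields are separated by "\" and each entry is terminated by "\\@".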
names = ["", "", "Computer_system", "Type_of_run", "Method", "Basis_set", "Chemical_formula", "Person", "Date", "", "", "", "", "Title", ""] #[Charge,Multi]
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
names_compact = ["", "", "Computer_system", "Type_of_run", "Method", "Basis_set", "Chemical_formula", "Person", "Date", "", "", "", "", "Title", ""] #[Charge,Multi]
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
charge_multiplicity = 15
class GaussianReader:
def auto_type(self, data):
""" tries to determine type"""
try:
return float(data)
except ValueError:
pass
try:
ds = data.split(",")
array = []
for d in ds:
array.append(float(d))
return array
except ValueError:
pass
return data
def __init__(self, filename):
"""filename is optional; if not set, use parse to set the content"""
        if filename is not None:
            fin = open(filename)
            content = fin.read()
            fin.close()
#handles the case that users used windows after the calculation:
content = content.replace("\r\n", "\n")
self.parse(content)
def parse(self, content):
from ase.data import atomic_numbers
self.data = []
temp_items = content.split(PARA_START)
seq_count = 0
for i in temp_items:
i=i.replace("\n ", "")
if i.endswith(PARA_END):
i = i.replace(PARA_END, "")
i = i.split(FIELD_SEPARATOR)
new_dict = {}
self.data.append(new_dict)
new_dict["Sequence number"] = seq_count
seq_count += 1
for pos in range(len(names)):
if names[pos]!="":
new_dict[names[pos]] = self.auto_type(i[pos])
chm = i[charge_multiplicity].split(",")
new_dict["Charge"] = int(chm[0])
new_dict["Multiplicity"] = int(chm[1])
#Read atoms
atoms = []
positions = []
position = charge_multiplicity+1
while position<len(i) and i[position]!="":
s = i[position].split(",")
atoms.append(atomic_numbers[s[0]])
positions.append([float(s[1]), float(s[2]), float(s[3])])
position = position + 1
new_dict["Atomic_numbers"]=atoms
new_dict["Positions"]=positions
#Read more variables
position +=1
while position<len(i) and i[position]!="":
s = i[position].split("=")
if len(s)==2:
new_dict[s[0]]=self.auto_type(s[1])
else:
print "Warning: unexpected input ",s
position = position + 1
def __iter__(self):
"""returns an iterator that iterates over all keywords"""
return self.data.__iter__()
def __len__(self):
return len(self.data)
def __getitem__(self, pos):
return self.data[pos]
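# Usage sketch (hypothetical file name):
#   for entry in GaussianReader("calc.log"):
#       print entry["Method"], entry["Chemical_formula"]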
| grhawk/ASE | tools/ase/io/gaussian_reader.py | Python | gpl-2.0 | 3,674 | ["ASE"] | 62e09ecf2fc0743a3b694d7292a145fff6ac5fc38550621dcc1bb0d66c9f044f |
|
#!/usr/bin/env python
"""
Implementation for `pmg potcar` CLI.
"""
import os
from pymatgen.io.vasp import Potcar
def proc_dir(dirname, procfilefunction):
"""
Process a directory.
Args:
dirname (str): Directory name.
procfilefunction (callable): Callable to execute on directory.
"""
for f in os.listdir(dirname):
if os.path.isdir(os.path.join(dirname, f)):
proc_dir(os.path.join(dirname, f), procfilefunction)
else:
procfilefunction(dirname, f)
def gen_potcar(dirname, filename):
"""
Generate POTCAR from POTCAR.spec in directories.
Args:
dirname (str): Directory name.
filename (str): Filename in directory.
"""
if filename == "POTCAR.spec":
fullpath = os.path.join(dirname, filename)
with open(fullpath) as f:
elements = f.readlines()
symbols = [el.strip() for el in elements if el.strip() != ""]
potcar = Potcar(symbols)
potcar.write_file(os.path.join(dirname, "POTCAR"))
def generate_potcar(args):
"""
Generate POTCAR.
Args:
args (dict): Args from argparse.
"""
if args.recursive:
proc_dir(args.recursive, gen_potcar)
elif args.symbols:
try:
p = Potcar(args.symbols, functional=args.functional)
p.write_file("POTCAR")
except Exception as ex:
print(f"An error has occurred: {str(ex)}")
else:
print("No valid options selected.")
if __name__ == "__main__":
proc_dir(os.getcwd(), gen_potcar)
| vorwerkc/pymatgen | pymatgen/cli/pmg_potcar.py | Python | mit | 1,580 | ["VASP", "pymatgen"] | 64e9a1ee3c8f07f5291eaa29f1b08788b57e86f10b73545ae34a395e036cb539 |
|
"""
Courseware views functions
"""
import logging
import urllib
import json
import cgi
from datetime import datetime
from collections import defaultdict
from django.utils import translation
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.utils.timezone import UTC
from django.views.decorators.http import require_GET, require_POST
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect
from certificates import api as certs_api
from edxmako.shortcuts import render_to_response, render_to_string, marketing_link
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.db import transaction
from markupsafe import escape
from courseware import grades
from courseware.access import has_access, _adjust_start_date_for_beta_testers
from courseware.courses import (
get_courses, get_course,
get_studio_url, get_course_with_access,
sort_by_announcement,
sort_by_start_date,
)
from courseware.masquerade import setup_masquerade
from courseware.model_data import FieldDataCache
from .module_render import toc_for_course, get_module_for_descriptor, get_module
from .entrance_exams import (
course_has_entrance_exam,
get_entrance_exam_content,
get_entrance_exam_score,
user_must_complete_entrance_exam,
user_has_passed_entrance_exam
)
from courseware.models import StudentModule, StudentModuleHistory
from course_modes.models import CourseMode
from lms.djangoapps.lms_xblock.models import XBlockAsidesConfig
from open_ended_grading import open_ended_notifications
from student.models import UserTestGroup, CourseEnrollment
from student.views import single_course_reverification_info, is_course_blocked
from util.cache import cache, cache_if_anonymous
from xblock.fragment import Fragment
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.modulestore.search import path_to_location, navigation_index
from xmodule.tabs import CourseTabList, StaffGradingTab, PeerGradingTab, OpenEndedGradingTab
from xmodule.x_module import STUDENT_VIEW
import shoppingcart
from shoppingcart.models import CourseRegistrationCode
from shoppingcart.utils import is_shopping_cart_enabled
from opaque_keys import InvalidKeyError
from util.milestones_helpers import get_prerequisite_courses_display
from microsite_configuration import microsite
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey, UsageKey
from instructor.enrollment import uses_shib
from util.db import commit_on_success_with_read_committed
import survey.utils
import survey.views
from util.views import ensure_valid_course_key
from eventtracking import tracker
import analytics
log = logging.getLogger("edx.courseware")
template_imports = {'urllib': urllib}
CONTENT_DEPTH = 2
def user_groups(user):
"""
TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
"""
if not user.is_authenticated():
return []
# TODO: Rewrite in Django
key = 'user_group_names_{user.id}'.format(user=user)
cache_expiration = 60 * 60 # one hour
# Kill caching on dev machines -- we switch groups a lot
group_names = cache.get(key)
if settings.DEBUG:
group_names = None
if group_names is None:
group_names = [u.name for u in UserTestGroup.objects.filter(users=user)]
cache.set(key, group_names, cache_expiration)
return group_names
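# Sketch: for user.id 42 the names are cached under "user_group_names_42" for
# one hour; DEBUG short-circuits the cache so local group changes show up
# immediately.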
@ensure_csrf_cookie
@cache_if_anonymous()
def courses(request):
"""
Render "find courses" page. The course selection work is done in courseware.courses.
"""
courses = get_courses(request.user, request.META.get('HTTP_HOST'))
if microsite.get_value("ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]):
courses = sort_by_start_date(courses)
else:
courses = sort_by_announcement(courses)
return render_to_response("courseware/courses.html", {'courses': courses})
def render_accordion(request, course, chapter, section, field_data_cache):
"""
Draws navigation bar. Takes current position in accordion as
parameter.
If chapter and section are '' or None, renders a default accordion.
course, chapter, and section are the url_names.
Returns the html string
"""
# grab the table of contents
toc = toc_for_course(request, course, chapter, section, field_data_cache)
context = dict([
('toc', toc),
('course_id', course.id.to_deprecated_string()),
('csrf', csrf(request)['csrf_token']),
('due_date_display_format', course.due_date_display_format)
] + template_imports.items())
return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule, min_depth=None):
"""
Get the xmodule.position's display item of an xmodule that has a position and
children. If xmodule has no position or is out of bounds, return the first
child with children extending down to content_depth.
For example, if chapter_one has no position set, with two child sections,
section-A having no children and section-B having a discussion unit,
`get_current_child(chapter, min_depth=1)` will return section-B.
Returns None only if there are no children at all.
"""
def _get_default_child_module(child_modules):
"""Returns the first child of xmodule, subject to min_depth."""
if not child_modules:
default_child = None
elif not min_depth > 0:
default_child = child_modules[0]
else:
content_children = [child for child in child_modules if
child.has_children_at_depth(min_depth - 1) and child.get_display_items()]
default_child = content_children[0] if content_children else None
return default_child
if not hasattr(xmodule, 'position'):
return None
if xmodule.position is None:
return _get_default_child_module(xmodule.get_display_items())
else:
# position is 1-indexed.
pos = xmodule.position - 1
children = xmodule.get_display_items()
if 0 <= pos < len(children):
child = children[pos]
elif len(children) > 0:
# module has a set position, but the position is out of range.
# return default child.
child = _get_default_child_module(children)
else:
child = None
return child
def redirect_to_course_position(course_module, content_depth):
"""
Return a redirect to the user's current place in the course.
If this is the user's first time, redirects to COURSE/CHAPTER/SECTION.
    If this isn't the user's first time, redirects to COURSE/CHAPTER,
and the view will find the current section and display a message
about reusing the stored position.
If there is no current position in the course or chapter, then selects
the first child.
"""
urlargs = {'course_id': course_module.id.to_deprecated_string()}
chapter = get_current_child(course_module, min_depth=content_depth)
if chapter is None:
# oops. Something bad has happened.
raise Http404("No chapter found when loading current position in course")
urlargs['chapter'] = chapter.url_name
if course_module.position is not None:
return redirect(reverse('courseware_chapter', kwargs=urlargs))
# Relying on default of returning first child
section = get_current_child(chapter, min_depth=content_depth - 1)
if section is None:
raise Http404("No section found when loading current position in course")
urlargs['section'] = section.url_name
return redirect(reverse('courseware_section', kwargs=urlargs))
def save_child_position(seq_module, child_name):
"""
child_name: url_name of the child
"""
for position, c in enumerate(seq_module.get_display_items(), start=1):
if c.location.name == child_name:
# Only save if position changed
if position != seq_module.position:
seq_module.position = position
# Save this new position to the underlying KeyValueStore
seq_module.save()
def save_positions_recursively_up(user, request, field_data_cache, xmodule):
"""
Recurses up the course tree starting from a leaf
Saving the position property based on the previous node as it goes
"""
current_module = xmodule
while current_module:
parent_location = modulestore().get_parent_location(current_module.location)
parent = None
if parent_location:
parent_descriptor = modulestore().get_item(parent_location)
parent = get_module_for_descriptor(user, request, parent_descriptor, field_data_cache, current_module.location.course_key)
if parent and hasattr(parent, 'position'):
save_child_position(parent, current_module.location.name)
current_module = parent
def chat_settings(course, user):
"""
Returns a dict containing the settings required to connect to a
Jabber chat server and room.
"""
domain = getattr(settings, "JABBER_DOMAIN", None)
if domain is None:
log.warning('You must set JABBER_DOMAIN in the settings to '
'enable the chat widget')
return None
return {
'domain': domain,
# Jabber doesn't like slashes, so replace with dashes
'room': "{ID}_class".format(ID=course.id.replace('/', '-')),
'username': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
# TODO: clearly this needs to be something other than the username
# should also be something that's not necessarily tied to a
# particular course
'password': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
}
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@ensure_valid_course_key
@commit_on_success_with_read_committed
def index(request, course_id, chapter=None, section=None,
position=None):
"""
Displays courseware accordion and associated content. If course, chapter,
and section are all specified, renders the page, or returns an error if they
are invalid.
If section is not specified, displays the accordion opened to the right chapter.
    If neither chapter nor section is specified, redirects to user's most recent
chapter, or the first chapter if this is the user's first visit.
Arguments:
- request : HTTP request
- course_id : course id (str: ORG/course/URL_NAME)
- chapter : chapter url_name (str)
- section : section url_name (str)
- position : position in module, eg of <sequential> module (str)
Returns:
- HTTPresponse
"""
course_key = CourseKey.from_string(course_id)
user = User.objects.prefetch_related("groups").get(id=request.user.id)
redeemed_registration_codes = CourseRegistrationCode.objects.filter(
course_id=course_key,
registrationcoderedemption__redeemed_by=request.user
)
# Redirect to dashboard if the course is blocked due to non-payment.
if is_course_blocked(request, redeemed_registration_codes, course_key):
# registration codes may be generated via Bulk Purchase Scenario
# we have to check only for the invoice generated registration codes
# that their invoice is valid or not
log.warning(
u'User %s cannot access the course %s because payment has not yet been received',
user,
course_key.to_deprecated_string()
)
return redirect(reverse('dashboard'))
request.user = user # keep just one instance of User
with modulestore().bulk_operations(course_key):
return _index_bulk_op(request, course_key, chapter, section, position)
# pylint: disable=too-many-statements
def _index_bulk_op(request, course_key, chapter, section, position):
"""
Render the index page for the specified course.
"""
    # Verify that the position, given as a string, is in fact an int
if position is not None:
try:
int(position)
except ValueError:
raise Http404("Position {} is not an integer!".format(position))
user = request.user
course = get_course_with_access(user, 'load', course_key, depth=2)
staff_access = has_access(user, 'staff', course)
registered = registered_for_course(course, user)
if not registered:
# TODO (vshnayder): do course instructors need to be registered to see course?
log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.to_deprecated_string())
return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))
# see if all pre-requisites (as per the milestones app feature) have been fulfilled
# Note that if the pre-requisite feature flag has been turned off (default) then this check will
# always pass
if not has_access(user, 'view_courseware_with_prerequisites', course):
# prerequisites have not been fulfilled therefore redirect to the Dashboard
log.info(
u'User %d tried to view course %s '
u'without fulfilling prerequisites',
user.id, unicode(course.id))
return redirect(reverse('dashboard'))
# Entrance Exam Check
# If the course has an entrance exam and the requested chapter is NOT the entrance exam, and
# the user hasn't yet met the criteria to bypass the entrance exam, redirect them to the exam.
if chapter and course_has_entrance_exam(course):
chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
if chapter_descriptor and not getattr(chapter_descriptor, 'is_entrance_exam', False) \
and user_must_complete_entrance_exam(request, user, course):
log.info(u'User %d tried to view course %s without passing entrance exam', user.id, unicode(course.id))
return redirect(reverse('courseware', args=[unicode(course.id)]))
# check to see if there is a required survey that must be taken before
# the user can access the course.
if survey.utils.must_answer_survey(course, user):
return redirect(reverse('course_survey', args=[unicode(course.id)]))
masquerade = setup_masquerade(request, course_key, staff_access)
try:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_key, user, course, depth=2)
course_module = get_module_for_descriptor(user, request, course, field_data_cache, course_key)
if course_module is None:
log.warning(u'If you see this, something went wrong: if we got this'
u' far, should have gotten a course module for this user')
return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))
studio_url = get_studio_url(course, 'course')
context = {
'csrf': csrf(request)['csrf_token'],
'accordion': render_accordion(request, course, chapter, section, field_data_cache),
'COURSE_TITLE': course.display_name_with_default,
'course': course,
'init': '',
'fragment': Fragment(),
'staff_access': staff_access,
'studio_url': studio_url,
'masquerade': masquerade,
'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:server@content-qa.mitx.mit.edu/xqa'),
'reverifications': fetch_reverify_banner_info(request, course_key),
}
now = datetime.now(UTC())
effective_start = _adjust_start_date_for_beta_testers(user, course, course_key)
if staff_access and now < effective_start:
# Disable student view button if user is staff and
# course is not yet visible to students.
context['disable_student_access'] = True
has_content = course.has_children_at_depth(CONTENT_DEPTH)
if not has_content:
# Show empty courseware for a course with no units
return render_to_response('courseware/courseware.html', context)
elif chapter is None:
# Check first to see if we should instead redirect the user to an Entrance Exam
if course_has_entrance_exam(course):
exam_chapter = get_entrance_exam_content(request, course)
if exam_chapter:
exam_section = None
if exam_chapter.get_children():
exam_section = exam_chapter.get_children()[0]
if exam_section:
return redirect('courseware_section',
course_id=unicode(course_key),
chapter=exam_chapter.url_name,
section=exam_section.url_name)
# passing CONTENT_DEPTH avoids returning 404 for a course with an
# empty first section and a second section with content
return redirect_to_course_position(course_module, CONTENT_DEPTH)
# Only show the chat if it's enabled by the course and in the
# settings.
show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
if show_chat:
context['chat'] = chat_settings(course, user)
# If we couldn't load the chat settings, then don't show
# the widget in the courseware.
if context['chat'] is None:
show_chat = False
context['show_chat'] = show_chat
chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
if chapter_descriptor is not None:
save_child_position(course_module, chapter)
else:
raise Http404('No chapter descriptor found with name {}'.format(chapter))
chapter_module = course_module.get_child_by(lambda m: m.location.name == chapter)
if chapter_module is None:
# User may be trying to access a chapter that isn't live yet
if masquerade and masquerade.role == 'student': # if staff is masquerading as student be kinder, don't 404
log.debug('staff masquerading as student: no chapter %s', chapter)
return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
raise Http404
if course_has_entrance_exam(course):
# Message should not appear outside the context of entrance exam subsection.
# if section is none then we don't need to show message on welcome back screen also.
if getattr(chapter_module, 'is_entrance_exam', False) and section is not None:
context['entrance_exam_current_score'] = get_entrance_exam_score(request, course)
context['entrance_exam_passed'] = user_has_passed_entrance_exam(request, course)
if section is not None:
section_descriptor = chapter_descriptor.get_child_by(lambda m: m.location.name == section)
if section_descriptor is None:
# Specifically asked-for section doesn't exist
if masquerade and masquerade.role == 'student': # don't 404 if staff is masquerading as student
log.debug('staff masquerading as student: no section %s', section)
return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
raise Http404
## Allow chromeless operation
if section_descriptor.chrome:
chrome = [s.strip() for s in section_descriptor.chrome.lower().split(",")]
if 'accordion' not in chrome:
context['disable_accordion'] = True
if 'tabs' not in chrome:
context['disable_tabs'] = True
if section_descriptor.default_tab:
context['default_tab'] = section_descriptor.default_tab
# cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
# which will prefetch the children more efficiently than doing a recursive load
section_descriptor = modulestore().get_item(section_descriptor.location, depth=None)
# Load all descendants of the section, because we're going to display its
# html, which in general will need all of its children
field_data_cache.add_descriptor_descendents(
section_descriptor, depth=None
)
section_module = get_module_for_descriptor(
request.user,
request,
section_descriptor,
field_data_cache,
course_key,
position
)
if section_module is None:
# User may be trying to be clever and access something
# they don't have access to.
raise Http404
# Save where we are in the chapter
save_child_position(chapter_module, section)
context['fragment'] = section_module.render(STUDENT_VIEW)
context['section_title'] = section_descriptor.display_name_with_default
else:
# section is none, so display a message
studio_url = get_studio_url(course, 'course')
prev_section = get_current_child(chapter_module)
if prev_section is None:
# Something went wrong -- perhaps this chapter has no sections visible to the user.
# Clearing out the last-visited state and showing "first-time" view by redirecting
# to courseware.
course_module.position = None
course_module.save()
return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
prev_section_url = reverse('courseware_section', kwargs={
'course_id': course_key.to_deprecated_string(),
'chapter': chapter_descriptor.url_name,
'section': prev_section.url_name
})
context['fragment'] = Fragment(content=render_to_string(
'courseware/welcome-back.html',
{
'course': course,
'studio_url': studio_url,
'chapter_module': chapter_module,
'prev_section': prev_section,
'prev_section_url': prev_section_url
}
))
result = render_to_response('courseware/courseware.html', context)
except Exception as e:
# Doesn't bar Unicode characters from URL, but if Unicode characters do
# cause an error it is a graceful failure.
if isinstance(e, UnicodeEncodeError):
raise Http404("URL contains Unicode characters")
if isinstance(e, Http404):
# let it propagate
raise
# In production, don't want to let a 500 out for any reason
if settings.DEBUG:
raise
else:
log.exception(
u"Error in index view: user={user}, course={course}, chapter={chapter}"
u" section={section} position={position}".format(
user=user,
course=course,
chapter=chapter,
section=section,
position=position
))
try:
result = render_to_response('courseware/courseware-error.html', {
'staff_access': staff_access,
'course': course
})
except:
# Let the exception propagate, relying on global config to
# at least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
return result
@ensure_csrf_cookie
@ensure_valid_course_key
def jump_to_id(request, course_id, module_id):
"""
This entry point allows for a shorter version of a jump, where just the id of the element is
passed in. It assumes that the id is unique within the course_id namespace.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
items = modulestore().get_items(course_key, qualifiers={'name': module_id})
if len(items) == 0:
raise Http404(
u"Could not find id: {0} in course_id: {1}. Referer: {2}".format(
module_id, course_id, request.META.get("HTTP_REFERER", "")
))
if len(items) > 1:
log.warning(
u"Multiple items found with id: {0} in course_id: {1}. Referer: {2}. Using first: {3}".format(
module_id, course_id, request.META.get("HTTP_REFERER", ""), items[0].location.to_deprecated_string()
))
return jump_to(request, course_id, items[0].location.to_deprecated_string())
@ensure_csrf_cookie
def jump_to(_request, course_id, location):
"""
Show the page that contains a specific location.
If the location is invalid or not in any class, return a 404.
Otherwise, delegates to the index view to figure out whether this user
has access, and what they should see.
"""
try:
course_key = CourseKey.from_string(course_id)
usage_key = UsageKey.from_string(location).replace(course_key=course_key)
except InvalidKeyError:
raise Http404(u"Invalid course_key or usage_key")
try:
(course_key, chapter, section, position) = path_to_location(modulestore(), usage_key)
except ItemNotFoundError:
raise Http404(u"No data at this location: {0}".format(usage_key))
except NoPathToItem:
raise Http404(u"This location is not in any class: {0}".format(usage_key))
# choose the appropriate view (and provide the necessary args) based on the
# args provided by the redirect.
# Rely on index to do all error handling and access control.
if chapter is None:
return redirect('courseware', course_id=unicode(course_key))
elif section is None:
return redirect('courseware_chapter', course_id=unicode(course_key), chapter=chapter)
elif position is None:
return redirect(
'courseware_section',
course_id=unicode(course_key),
chapter=chapter,
section=section
)
else:
# Here we use the navigation_index from the position returned from
# path_to_location - we can only navigate to the topmost vertical at the
# moment
return redirect(
'courseware_position',
course_id=unicode(course_key),
chapter=chapter,
section=section,
position=navigation_index(position)
)
@ensure_csrf_cookie
@ensure_valid_course_key
def course_info(request, course_id):
"""
Display the course's info.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
with modulestore().bulk_operations(course_key):
course = get_course_with_access(request.user, 'load', course_key)
# If the user needs to take an entrance exam to access this course, then we'll need
# to send them to that specific course module before allowing them into other areas
if user_must_complete_entrance_exam(request, request.user, course):
return redirect(reverse('courseware', args=[unicode(course.id)]))
# check to see if there is a required survey that must be taken before
# the user can access the course.
if request.user.is_authenticated() and survey.utils.must_answer_survey(course, request.user):
return redirect(reverse('course_survey', args=[unicode(course.id)]))
staff_access = has_access(request.user, 'staff', course)
masquerade = setup_masquerade(request, course_key, staff_access) # allow staff to masquerade on the info page
reverifications = fetch_reverify_banner_info(request, course_key)
studio_url = get_studio_url(course, 'course_info')
# link to where the student should go to enroll in the course:
# the about page if there is no marketing site, SITE_NAME if there is
url_to_enroll = reverse(course_about, args=[course_id])
if settings.FEATURES.get('ENABLE_MKTG_SITE'):
url_to_enroll = marketing_link('COURSES')
show_enroll_banner = request.user.is_authenticated() and not CourseEnrollment.is_enrolled(request.user, course.id)
context = {
'request': request,
'course_id': course_key.to_deprecated_string(),
'cache': None,
'course': course,
'staff_access': staff_access,
'masquerade': masquerade,
'studio_url': studio_url,
'reverifications': reverifications,
'show_enroll_banner': show_enroll_banner,
'url_to_enroll': url_to_enroll,
}
now = datetime.now(UTC())
effective_start = _adjust_start_date_for_beta_testers(request.user, course, course_key)
if staff_access and now < effective_start:
# Disable student view button if user is staff and
# course is not yet visible to students.
context['disable_student_access'] = True
return render_to_response('courseware/info.html', context)
@ensure_csrf_cookie
@ensure_valid_course_key
def static_tab(request, course_id, tab_slug):
"""
Display the courses tab with the given name.
Assumes the course_id is in a valid format.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
tab = CourseTabList.get_tab_by_slug(course.tabs, tab_slug)
if tab is None:
raise Http404
contents = get_static_tab_contents(
request,
course,
tab
)
if contents is None:
raise Http404
return render_to_response('courseware/static_tab.html', {
'course': course,
'tab': tab,
'tab_contents': contents,
})
# TODO arjun: remove when custom tabs in place, see courseware/syllabus.py
@ensure_csrf_cookie
@ensure_valid_course_key
def syllabus(request, course_id):
"""
Display the course's syllabus.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
staff_access = has_access(request.user, 'staff', course)
return render_to_response('courseware/syllabus.html', {
'course': course,
'staff_access': staff_access,
})
def registered_for_course(course, user):
"""
Return True if user is registered for course, else False
"""
if user is None:
return False
if user.is_authenticated():
return CourseEnrollment.is_enrolled(user, course.id)
else:
return False
def get_cosmetic_display_price(course, registration_price):
"""
Return Course Price as a string preceded by correct currency, or 'Free'
"""
currency_symbol = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]
price = course.cosmetic_display_price
if registration_price > 0:
price = registration_price
if price:
# Translators: This will look like '$50', where {currency_symbol} is a symbol such as '$' and {price} is a
# numerical amount in that currency. Adjust this display as needed for your language.
return _("{currency_symbol}{price}").format(currency_symbol=currency_symbol, price=price)
else:
# Translators: This refers to the cost of the course. In this case, the course costs nothing so it is free.
return _('Free')
@ensure_csrf_cookie
@cache_if_anonymous()
def course_about(request, course_id):
"""
Display the course's about page.
Assumes the course_id is in a valid format.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
with modulestore().bulk_operations(course_key):
permission_name = microsite.get_value(
'COURSE_ABOUT_VISIBILITY_PERMISSION',
settings.COURSE_ABOUT_VISIBILITY_PERMISSION
)
course = get_course_with_access(request.user, permission_name, course_key)
if microsite.get_value('ENABLE_MKTG_SITE', settings.FEATURES.get('ENABLE_MKTG_SITE', False)):
return redirect(reverse('info', args=[course.id.to_deprecated_string()]))
registered = registered_for_course(course, request.user)
staff_access = has_access(request.user, 'staff', course)
studio_url = get_studio_url(course, 'settings/details')
if has_access(request.user, 'load', course):
course_target = reverse('info', args=[course.id.to_deprecated_string()])
else:
course_target = reverse('about_course', args=[course.id.to_deprecated_string()])
show_courseware_link = (
(
has_access(request.user, 'load', course)
and has_access(request.user, 'view_courseware_with_prerequisites', course)
)
or settings.FEATURES.get('ENABLE_LMS_MIGRATION')
)
# Note: this is a flow for payment for course registration, not the Verified Certificate flow.
registration_price = 0
in_cart = False
reg_then_add_to_cart_link = ""
_is_shopping_cart_enabled = is_shopping_cart_enabled()
if _is_shopping_cart_enabled:
registration_price = CourseMode.min_course_price_for_currency(course_key,
settings.PAID_COURSE_REGISTRATION_CURRENCY[0])
if request.user.is_authenticated():
cart = shoppingcart.models.Order.get_cart_for_user(request.user)
in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_key) or \
shoppingcart.models.CourseRegCodeItem.contained_in_order(cart, course_key)
reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format(
reg_url=reverse('register_user'), course_id=course.id.to_deprecated_string())
course_price = get_cosmetic_display_price(course, registration_price)
can_add_course_to_cart = _is_shopping_cart_enabled and registration_price
# Used to provide context to message to student if enrollment not allowed
can_enroll = has_access(request.user, 'enroll', course)
invitation_only = course.invitation_only
is_course_full = CourseEnrollment.is_course_full(course)
# Register button should be disabled if one of the following is true:
# - Student is already registered for course
# - Course is already full
# - Student cannot enroll in course
active_reg_button = not (registered or is_course_full or not can_enroll)
is_shib_course = uses_shib(course)
# get prerequisite courses display names
pre_requisite_courses = get_prerequisite_courses_display(course)
return render_to_response('courseware/course_about.html', {
'course': course,
'staff_access': staff_access,
'studio_url': studio_url,
'registered': registered,
'course_target': course_target,
'is_cosmetic_price_enabled': settings.FEATURES.get('ENABLE_COSMETIC_DISPLAY_PRICE'),
'course_price': course_price,
'in_cart': in_cart,
'reg_then_add_to_cart_link': reg_then_add_to_cart_link,
'show_courseware_link': show_courseware_link,
'is_course_full': is_course_full,
'can_enroll': can_enroll,
'invitation_only': invitation_only,
'active_reg_button': active_reg_button,
'is_shib_course': is_shib_course,
# We do not want to display the internal courseware header, which is used when the course is found in the
# context. This value is therefore explicitly set to render the appropriate header.
'disable_courseware_header': True,
'can_add_course_to_cart': can_add_course_to_cart,
'cart_link': reverse('shoppingcart.views.show_cart'),
'pre_requisite_courses': pre_requisite_courses
})
@ensure_csrf_cookie
@cache_if_anonymous('org')
@ensure_valid_course_key
def mktg_course_about(request, course_id):
"""This is the button that gets put into an iframe on the Drupal site."""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
try:
permission_name = microsite.get_value(
'COURSE_ABOUT_VISIBILITY_PERMISSION',
settings.COURSE_ABOUT_VISIBILITY_PERMISSION
)
course = get_course_with_access(request.user, permission_name, course_key)
except (ValueError, Http404):
# If a course does not exist yet, display a "Coming Soon" button
return render_to_response(
'courseware/mktg_coming_soon.html', {'course_id': course_key.to_deprecated_string()}
)
registered = registered_for_course(course, request.user)
if has_access(request.user, 'load', course):
course_target = reverse('info', args=[course.id.to_deprecated_string()])
else:
course_target = reverse('about_course', args=[course.id.to_deprecated_string()])
allow_registration = has_access(request.user, 'enroll', course)
show_courseware_link = (has_access(request.user, 'load', course) or
settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
course_modes = CourseMode.modes_for_course_dict(course.id)
context = {
'course': course,
'registered': registered,
'allow_registration': allow_registration,
'course_target': course_target,
'show_courseware_link': show_courseware_link,
'course_modes': course_modes,
}
# The edx.org marketing site currently displays only in English.
# To avoid displaying a different language in the register / access button,
# we force the language to English.
# However, OpenEdX installations with a different marketing front-end
# may want to respect the language specified by the user or the site settings.
force_english = settings.FEATURES.get('IS_EDX_DOMAIN', False)
if force_english:
translation.activate('en-us')
if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
# Drupal will pass organization names using a GET parameter, as follows:
# ?org=Harvard
# ?org=Harvard,MIT
# If no full names are provided, the marketing iframe won't show the
# email opt-in checkbox.
org = request.GET.get('org')
if org:
org_list = org.split(',')
# HTML-escape the provided organization names
org_list = [cgi.escape(org) for org in org_list]
if len(org_list) > 1:
if len(org_list) > 2:
# Translators: The join of three or more institution names (e.g., Harvard, MIT, and Dartmouth).
org_name_string = _("{first_institutions}, and {last_institution}").format(
first_institutions=u", ".join(org_list[:-1]),
last_institution=org_list[-1]
)
else:
# Translators: The join of two institution names (e.g., Harvard and MIT).
org_name_string = _("{first_institution} and {second_institution}").format(
first_institution=org_list[0],
second_institution=org_list[1]
)
else:
org_name_string = org_list[0]
context['checkbox_label'] = ungettext(
"I would like to receive email from {institution_series} and learn about its other programs.",
"I would like to receive email from {institution_series} and learn about their other programs.",
len(org_list)
).format(institution_series=org_name_string)
try:
return render_to_response('courseware/mktg_course_about.html', context)
finally:
# Just to be safe, reset the language if we forced it to be English.
if force_english:
translation.deactivate()
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@transaction.commit_manually
@ensure_valid_course_key
def progress(request, course_id, student_id=None):
"""
Wraps "_progress" with the manual_transaction context manager just in case
there are unanticipated errors.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
with modulestore().bulk_operations(course_key):
with grades.manual_transaction():
return _progress(request, course_key, student_id)
def _progress(request, course_key, student_id):
"""
Unwrapped version of "progress".
User progress. We show the grade bar and every problem score.
Course staff are allowed to see the progress of students in their class.
"""
course = get_course_with_access(request.user, 'load', course_key, depth=None, check_if_enrolled=True)
# check to see if there is a required survey that must be taken before
# the user can access the course.
if survey.utils.must_answer_survey(course, request.user):
return redirect(reverse('course_survey', args=[unicode(course.id)]))
staff_access = has_access(request.user, 'staff', course)
if student_id is None or student_id == request.user.id:
# always allowed to see your own profile
student = request.user
else:
# Requesting access to a different student's profile
if not staff_access:
raise Http404
try:
student = User.objects.get(id=student_id)
# Check for ValueError if 'student_id' cannot be converted to integer.
except (ValueError, User.DoesNotExist):
raise Http404
# NOTE: To make sure impersonation by instructor works, use
# student instead of request.user in the rest of the function.
# The pre-fetching of groups is done to make auth checks not require an
# additional DB lookup (this kills the Progress page in particular).
student = User.objects.prefetch_related("groups").get(id=student.id)
courseware_summary = grades.progress_summary(student, request, course)
studio_url = get_studio_url(course, 'settings/grading')
grade_summary = grades.grade(student, request, course)
if courseware_summary is None:
# This means the student didn't have access to the course (whose progress the staff member requested)
raise Http404
# checking certificate generation configuration
show_generate_cert_btn = certs_api.cert_generation_enabled(course_key)
context = {
'course': course,
'courseware_summary': courseware_summary,
'studio_url': studio_url,
'grade_summary': grade_summary,
'staff_access': staff_access,
'student': student,
'reverifications': fetch_reverify_banner_info(request, course_key),
'passed': is_course_passed(course, grade_summary),
'show_generate_cert_btn': show_generate_cert_btn
}
if show_generate_cert_btn:
context.update(certs_api.certificate_downloadable_status(student, course_key))
with grades.manual_transaction():
response = render_to_response('courseware/progress.html', context)
return response
def fetch_reverify_banner_info(request, course_key):
"""
Fetches the context variables needed to display the reverification banner in courseware
"""
reverifications = defaultdict(list)
user = request.user
if not user.id:
return reverifications
enrollment = CourseEnrollment.get_enrollment(request.user, course_key)
if enrollment is not None:
course = modulestore().get_course(course_key)
info = single_course_reverification_info(user, course, enrollment)
if info:
reverifications[info.status].append(info)
return reverifications
@login_required
@ensure_valid_course_key
def submission_history(request, course_id, student_username, location):
"""Render an HTML fragment (meant for inclusion elsewhere) that renders a
history of all state changes made by this user for this problem location.
Right now this only works for problems because that's all
StudentModuleHistory records.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
try:
usage_key = course_key.make_usage_key_from_deprecated_string(location)
except (InvalidKeyError, AssertionError):
return HttpResponse(escape(_(u'Invalid location.')))
course = get_course_with_access(request.user, 'load', course_key)
staff_access = has_access(request.user, 'staff', course)
# Permission Denied if they don't have staff access and are trying to see
# somebody else's submission history.
if (student_username != request.user.username) and (not staff_access):
raise PermissionDenied
try:
student = User.objects.get(username=student_username)
student_module = StudentModule.objects.get(
course_id=course_key,
module_state_key=usage_key,
student_id=student.id
)
except User.DoesNotExist:
return HttpResponse(escape(_(u'User {username} does not exist.').format(username=student_username)))
except StudentModule.DoesNotExist:
return HttpResponse(escape(_(u'User {username} has never accessed problem {location}').format(
username=student_username,
location=location
)))
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
# If no history records exist, let's force a save to get history started.
if not history_entries:
student_module.save()
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
context = {
'history_entries': history_entries,
'username': student.username,
'location': location,
'course_id': course_key.to_deprecated_string()
}
return render_to_response('courseware/submission_history.html', context)
def notification_image_for_tab(course_tab, user, course):
"""
Returns the notification image path for the given course_tab if applicable, otherwise None.
"""
tab_notification_handlers = {
StaffGradingTab.type: open_ended_notifications.staff_grading_notifications,
PeerGradingTab.type: open_ended_notifications.peer_grading_notifications,
OpenEndedGradingTab.type: open_ended_notifications.combined_notifications
}
if course_tab.type in tab_notification_handlers:
notifications = tab_notification_handlers[course_tab.type](course, user)
if notifications and notifications['pending_grading']:
return notifications['img_path']
return None
def get_static_tab_contents(request, course, tab):
"""
Returns the contents for the given static tab
"""
loc = course.id.make_usage_key(
tab.type,
tab.url_slug,
)
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, modulestore().get_item(loc), depth=0
)
tab_module = get_module(
request.user, request, loc, field_data_cache, static_asset_path=course.static_asset_path
)
logging.debug('tab_module = {0}'.format(tab_module))
html = ''
if tab_module is not None:
try:
html = tab_module.render(STUDENT_VIEW).content
except Exception: # pylint: disable=broad-except
html = render_to_string('courseware/error-message.html', None)
log.exception(
u"Error rendering course={course}, tab={tab_url}".format(course=course, tab_url=tab['url_slug'])
)
return html
@require_GET
@ensure_valid_course_key
def get_course_lti_endpoints(request, course_id):
"""
View that, given a course_id, returns a JSON object that enumerates all of the LTI endpoints for that course.
The LTI 2.0 result service spec at
http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html
says "This specification document does not prescribe a method for discovering the endpoint URLs." This view
function implements one way of discovering these endpoints, returning a JSON array when accessed.
Arguments:
request (django request object): the HTTP request object that triggered this view function
course_id (unicode): id associated with the course
Returns:
(django response object): HTTP response. 404 if course is not found, otherwise 200 with JSON body.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
try:
course = get_course(course_key, depth=2)
except ValueError:
return HttpResponse(status=404)
anonymous_user = AnonymousUser()
anonymous_user.known = False # make these "noauth" requests like module_render.handle_xblock_callback_noauth
lti_descriptors = modulestore().get_items(course.id, qualifiers={'category': 'lti'})
lti_noauth_modules = [
get_module_for_descriptor(
anonymous_user,
request,
descriptor,
FieldDataCache.cache_for_descriptor_descendents(
course_key,
anonymous_user,
descriptor
),
course_key
)
for descriptor in lti_descriptors
]
endpoints = [
{
'display_name': module.display_name,
'lti_2_0_result_service_json_endpoint': module.get_outcome_service_url(
service_name='lti_2_0_result_rest_handler') + "/user/{anon_user_id}",
'lti_1_1_result_service_xml_endpoint': module.get_outcome_service_url(
service_name='grade_handler'),
}
for module in lti_noauth_modules
]
return HttpResponse(json.dumps(endpoints), content_type='application/json')
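# Illustrative shape of the JSON body returned above (values are made up;
# actual endpoint URLs depend on the deployment and on the LTI modules
# found in the course):
#
# [
#     {
#         "display_name": "An LTI component",
#         "lti_2_0_result_service_json_endpoint": "https://lms.example.com/.../lti_2_0_result_rest_handler/user/{anon_user_id}",
#         "lti_1_1_result_service_xml_endpoint": "https://lms.example.com/.../grade_handler"
#     }
# ]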
@login_required
def course_survey(request, course_id):
"""
URL endpoint to present a survey that is associated with a course_id
Note that the actual implementation of course survey is handled in the
views.py file in the Survey Djangoapp
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
redirect_url = reverse('info', args=[course_id])
# if there is no Survey associated with this course,
# then redirect to the course instead
if not course.course_survey_name:
return redirect(redirect_url)
return survey.views.view_student_survey(
request.user,
course.course_survey_name,
course=course,
redirect_url=redirect_url,
is_required=course.course_survey_required,
)
def is_course_passed(course, grade_summary=None, student=None, request=None):
"""
Check the user's course passing status; return True if passed.
Arguments:
course : course object
grade_summary (dict) : contains student grade details.
student : user object
request (HttpRequest)
Returns:
returns bool value
"""
nonzero_cutoffs = [cutoff for cutoff in course.grade_cutoffs.values() if cutoff > 0]
success_cutoff = min(nonzero_cutoffs) if nonzero_cutoffs else None
if grade_summary is None:
grade_summary = grades.grade(student, request, course)
return success_cutoff and grade_summary['percent'] > success_cutoff
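# Illustrative sketch of the cutoff logic above (stub objects, not the real
# edx-platform course/grade API): with hypothetical grade cutoffs
# {'Pass': 0.5, 'A': 0.9}, the success threshold is the lowest nonzero
# cutoff (0.5), which the grade percent must exceed:
#
#     class _StubCourse(object):
#         grade_cutoffs = {'Pass': 0.5, 'A': 0.9}
#
#     is_course_passed(_StubCourse(), grade_summary={'percent': 0.55})  # passes
#     is_course_passed(_StubCourse(), grade_summary={'percent': 0.40})  # does not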
@require_POST
def generate_user_cert(request, course_id):
"""Start generating a new certificate for the user.
Certificate generation is allowed if:
* The user has passed the course, and
* The user does not already have a pending/completed certificate.
Note that if an error occurs during certificate generation
(for example, if the queue is down), then we simply mark the
certificate generation task status as "error" and re-run
the task with a management command. To students, the certificate
will appear to be "generating" until it is re-run.
Args:
request (HttpRequest): The POST request to this view.
course_id (unicode): The identifier for the course.
Returns:
HttpResponse: 200 on success, 400 if a new certificate cannot be generated.
"""
if not request.user.is_authenticated():
log.info(u"Anon user trying to generate certificate for %s", course_id)
return HttpResponseBadRequest(
_('You must be signed in to {platform_name} to create a certificate.').format(
platform_name=settings.PLATFORM_NAME
)
)
student = request.user
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key, depth=2)
if not course:
return HttpResponseBadRequest(_("Course is not valid"))
if not is_course_passed(course, None, student, request):
return HttpResponseBadRequest(_("Your certificate will be available when you pass the course."))
certificate_status = certs_api.certificate_downloadable_status(student, course.id)
if certificate_status["is_downloadable"]:
return HttpResponseBadRequest(_("Certificate has already been created."))
elif certificate_status["is_generating"]:
return HttpResponseBadRequest(_("Certificate is already being created."))
else:
# If the certificate is not already in-process or completed,
# then create a new certificate generation task.
# If the certificate cannot be added to the queue, this will
# mark the certificate with "error" status, so it can be re-run
# with a management command. From the user's perspective,
# it will appear that the certificate task was submitted successfully.
certs_api.generate_user_certificates(student, course.id)
_track_successful_certificate_generation(student.id, course.id)
return HttpResponse()
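# Hypothetical client-side call (assumes this view is wired to a URL named
# 'generate_user_cert' in the URLconf, which is not part of this file):
#
#     resp = client.post(reverse('generate_user_cert', args=[course_id]))
#     # resp.status_code is 200 on success, 400 otherwise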
def _track_successful_certificate_generation(user_id, course_id): # pylint: disable=invalid-name
"""Track an successfully certificate generation event.
Arguments:
user_id (str): The ID of the user generting the certificate.
course_id (CourseKey): Identifier for the course.
Returns:
None
"""
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
event_name = 'edx.bi.user.certificate.generate' # pylint: disable=no-member
tracking_context = tracker.get_tracker().resolve_context() # pylint: disable=no-member
analytics.track(
user_id,
event_name,
{
'category': 'certificates',
'label': unicode(course_id)
},
context={
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
|
beni55/edx-platform
|
lms/djangoapps/courseware/views.py
|
Python
|
agpl-3.0
| 56,361
|
[
"VisIt"
] |
e7ae041f568c85551f877047728339b909a7dde1d697d3bf109b3d3eed464590
|
import molecule
import qcjob
# create the molecule from xyz file
h2 = molecule.from_xyz('geometries/hydrogen.xyz')
# generate Q-Chem job
job = qcjob.QCjob(h2, rems={"gen_scfman": "true"})
job.run()
# if you wish to test the Jmol viewer, uncomment the following line:
# h2.to_jmol()
|
EhudTsivion/QCkit
|
examples/simple_job.py
|
Python
|
lgpl-3.0
| 285
|
[
"Jmol",
"Q-Chem"
] |
b8306693a514218dbf8c9840e4e272bb1f9968ecfeed76bba0948088fe2f298e
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements plotter for DOS and band structure.
"""
import logging
from collections import OrderedDict, namedtuple
import numpy as np
import scipy.constants as const
from monty.json import jsanitize
from pymatgen.electronic_structure.plotter import plot_brillouin_zone
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
from pymatgen.phonon.gruneisen import GruneisenPhononBandStructureSymmLine
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt, pretty_plot
logger = logging.getLogger(__name__)
FreqUnits = namedtuple("FreqUnits", ["factor", "label"])
def freq_units(units):
"""
Args:
units: str, accepted values: thz, ev, mev, ha, cm-1, cm^-1
Returns:
Conversion factor from THz to the required units, and the label, in the form of a namedtuple
"""
d = {
"thz": FreqUnits(1, "THz"),
"ev": FreqUnits(const.value("hertz-electron volt relationship") * const.tera, "eV"),
"mev": FreqUnits(
const.value("hertz-electron volt relationship") * const.tera / const.milli,
"meV",
),
"ha": FreqUnits(const.value("hertz-hartree relationship") * const.tera, "Ha"),
"cm-1": FreqUnits(
const.value("hertz-inverse meter relationship") * const.tera * const.centi,
"cm^{-1}",
),
"cm^-1": FreqUnits(
const.value("hertz-inverse meter relationship") * const.tera * const.centi,
"cm^{-1}",
),
}
try:
return d[units.lower().strip()]
except KeyError:
raise KeyError("Value for units `{}` unknown\nPossible values are:\n {}".format(units, list(d.keys())))
class PhononDosPlotter:
"""
Class for plotting phonon DOSs. Note that the interface is extremely flexible
given that there are many different ways in which people want to view
DOS. The typical usage is::
# Initializes plotter with some optional args. Defaults are usually
# fine,
plotter = PhononDosPlotter()
# Adds a DOS with a label.
plotter.add_dos("Total DOS", dos)
# Alternatively, you can add a dict of DOSs. This is the typical
# form returned by CompletePhononDos.get_element_dos():
plotter.add_dos_dict(complete_dos.get_element_dos())
"""
def __init__(self, stack=False, sigma=None):
"""
Args:
stack: Whether to plot the DOS as a stacked area graph
sigma: A float specifying a standard deviation for Gaussian smearing
the DOS for nicer looking plots. Defaults to None for no
smearing.
"""
self.stack = stack
self.sigma = sigma
self._doses = OrderedDict()
def add_dos(self, label, dos):
"""
Adds a dos for plotting.
Args:
label:
label for the DOS. Must be unique.
dos:
PhononDos object
"""
densities = dos.get_smeared_densities(self.sigma) if self.sigma else dos.densities
self._doses[label] = {"frequencies": dos.frequencies, "densities": densities}
def add_dos_dict(self, dos_dict, key_sort_func=None):
"""
Add a dictionary of doses, with an optional sorting function for the
keys.
Args:
dos_dict: dict of {label: Dos}
key_sort_func: function used to sort the dos_dict keys.
"""
if key_sort_func:
keys = sorted(dos_dict.keys(), key=key_sort_func)
else:
keys = dos_dict.keys()
for label in keys:
self.add_dos(label, dos_dict[label])
def get_dos_dict(self):
"""
Returns the added doses as a json-serializable dict. Note that if you
have specified smearing for the DOS plot, the densities returned will
be the smeared densities, not the original densities.
Returns:
Dict of dos data. Generally of the form, {label: {'frequencies':..,
'densities': ...}}
"""
return jsanitize(self._doses)
def get_plot(self, xlim=None, ylim=None, units="thz"):
"""
Get a matplotlib plot showing the DOS.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
u = freq_units(units)
ncolors = max(3, len(self._doses))
ncolors = min(9, ncolors)
import palettable
colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors # pylint: disable=E1101
y = None
alldensities = []
allfrequencies = []
plt = pretty_plot(12, 8)
# Note that this complicated processing of frequencies is to allow for
# stacked plots in matplotlib.
for key, dos in self._doses.items():
frequencies = dos["frequencies"] * u.factor
densities = dos["densities"]
if y is None:
y = np.zeros(frequencies.shape)
if self.stack:
y += densities
newdens = y.copy()
else:
newdens = densities
allfrequencies.append(frequencies)
alldensities.append(newdens)
keys = list(self._doses.keys())
keys.reverse()
alldensities.reverse()
allfrequencies.reverse()
allpts = []
for i, (key, frequencies, densities) in enumerate(zip(keys, allfrequencies, alldensities)):
allpts.extend(list(zip(frequencies, densities)))
if self.stack:
plt.fill(frequencies, densities, color=colors[i % ncolors], label=str(key))
else:
plt.plot(
frequencies,
densities,
color=colors[i % ncolors],
label=str(key),
linewidth=3,
)
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
else:
xlim = plt.xlim()
relevanty = [p[1] for p in allpts if xlim[0] < p[0] < xlim[1]]
plt.ylim((min(relevanty), max(relevanty)))
ylim = plt.ylim()
plt.plot([0, 0], ylim, "k--", linewidth=2)
plt.xlabel(r"$\mathrm{{Frequencies\ ({})}}$".format(u.label))
plt.ylabel(r"$\mathrm{Density\ of\ states}$")
plt.legend()
leg = plt.gca().get_legend()
ltext = leg.get_texts() # all the text.Text instances in the legend
plt.setp(ltext, fontsize=30)
plt.tight_layout()
return plt
def save_plot(self, filename, img_format="eps", xlim=None, ylim=None, units="thz"):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1
"""
plt = self.get_plot(xlim, ylim, units=units)
plt.savefig(filename, format=img_format)
plt.close()
def show(self, xlim=None, ylim=None, units="thz"):
"""
Show the plot using matplotlib.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
plt = self.get_plot(xlim, ylim, units=units)
plt.show()
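# Minimal end-to-end sketch for PhononDosPlotter (assumes `complete_dos` is a
# CompletePhononDos obtained elsewhere, e.g. from a phonopy calculation):
#
#     plotter = PhononDosPlotter(sigma=0.05)
#     plotter.add_dos("Total DOS", complete_dos)
#     plotter.add_dos_dict(complete_dos.get_element_dos())
#     plotter.save_plot("phonon_dos.eps", units="cm-1")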
class PhononBSPlotter:
"""
Class to plot or get data to facilitate the plot of band structure objects.
"""
def __init__(self, bs):
"""
Args:
bs: A PhononBandStructureSymmLine object.
"""
if not isinstance(bs, PhononBandStructureSymmLine):
raise ValueError(
"PhononBSPlotter only works with PhononBandStructureSymmLine objects. "
"A PhononBandStructure object (e.g. on a uniform grid, "
"not along symmetry lines) won't work."
)
self._bs = bs
self._nb_bands = self._bs.nb_bands
def _maketicks(self, plt):
"""
utility private method to add ticks to a band structure
"""
ticks = self.get_ticks()
# Sanitize: only plot the unique values
uniq_d = []
uniq_l = []
temp_ticks = list(zip(ticks["distance"], ticks["label"]))
for i, tt in enumerate(temp_ticks):
if i == 0:
uniq_d.append(tt[0])
uniq_l.append(tt[1])
logger.debug("Adding label {l} at {d}".format(l=tt[0], d=tt[1]))
else:
if tt[1] == temp_ticks[i - 1][1]:
logger.debug("Skipping label {i}".format(i=tt[1]))
else:
logger.debug("Adding label {l} at {d}".format(l=tt[0], d=tt[1]))
uniq_d.append(tt[0])
uniq_l.append(tt[1])
logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l)))
plt.gca().set_xticks(uniq_d)
plt.gca().set_xticklabels(uniq_l)
for i in range(len(ticks["label"])):
if ticks["label"][i] is not None:
# don't print the same label twice
if i != 0:
if ticks["label"][i] == ticks["label"][i - 1]:
logger.debug("already print label... " "skipping label {i}".format(i=ticks["label"][i]))
else:
logger.debug(
"Adding a line at {d} for label {l}".format(d=ticks["distance"][i], l=ticks["label"][i])
)
plt.axvline(ticks["distance"][i], color="k")
else:
logger.debug(
"Adding a line at {d} for label {l}".format(d=ticks["distance"][i], l=ticks["label"][i])
)
plt.axvline(ticks["distance"][i], color="k")
return plt
def bs_plot_data(self):
"""
Get the data nicely formatted for a plot
Returns:
A dict of the following format:
ticks: A dict with the 'distance' at which there is a qpoint (the
x axis) and the 'label' (None if no label)
distances: A list (one element per branch) of the distances along the path
frequency: A list (one element for each branch) of frequencies for
each qpoint: [branch][qpoint][mode]. The data is
stored by branch to facilitate the plotting
lattice: The reciprocal lattice.
"""
distance = []
frequency = []
ticks = self.get_ticks()
for b in self._bs.branches:
frequency.append([])
distance.append([self._bs.distance[j] for j in range(b["start_index"], b["end_index"] + 1)])
for i in range(self._nb_bands):
frequency[-1].append([self._bs.bands[i][j] for j in range(b["start_index"], b["end_index"] + 1)])
return {
"ticks": ticks,
"distances": distance,
"frequency": frequency,
"lattice": self._bs.lattice_rec.as_dict(),
}
def get_plot(self, ylim=None, units="thz"):
"""
Get a matplotlib object for the bandstructure plot.
Args:
ylim: Specify the y-axis (frequency) limits; by default None, which lets
the code choose.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
u = freq_units(units)
plt = pretty_plot(12, 8)
band_linewidth = 1
data = self.bs_plot_data()
for d in range(len(data["distances"])):
for i in range(self._nb_bands):
plt.plot(
data["distances"][d],
[data["frequency"][d][i][j] * u.factor for j in range(len(data["distances"][d]))],
"b-",
linewidth=band_linewidth,
)
self._maketicks(plt)
# plot y=0 line
plt.axhline(0, linewidth=1, color="k")
# Main X and Y Labels
plt.xlabel(r"$\mathrm{Wave\ Vector}$", fontsize=30)
ylabel = r"$\mathrm{{Frequencies\ ({})}}$".format(u.label)
plt.ylabel(ylabel, fontsize=30)
# X range (K)
# last distance point
x_max = data["distances"][-1][-1]
plt.xlim(0, x_max)
if ylim is not None:
plt.ylim(ylim)
plt.tight_layout()
return plt
def show(self, ylim=None, units="thz"):
"""
Show the plot using matplotlib.
Args:
ylim: Specify the y-axis (frequency) limits; by default None, which lets
the code choose.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
plt = self.get_plot(ylim, units=units)
plt.show()
def save_plot(self, filename, img_format="eps", ylim=None, units="thz"):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
plt = self.get_plot(ylim=ylim, units=units)
plt.savefig(filename, format=img_format)
plt.close()
def get_ticks(self):
"""
Get all ticks and labels for a band structure plot.
Returns:
A dict with 'distance': a list of distance at which ticks should
be set and 'label': a list of label for each of those ticks.
"""
tick_distance = []
tick_labels = []
previous_label = self._bs.qpoints[0].label
previous_branch = self._bs.branches[0]["name"]
for i, c in enumerate(self._bs.qpoints):
if c.label is not None:
tick_distance.append(self._bs.distance[i])
this_branch = None
for b in self._bs.branches:
if b["start_index"] <= i <= b["end_index"]:
this_branch = b["name"]
break
if c.label != previous_label and previous_branch != this_branch:
label1 = c.label
if label1.startswith("\\") or label1.find("_") != -1:
label1 = "$" + label1 + "$"
label0 = previous_label
if label0.startswith("\\") or label0.find("_") != -1:
label0 = "$" + label0 + "$"
tick_labels.pop()
tick_distance.pop()
tick_labels.append(label0 + "$\\mid$" + label1)
else:
if c.label.startswith("\\") or c.label.find("_") != -1:
tick_labels.append("$" + c.label + "$")
else:
tick_labels.append(c.label)
previous_label = c.label
previous_branch = this_branch
return {"distance": tick_distance, "label": tick_labels}
def plot_compare(self, other_plotter, units="thz"):
"""
Plot two band structures for comparison: this plotter's in blue, the other in red.
The two band structures must be defined on the same symmetry lines,
and the distances between symmetry points are taken from the band structure
used to build this PhononBSPlotter.
Args:
other_plotter: another PhononBSPlotter object defined along the same symmetry lines
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
Returns:
a matplotlib object with both band structures
"""
u = freq_units(units)
data_orig = self.bs_plot_data()
data = other_plotter.bs_plot_data()
if len(data_orig["distances"]) != len(data["distances"]):
raise ValueError("The two objects are not compatible.")
plt = self.get_plot(units=units)
band_linewidth = 1
for i in range(other_plotter._nb_bands):
for d in range(len(data_orig["distances"])):
plt.plot(
data_orig["distances"][d],
[data["frequency"][d][i][j] * u.factor for j in range(len(data_orig["distances"][d]))],
"r-",
linewidth=band_linewidth,
)
return plt
def plot_brillouin(self):
"""
plot the Brillouin zone
"""
# get labels and lines
labels = {}
for q in self._bs.qpoints:
if q.label:
labels[q.label] = q.frac_coords
lines = []
for b in self._bs.branches:
lines.append(
[
self._bs.qpoints[b["start_index"]].frac_coords,
self._bs.qpoints[b["end_index"]].frac_coords,
]
)
plot_brillouin_zone(self._bs.lattice_rec, lines=lines, labels=labels)
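# Minimal usage sketch for PhononBSPlotter (assumes `bs` is a
# PhononBandStructureSymmLine, e.g. parsed from a phonopy band.yaml):
#
#     bs_plotter = PhononBSPlotter(bs)
#     bs_plotter.save_plot("phonon_bands.eps", units="thz")
#     bs_plotter.plot_brillouin()  # draws the Brillouin zone with the q-path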
class ThermoPlotter:
"""
Plotter for thermodynamic properties obtained from phonon DOS.
If the structure corresponding to the DOS is provided, it will be used to extract the formula
unit, and the plots will be given in units of mol instead of mol-cell.
"""
def __init__(self, dos, structure=None):
"""
Args:
dos: A PhononDos object.
structure: A Structure object corresponding to the structure used for the calculation.
"""
self.dos = dos
self.structure = structure
def _plot_thermo(self, func, temperatures, factor=1, ax=None, ylabel=None, label=None, ylim=None, **kwargs):
"""
Plots a thermodynamic property for a generic function from a PhononDos instance.
Args:
func: the thermodynamic function to be used to calculate the property
temperatures: a list of temperatures
factor: a multiplicative factor applied to the thermodynamic property calculated. Used to change
the units.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
ylabel: label for the y axis
label: label of the plot
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
ax, fig, plt = get_ax_fig_plt(ax)
values = []
for t in temperatures:
values.append(func(t, structure=self.structure) * factor)
ax.plot(temperatures, values, label=label, **kwargs)
if ylim:
ax.set_ylim(ylim)
ax.set_xlim((np.min(temperatures), np.max(temperatures)))
ylim = plt.ylim()
if ylim[0] < 0 < ylim[1]:
plt.plot(plt.xlim(), [0, 0], "k-", linewidth=1)
ax.set_xlabel(r"$T$ (K)")
if ylabel:
ax.set_ylabel(ylabel)
return fig
@add_fig_kwargs
def plot_cv(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots the constant volume specific heat C_v in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$C_v$ (J/K/mol)"
else:
ylabel = r"$C_v$ (J/K/mol-c)"
fig = self._plot_thermo(self.dos.cv, temperatures, ylabel=ylabel, ylim=ylim, **kwargs)
return fig
@add_fig_kwargs
def plot_entropy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots the vibrational entropy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$S$ (J/K/mol)"
else:
ylabel = r"$S$ (J/K/mol-c)"
fig = self._plot_thermo(self.dos.entropy, temperatures, ylabel=ylabel, ylim=ylim, **kwargs)
return fig
@add_fig_kwargs
def plot_internal_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots the vibrational internal energy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$\Delta E$ (kJ/mol)"
else:
ylabel = r"$\Delta E$ (kJ/mol-c)"
fig = self._plot_thermo(self.dos.internal_energy, temperatures, ylabel=ylabel, ylim=ylim, factor=1e-3, **kwargs)
return fig
@add_fig_kwargs
def plot_helmholtz_free_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots the vibrational contribution to the Helmholtz free energy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$\Delta F$ (kJ/mol)"
else:
ylabel = r"$\Delta F$ (kJ/mol-c)"
fig = self._plot_thermo(
self.dos.helmholtz_free_energy, temperatures, ylabel=ylabel, ylim=ylim, factor=1e-3, **kwargs
)
return fig
@add_fig_kwargs
def plot_thermodynamic_properties(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots all the thermodynamic properties in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
mol = "" if self.structure else "-c"
fig = self._plot_thermo(
self.dos.cv,
temperatures,
ylabel="Thermodynamic properties",
ylim=ylim,
label=r"$C_v$ (J/K/mol{})".format(mol),
**kwargs,
)
self._plot_thermo(
self.dos.entropy, temperatures, ylim=ylim, ax=fig.axes[0], label=r"$S$ (J/K/mol{})".format(mol), **kwargs
)
self._plot_thermo(
self.dos.internal_energy,
temperatures,
ylim=ylim,
ax=fig.axes[0],
factor=1e-3,
label=r"$\Delta E$ (kJ/mol{})".format(mol),
**kwargs,
)
self._plot_thermo(
self.dos.helmholtz_free_energy,
temperatures,
ylim=ylim,
ax=fig.axes[0],
factor=1e-3,
label=r"$\Delta F$ (kJ/mol{})".format(mol),
**kwargs,
)
fig.axes[0].legend(loc="best")
return fig
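# Minimal usage sketch for ThermoPlotter (assumes `dos` is a PhononDos and
# `structure` the corresponding Structure, so quantities are per mol rather
# than per mol-cell):
#
#     thermo = ThermoPlotter(dos, structure=structure)
#     fig = thermo.plot_thermodynamic_properties(tmin=0, tmax=1000, ntemp=101)
#     fig.savefig("thermo.pdf")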
class GruneisenPlotter:
"""
Class to plot a GruneisenParameter object
"""
def __init__(self, gruneisen):
"""
Class to plot information from a GruneisenParameter object
Args:
gruneisen: GruneisenParameter Object
"""
self._gruneisen = gruneisen
def get_plot(self, marker="o", markersize=None, units="thz"):
"""
Produce a plot of the Gruneisen parameters against frequency.
Args:
marker: marker style for the points
markersize: size of the marker
units: unit for the plot, accepted units: thz, ev, mev, ha, cm-1, cm^-1
Returns: a matplotlib plot object
"""
u = freq_units(units)
x = self._gruneisen.frequencies.flatten() * u.factor
y = self._gruneisen.gruneisen.flatten()
plt = pretty_plot(12, 8)
plt.xlabel(r"$\mathrm{{Frequency\ ({})}}$".format(u.label))
plt.ylabel(r"$\mathrm{Grüneisen\ parameter}$")
n = max(len(y) - 1, 1) # guard against a single point (avoids division by zero)
for i, (yi, xi) in enumerate(zip(y, x)):
color = (1.0 / n * i, 0, 1.0 / n * (n - i))
if markersize:
plt.plot(xi, yi, marker, color=color, markersize=markersize)
else:
plt.plot(xi, yi, marker, color=color)
plt.tight_layout()
return plt
def show(self, units="thz"):
"""
will show the plot
Args:
units: units for the plot, accepted units: thz, ev, mev, ha, cm-1, cm^-1
Returns: plot
"""
plt = self.get_plot(units=units)
plt.show()
def save_plot(self, filename, img_format="pdf", units="thz"):
"""
Will save the plot to a file
Args:
filename: name of the file
img_format: format of the saved plot
units: accepted units: thz, ev, mev, ha, cm-1, cm^-1
Returns:
"""
plt = self.get_plot(units=units)
plt.savefig(filename, format=img_format)
plt.close()
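# Minimal usage sketch for GruneisenPlotter (assumes `gp` is a
# GruneisenParameter object built elsewhere):
#
#     g_plotter = GruneisenPlotter(gp)
#     g_plotter.save_plot("gruneisen.pdf", units="cm-1")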
class GruneisenPhononBSPlotter(PhononBSPlotter):
"""
Class to plot or get data to facilitate the plot of band structure objects.
"""
def __init__(self, bs):
"""
Args:
bs: A GruneisenPhononBandStructureSymmLine object.
"""
if not isinstance(bs, GruneisenPhononBandStructureSymmLine):
raise ValueError(
"GruneisenPhononBSPlotter only works with GruneisenPhononBandStructureSymmLine objects. "
"A GruneisenPhononBandStructure object (e.g. on a uniform grid, "
"not along symmetry lines) won't work."
)
super().__init__(bs)
def bs_plot_data(self):
"""
Get the data nicely formatted for a plot
Returns:
A dict of the following format:
ticks: A dict with the 'distance' at which there is a qpoint (the
x axis) and the 'label' (None if no label)
distances: A list (one element per branch) of the distances along the path
frequency: A list (one element for each branch) of frequencies for
each qpoint: [branch][qpoint][mode]. The data is
stored by branch to facilitate the plotting
gruneisen: A list of Gruneisen parameters, indexed like frequency
lattice: The reciprocal lattice.
"""
distance, frequency, gruneisen = ([] for _ in range(3))
ticks = self.get_ticks()
for b in self._bs.branches:
frequency.append([])
gruneisen.append([])
distance.append([self._bs.distance[j] for j in range(b["start_index"], b["end_index"] + 1)])
for i in range(self._nb_bands):
frequency[-1].append([self._bs.bands[i][j] for j in range(b["start_index"], b["end_index"] + 1)])
gruneisen[-1].append([self._bs.gruneisen[i][j] for j in range(b["start_index"], b["end_index"] + 1)])
return {
"ticks": ticks,
"distances": distance,
"frequency": frequency,
"gruneisen": gruneisen,
"lattice": self._bs.lattice_rec.as_dict(),
}
def get_plot_gs(self, ylim=None):
"""
Get a matplotlib object for the Gruneisen band structure plot.
Args:
ylim: Specify the y-axis (gruneisen) limits; by default None, which lets
the code choose.
"""
plt = pretty_plot(12, 8)
# band_linewidth = 1
data = self.bs_plot_data()
for d in range(len(data["distances"])):
for i in range(self._nb_bands):
plt.plot(
data["distances"][d],
[data["gruneisen"][d][i][j] for j in range(len(data["distances"][d]))],
"b-",
# linewidth=band_linewidth)
marker="o",
markersize=2,
linewidth=2,
)
self._maketicks(plt)
# plot y=0 line
plt.axhline(0, linewidth=1, color="k")
# Main X and Y Labels
plt.xlabel(r"$\mathrm{Wave\ Vector}$", fontsize=30)
plt.ylabel(r"$\mathrm{Grüneisen\ Parameter}$", fontsize=30)
# X range (K)
# last distance point
x_max = data["distances"][-1][-1]
plt.xlim(0, x_max)
if ylim is not None:
plt.ylim(ylim)
plt.tight_layout()
return plt
def show_gs(self, ylim=None):
"""
Show the plot using matplotlib.
Args:
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot_gs(ylim)
plt.show()
def save_plot_gs(self, filename, img_format="eps", ylim=None):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot_gs(ylim=ylim)
plt.savefig(filename, format=img_format)
plt.close()
def plot_compare_gs(self, other_plotter):
"""
Plot two band structures for comparison: this plotter's in blue, the other in red.
The two band structures must be defined on the same symmetry lines,
and the distances between symmetry points are taken from the band structure
used to build this GruneisenPhononBSPlotter.
Args:
other_plotter: another GruneisenPhononBSPlotter object defined along the same symmetry lines
Returns:
a matplotlib object with both band structures
"""
data_orig = self.bs_plot_data()
data = other_plotter.bs_plot_data()
if len(data_orig["distances"]) != len(data["distances"]):
raise ValueError("The two objects are not compatible.")
plt = self.get_plot()
band_linewidth = 1
for i in range(other_plotter._nb_bands):
for d in range(len(data_orig["distances"])):
plt.plot(
data_orig["distances"][d],
[data["gruneisen"][d][i][j] for j in range(len(data_orig["distances"][d]))],
"r-",
linewidth=band_linewidth,
)
return plt
|
gmatteo/pymatgen
|
pymatgen/phonon/plotter.py
|
Python
|
mit
| 31,427
|
[
"Gaussian",
"pymatgen"
] |
737185e5a991a35c9a864148212fdf21ae4c10735425cb4d13f1561cec8daa48
|
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.layers.python import layers as tf_layers
from models.conv_lstm import basic_conv_lstm_cell
# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12
FC_LAYER_SIZE = 256
FC_LSTM_LAYER_SIZE = 128
# kernel size for DNA and CDNA.
DNA_KERN_SIZE = 5
def encoder_model(frames, sequence_length, initializer, scope='encoder', fc_conv_layer=False):
"""
Args:
frames: 5D array of batch with videos - shape(batch_size, num_frames, frame_width, frame_height, num_channels)
sequence_length: number of frames that shall be encoded
scope: tensorflow variable scope name
initializer: specifies the initialization type (default: contrib.slim.layers uses Xavier init with uniform data)
fc_conv_layer: indicates whether a fully convolutional layer (4x4x32 -> 1x1xFC_LAYER_SIZE) shall be added at the end of the encoder
Returns:
hidden_repr: hidden state of the topmost ConvLSTM layer (1x1xFC_LSTM_LAYER_SIZE)
"""
lstm_state1, lstm_state2, lstm_state3, lstm_state4, lstm_state5, lstm_state6 = None, None, None, None, None, None
for i in range(sequence_length):
frame = frames[:,i,:,:,:]
reuse = (i > 0)
with tf.variable_scope(scope, reuse=reuse):
#LAYER 1: conv1
conv1 = slim.layers.conv2d(frame, 16, [5, 5], stride=2, scope='conv1', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
normalizer_params={'scope': 'layer_norm1'})
#LAYER 2: convLSTM1
hidden1, lstm_state1 = basic_conv_lstm_cell(conv1, lstm_state1, 16, initializer, filter_size=5, scope='convlstm1')
hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2')
#LAYER 3: conv2
conv2 = slim.layers.conv2d(hidden1, hidden1.get_shape()[3], [5, 5], stride=2, scope='conv2', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
normalizer_params={'scope': 'layer_norm3'})
#LAYER 4: convLSTM2
hidden2, lstm_state2 = basic_conv_lstm_cell(conv2, lstm_state2, 16, initializer, filter_size=5, scope='convlstm2')
hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm4')
#LAYER 5: conv3
conv3 = slim.layers.conv2d(hidden2, hidden2.get_shape()[3], [5, 5], stride=2, scope='conv3', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
normalizer_params={'scope': 'layer_norm5'})
#LAYER 6: convLSTM3
hidden3, lstm_state3 = basic_conv_lstm_cell(conv3, lstm_state3, 16, initializer, filter_size=3, scope='convlstm3')
hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm6')
#LAYER 7: conv4
conv4 = slim.layers.conv2d(hidden3, hidden3.get_shape()[3], [3, 3], stride=2, scope='conv4', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
normalizer_params={'scope': 'layer_norm7'})
#LAYER 8: convLSTM4 (8x8 featuremap size)
hidden4, lstm_state4 = basic_conv_lstm_cell(conv4, lstm_state4, 32, initializer, filter_size=3, scope='convlstm4')
hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm8')
#LAYER 9: conv5
conv5 = slim.layers.conv2d(hidden4, hidden4.get_shape()[3], [3, 3], stride=2, scope='conv5', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
normalizer_params={'scope': 'layer_norm9'})
#LAYER 10: convLSTM5 (4x4 featuremap size)
hidden5, lstm_state5 = basic_conv_lstm_cell(conv5, lstm_state5, 32, initializer, filter_size=3, scope='convlstm5')
hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm10')
#LAYER 11: Fully Convolutional Layer (4x4x32 --> 1x1xFC_LAYER_SIZE)
fc_conv = slim.layers.conv2d(hidden5, FC_LAYER_SIZE, [4,4], stride=1, scope='fc_conv', padding='VALID', weights_initializer=initializer)
#LAYER 12: Fully Convolutional LSTM (1x1x256 -> 1x1x128)
hidden6, lstm_state6 = basic_conv_lstm_cell(fc_conv, lstm_state6, FC_LSTM_LAYER_SIZE, initializer, filter_size=1, scope='convlstm6')
hidden_repr = hidden6
return hidden_repr
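# Shape walkthrough for a single 128x128x3 input frame through the encoder
# above (stride-2 convolutions halve the spatial size; the interleaved
# ConvLSTM cells keep it, only changing channels at convlstm4):
# 128x128x3 -> conv1 -> 64x64x16 -> conv2 -> 32x32x16 -> conv3 -> 16x16x16
# -> conv4 -> 8x8x16 -> convlstm4 -> 8x8x32 -> conv5 -> 4x4x32
# -> fc_conv (4x4, VALID) -> 1x1x256 -> convlstm6 -> 1x1x128 = hidden_repr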
def decoder_model(hidden_repr, sequence_length, initializer, num_channels=3, scope='decoder', fc_conv_layer=False):
"""
Args:
hidden_repr: Tensor of latent space representation
sequence_length: number of frames that shall be decoded from the hidden_repr
num_channels: number of channels for generated frames
initializer: specifies the initialization type (default: contrib.slim.layers uses Xavier init with uniform data)
fc_conv_layer: indicates whether hidden_repr is a 1x1xdepth tensor and a fully convolutional layer shall be added
Returns:
frame_gen: list of generated frames (Tensors)
"""
frame_gen = []
lstm_state1, lstm_state2, lstm_state3, lstm_state4, lstm_state5, lstm_state0 = None, None, None, None, None, None
assert (not fc_conv_layer) or (hidden_repr.get_shape()[1] == hidden_repr.get_shape()[2] == 1)
for i in range(sequence_length):
reuse = (i > 0) #reuse variables (recurrence) after first time step
with tf.variable_scope(scope, reuse=reuse):
#Fully Convolutional Layers (1x1x128 -> 1x1x256 -> 4x4x32)
hidden0, lstm_state0 = basic_conv_lstm_cell(hidden_repr, lstm_state0, FC_LAYER_SIZE, initializer, filter_size=1,
scope='convlstm0')
fc_conv = slim.layers.conv2d_transpose(hidden0, 32, [4, 4], stride=1, scope='fc_conv', padding='VALID', weights_initializer=initializer)
#LAYER 1: convLSTM1
hidden1, lstm_state1 = basic_conv_lstm_cell(fc_conv, lstm_state1, 32, initializer, filter_size=3, scope='convlstm1')
hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm1')
#LAYER 2: upconv1 (4x4 -> 8x8)
upconv1 = slim.layers.conv2d_transpose(hidden1, hidden1.get_shape()[3], 3, stride=2, scope='upconv1', weights_initializer=initializer,
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm2'})
#LAYER 3: convLSTM2
hidden2, lstm_state2 = basic_conv_lstm_cell(upconv1, lstm_state2, 32, initializer, filter_size=3, scope='convlstm2')
hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3')
#LAYER 4: upconv2 (8x8 -> 16x16)
upconv2 = slim.layers.conv2d_transpose(hidden2, hidden2.get_shape()[3], 3, stride=2, scope='upconv2', weights_initializer=initializer,
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm4'})
#LAYER 5: convLSTM3
hidden3, lstm_state3 = basic_conv_lstm_cell(upconv2, lstm_state3, 16, initializer, filter_size=3, scope='convlstm3')
hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm5')
# LAYER 6: upconv3 (16x16 -> 32x32)
upconv3 = slim.layers.conv2d_transpose(hidden3, hidden3.get_shape()[3], 5, stride=2, scope='upconv3', weights_initializer=initializer,
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm6'})
#LAYER 7: convLSTM4
hidden4, lstm_state4 = basic_conv_lstm_cell(upconv3, lstm_state4, 16, initializer, filter_size=5, scope='convlstm4')
hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm7')
#LAYER 8: upconv4 (32x32 -> 64x64)
upconv4 = slim.layers.conv2d_transpose(hidden4, 16, 5, stride=2, scope='upconv4', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
normalizer_params={'scope': 'layer_norm8'})
#LAYER 9: convLSTM5
hidden5, lstm_state5 = basic_conv_lstm_cell(upconv4, lstm_state5, 16, initializer, filter_size=5, scope='convlstm5')
hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm9')
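#LAYER 10: upconv5 (64x64 -> 128x128)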
upconv5 = slim.layers.conv2d_transpose(hidden5, num_channels, 5, stride=2, scope='upconv5', weights_initializer=initializer)
frame_gen.append(upconv5)
assert len(frame_gen)==sequence_length
return frame_gen
def composite_model(frames, encoder_len=5, decoder_future_len=5, decoder_reconst_len=5, uniform_init=True, num_channels=3, fc_conv_layer=True):
"""
Args:
frames: 5D array of batch with videos - shape(batch_size, num_frames, frame_width, frame_height, num_channels)
encoder_len: number of frames that shall be encoded
decoder_future_len: number of future frames that shall be decoded from the hidden_repr
decoder_reconst_len: number of input frames that shall be reconstructed from the hidden_repr
uniform_init: specifies whether the weight initialization should be drawn from a Gaussian or uniform distribution (default: uniform)
num_channels: number of channels for generated frames
fc_conv_layer: indicates whether fully connected layer shall be added between encoder and decoder
Returns:
frame_gen: array of generated frames (Tensors)
"""
assert all(length > 0 for length in [encoder_len, decoder_future_len, decoder_reconst_len])
initializer = tf_layers.xavier_initializer(uniform=uniform_init)
hidden_repr = encoder_model(frames, encoder_len, initializer, fc_conv_layer=fc_conv_layer)
frames_pred = decoder_model(hidden_repr, decoder_future_len, initializer, num_channels=num_channels,
scope='decoder_pred', fc_conv_layer=fc_conv_layer)
frames_reconst = decoder_model(hidden_repr, decoder_reconst_len, initializer, num_channels=num_channels,
scope='decoder_reconst', fc_conv_layer=fc_conv_layer)
return frames_pred, frames_reconst, hidden_repr
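# Hedged usage sketch (not part of the original module): builds the composite
# model on a symbolic batch. The 128x128 frame size, batch size and video
# length below are assumptions inferred from the layer comments above.
# frames = tf.placeholder(tf.float32, shape=(8, 15, 128, 128, 3))
# frames_pred, frames_reconst, hidden_repr = composite_model(
#     frames, encoder_len=5, decoder_future_len=5, decoder_reconst_len=5)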
|
jonasrothfuss/DeepEpisodicMemory
|
models/model_zoo/model_conv5_fc_lstm_128.py
|
Python
|
mit
| 9,768
|
[
"Gaussian"
] |
d0554cf4b1a847dfa621da8d6263bf99dd5a7420047e1dc17735be9494d5eca8
|
"""Dynamical matrix classes."""
# Copyright (C) 2011 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import warnings
from typing import Type, Union
import numpy as np
from phonopy.harmonic.dynmat_to_fc import DynmatToForceConstants
from phonopy.structure.atoms import PhonopyAtoms
from phonopy.structure.cells import Primitive, sparse_to_dense_svecs
class DynamicalMatrix:
"""Dynamical matrix base class.
When primitive and supercell lattices are L_p and L_s, respectively,
frame F is defined by
L_p = dot(F, L_s), then L_s = dot(F^-1, L_p).
where the lattice matrix is defined by axes a, b, c in Cartesian:
[ a1 a2 a3 ]
L = [ b1 b2 b3 ]
[ c1 c2 c3 ]
Phase difference in primitive cell unit
between atoms 1 and 2 in supercell is calculated by, e.g.,
1j * dot((x_s(2) - x_s(1)), F^-1) * 2pi
where x_s is reduced atomic coordinate in supercell unit.
Attributes
----------
primitive: Primitive
Primitive cell instance. Note that Primitive is inherited from
PhonopyAtoms.
supercell: PhonopyAtoms.
Supercell instance.
force_constants: ndarray
Supercell force constants. Full and compact shapes of arrays are
supported.
dtype='double'
shape=(supercell atoms, supercell atoms, 3, 3) for full array
shape=(primitive atoms, supercell atoms, 3, 3) for compact array
dynamical_matrix: ndarray
Dynamical matrix at specified q.
dtype=complex of "c%d" % (np.dtype('double').itemsize * 2)
shape=(primitive atoms * 3, primitive atoms * 3)
"""
# Non analytical term correction
_nac = False
def __init__(
self,
supercell: PhonopyAtoms,
primitive: Primitive,
force_constants,
decimals=None,
):
"""Init method.
Parameters
----------
supercell : PhonopyAtoms.
Supercell.
primitive : Primitive
Primitive cell.
force_constants : array_like
Supercell force constants. Full and compact shapes of arrays are
supported.
shape=(supercell atoms, supercell atoms, 3, 3) for full FC.
shape=(primitive atoms, supercell atoms, 3, 3) for compact FC.
dtype='double'
decimals : int, optional, default=None
Number of decimals. Use like dm.round(decimals).
"""
self._scell = supercell
self._pcell = primitive
self._decimals = decimals
self._dynamical_matrix = None
self._force_constants = None
self._set_force_constants(force_constants)
self._dtype_complex = "c%d" % (np.dtype("double").itemsize * 2)
self._p2s_map = np.array(self._pcell.p2s_map, dtype="int_")
self._s2p_map = np.array(self._pcell.s2p_map, dtype="int_")
p2p_map = self._pcell.p2p_map
self._s2pp_map = np.array(
[p2p_map[self._s2p_map[i]] for i in range(len(self._s2p_map))], dtype="int_"
)
svecs, multi = self._pcell.get_smallest_vectors()
if self._pcell.store_dense_svecs:
self._svecs = svecs
self._multi = multi
else:
self._svecs, self._multi = sparse_to_dense_svecs(svecs, multi)
def is_nac(self):
"""Return bool if NAC is considered or not."""
return self._nac
def get_dimension(self):
"""Return number of bands."""
warnings.warn(
"DynamicalMatrix.get_dimension() is deprecated.", DeprecationWarning
)
return len(self._pcell) * 3
@property
def decimals(self):
"""Return number of decimals of dynamical matrix values."""
return self._decimals
def get_decimals(self):
"""Return number of decimals of dynamical matrix values."""
warnings.warn(
"DynamicalMatrix.get_decimals() is deprecated."
"Use DynamicalMatrix.decimals attribute.",
DeprecationWarning,
)
return self.decimals
@property
def supercell(self):
"""Return supercell."""
return self._scell
def get_supercell(self):
"""Return supercell."""
warnings.warn(
"DynamicalMatrix.get_supercell() is deprecated."
"Use DynamicalMatrix.supercell attribute.",
DeprecationWarning,
)
return self.supercell
@property
def primitive(self) -> Primitive:
"""Return primitive cell."""
return self._pcell
def get_primitive(self):
"""Return primitive cell."""
warnings.warn(
"DynamicalMatrix.get_primitive() is deprecated."
"Use DynamicalMatrix.primitive attribute.",
DeprecationWarning,
)
return self.primitive
@property
def force_constants(self):
"""Return supercell force constants."""
return self._force_constants
def get_force_constants(self):
"""Return supercell force constants."""
warnings.warn(
"DynamicalMatrix.get_force_constants() is deprecated."
"Use DynamicalMatrix.force_constants attribute.",
DeprecationWarning,
)
return self.force_constants
@property
def dynamical_matrix(self):
"""Return dynamcial matrix calculated at q.
Returns
-------
ndarray
shape=(natom * 3, natom *3)
dtype=complex of "c%d" % (np.dtype('double').itemsize * 2)
"""
dm = self._dynamical_matrix
if self._dynamical_matrix is None:
return None
if self._decimals is None:
return dm
else:
return dm.round(decimals=self._decimals)
def get_dynamical_matrix(self):
"""Return dynamcial matrix calculated at q."""
warnings.warn(
"DynamicalMatrix.get_get_dynamical_matrix() is "
"deprecated."
"Use DynamicalMatrix.get_dynamical_matrix attribute.",
DeprecationWarning,
)
return self.dynamical_matrix
def run(self, q, lang="C"):
"""Run dynamical matrix calculation at a q-point.
q : array_like
q-point in fractional coordinates without 2pi.
shape=(3,), dtype='double'
"""
self._run(q, lang=lang)
def set_dynamical_matrix(self, q):
"""Run dynamical matrix calculation at a q-point."""
warnings.warn(
"DynamicalMatrix.set_dynamical_matrix() is deprecated."
"Use DynamicalMatrix.run().",
DeprecationWarning,
)
self.run(q)
def _run(self, q, lang="C"):
if lang == "C":
self._run_c_dynamical_matrix(q)
else:
self._run_py_dynamical_matrix(q)
def _set_force_constants(self, fc):
if (
type(fc) is np.ndarray
and fc.dtype == np.double
and fc.flags.aligned
and fc.flags.owndata
and fc.flags.c_contiguous
): # noqa E129
self._force_constants = fc
else:
self._force_constants = np.array(fc, dtype="double", order="C")
def _run_c_dynamical_matrix(self, q):
import phonopy._phonopy as phonoc
fc = self._force_constants
mass = self._pcell.masses
size_prim = len(mass)
dm = np.zeros((size_prim * 3, size_prim * 3), dtype=self._dtype_complex)
if fc.shape[0] == fc.shape[1]: # full-fc
s2p_map = self._s2p_map
p2s_map = self._p2s_map
else: # compact-fc
s2p_map = self._s2pp_map
p2s_map = np.arange(len(self._p2s_map), dtype="int_")
phonoc.dynamical_matrix(
dm.view(dtype="double"),
fc,
np.array(q, dtype="double"),
self._svecs,
self._multi,
mass,
s2p_map,
p2s_map,
)
# Data of dm array are stored in memory by the C order of
# (size_prim * 3, size_prim * 3, 2), where the last 2 means
# real and imaginary parts. This code assumes this memory
# order is that expected by numpy. Otherwise, numpy complex array
# should be created as follows:
# dm_double = dm.view(dtype='double').reshape(size_prim * 3,
# size_prim * 3, 2)
# dm = dm_double[:, :, 0] + 1j * dm_double[:, :, 1]
self._dynamical_matrix = dm
def _run_py_dynamical_matrix(self, q):
"""Python implementation of building dynamical matrix.
This is not used in production.
This works only with full-fc.
"""
fc = self._force_constants
svecs = self._svecs
multi = self._multi
num_atom = len(self._pcell)
dm = np.zeros((3 * num_atom, 3 * num_atom), dtype=self._dtype_complex)
mass = self._pcell.masses
if fc.shape[0] == fc.shape[1]:
is_compact_fc = False
else:
is_compact_fc = True
for i, s_i in enumerate(self._pcell.p2s_map):
if is_compact_fc:
fc_elem = fc[i]
else:
fc_elem = fc[s_i]
for j, s_j in enumerate(self._pcell.p2s_map):
sqrt_mm = np.sqrt(mass[i] * mass[j])
dm_local = np.zeros((3, 3), dtype=self._dtype_complex)
# Sum in lattice points
for k in range(len(self._scell)):
if s_j == self._s2p_map[k]:
m, adrs = multi[k][i]
svecs_at = svecs[adrs : adrs + m]
phase = []
for ll in range(m):
vec = svecs_at[ll]
phase.append(np.vdot(vec, q) * 2j * np.pi)
phase_factor = np.exp(phase).sum()
dm_local += fc_elem[k] * phase_factor / sqrt_mm / m
dm[(i * 3) : (i * 3 + 3), (j * 3) : (j * 3 + 3)] += dm_local
# Impose Hermitian condition
self._dynamical_matrix = (dm + dm.conj().transpose()) / 2
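# Both the C and Python backends evaluate the standard mass-weighted
# dynamical matrix
#   D_ij(q) = 1/sqrt(m_i * m_j) * sum_l Phi(i, j+l) * exp(2j*pi * q . r(j+l)),
# with the lattice-point sum (averaged over equivalent shortest vectors)
# spelled out explicitly in _run_py_dynamical_matrix above.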
class DynamicalMatrixNAC(DynamicalMatrix):
"""Dynamical matrix with NAC base class."""
_nac = True
def __init__(
self,
supercell: PhonopyAtoms,
primitive: Primitive,
force_constants,
symprec=1e-5,
decimals=None,
log_level=0,
):
"""Init method.
Parameters
----------
supercell : PhonopyAtoms
Supercell.
primitive : Primitive
Primitive cell.
force_constants : array_like
Supercell force constants. Full and compact shapes of arrays are
supported.
shape=(supercell atoms, supercell atoms, 3, 3) for full FC.
shape=(primitive atoms, supercell atoms, 3, 3) for compact FC.
dtype='double'
symprec : float, optional, default=1e-5
Symmetry tolerance.
decimals : int, optional, default=None
Number of decimals. Use like dm.round(decimals).
log_level : int, optional, default=0
Log level.
"""
super().__init__(supercell, primitive, force_constants, decimals=decimals)
self._symprec = symprec
self._log_level = log_level
def run(self, q, q_direction=None):
"""Calculate dynamical matrix at q-point.
q : array_like
q-point in fractional coordinates without 2pi.
shape=(3,), dtype='double'
q_direction : array_like
q-point direction from Gamma-point in fractional coordinates of
reciprocal basis vectors. Only the direction is used, i.e.,
(q_direction / |q_direction|) is computed and used.
shape=(3,), dtype='double'
"""
rec_lat = np.linalg.inv(self._pcell.cell) # column vectors
if q_direction is None:
q_norm = np.linalg.norm(np.dot(q, rec_lat.T))
else:
q_norm = np.linalg.norm(np.dot(q_direction, rec_lat.T))
if q_norm < self._symprec:
self._run(q)
return False
self._compute_dynamical_matrix(q, q_direction)
@property
def born(self):
"""Return Born effective charge."""
return self._born
def get_born_effective_charges(self):
"""Return Born effective charge."""
warnings.warn(
"DynamicalMatrixNAC.get_born_effective_charges() is deprecated."
"Use DynamicalMatrixNAC.born attribute.",
DeprecationWarning,
)
return self.born
@property
def nac_factor(self):
"""Return NAC unit conversion factor."""
return self._unit_conversion * 4.0 * np.pi / self._pcell.volume
def get_nac_factor(self):
"""Return NAC unit conversion factor."""
warnings.warn(
"DynamicalMatrixNAC.get_nac_factor() is deprecated."
"Use DynamicalMatrixNAC.nac_factor attribute.",
DeprecationWarning,
)
return self.nac_factor
@property
def dielectric_constant(self):
"""Return dielectric constant."""
return self._dielectric
def get_dielectric_constant(self):
"""Return dielectric constant."""
warnings.warn(
"DynamicalMatrixNAC.get_dielectric_constant() is deprecated."
"Use DynamicalMatrixNAC.dielectric_constant attribute.",
DeprecationWarning,
)
return self.dielectric_constant
@property
def nac_method(self):
"""Return NAC method name."""
return self._method
def get_nac_method(self):
"""Return NAC method name."""
warnings.warn(
"DynamicalMatrixNAC.get_nac_method() is deprecated."
"Use DynamicalMatrixNAC.nac_method attribute.",
DeprecationWarning,
)
return self.nac_method
@property
def nac_params(self):
"""Return NAC basic parameters."""
return {"born": self.born, "factor": self.factor, "dielectric": self.dielectric}
@nac_params.setter
def nac_params(self, nac_params):
"""Set NAC parameters."""
self._set_nac_params(nac_params)
def set_nac_params(self, nac_params):
"""Set NAC parameters."""
warnings.warn(
"DynamicalMatrixNAC.set_nac_params() is deprecated."
"Use DynamicalMatrixNAC.nac_params attribute instead.",
DeprecationWarning,
)
self.nac_params = nac_params
@property
def symprec(self):
"""Return symmetry tolerance."""
return self._symprec
@property
def log_level(self):
"""Return log level."""
return self._log_level
def _set_nac_params(self, nac_params):
raise NotImplementedError()
def _set_basic_nac_params(self, nac_params):
"""Set basic NAC parameters."""
self._born = np.array(nac_params["born"], dtype="double", order="C")
self._unit_conversion = nac_params["factor"]
self._dielectric = np.array(nac_params["dielectric"], dtype="double", order="C")
def set_dynamical_matrix(self, q, q_direction=None):
"""Run dynamical matrix calculation at q-point."""
warnings.warn(
"DynamicalMatrixNAC.set_dynamical_matrix() is deprecated."
"Use DynamicalMatrixNAC.run().",
DeprecationWarning,
)
self.run(q, q_direction=q_direction)
def _get_charge_sum(self, num_atom, q, born):
nac_q = np.zeros((num_atom, num_atom, 3, 3), dtype="double", order="C")
A = np.dot(q, born)
for i in range(num_atom):
for j in range(num_atom):
nac_q[i, j] = np.outer(A[i], A[j])
return nac_q
def _get_constant_factor(self, q, dielectric, volume, unit_conversion):
return (
unit_conversion * 4.0 * np.pi / volume / np.dot(q.T, np.dot(dielectric, q))
)
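# Helper summary: with A[i] = q . Z_i (Z_i being the Born effective charge
# tensor of atom i), _get_charge_sum builds nac_q[i, j] = outer(A[i], A[j]),
# and _get_constant_factor supplies the prefactor
# unit_conversion * 4*pi / (V * (q . eps . q)) of the non-analytical term.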
def _compute_dynamical_matrix(self, q_red, q_direction):
raise NotImplementedError()
class DynamicalMatrixGL(DynamicalMatrixNAC):
"""Non analytical term correction (NAC) by Gonze and Lee."""
_method = "gonze"
def __init__(
self,
supercell: PhonopyAtoms,
primitive: Primitive,
force_constants,
nac_params=None,
num_G_points=None, # For Gonze NAC
decimals=None,
symprec=1e-5,
log_level=0,
):
"""Init method.
Parameters
----------
supercell : PhonopyAtoms
Supercell.
primitive : Primitive
Primitive cell.
force_constants : array_like
Supercell force constants. Full and compact shapes of arrays are
supported.
shape=(supercell atoms, supercell atoms, 3, 3) for full FC.
shape=(primitive atoms, supercell atoms, 3, 3) for compact FC.
dtype='double'
nac_params : dict, optional, default=None
Non-analytical term correction parameters ('born', 'factor', 'dielectric', ...). May also be set later via the nac_params attribute.
num_G_points : int, optional, default=None
Number of G-points used for the reciprocal dipole-dipole sum (300 when None).
symprec : float, optional, default=1e-5
Symmetry tolerance.
decimals : int, optional, default=None
Number of decimals. Use like dm.round(decimals).
log_level : int, optional, default=0
Log level.
"""
super().__init__(
supercell,
primitive,
force_constants,
symprec=symprec,
decimals=decimals,
log_level=log_level,
)
# For the method by Gonze et al.
self._Gonze_force_constants = None
if num_G_points is None:
self._num_G_points = 300
else:
self._num_G_points = num_G_points
self._G_list = None
self._G_cutoff = None
self._Lambda = None # 4*Lambda**2 is stored.
self._dd_q0 = None
if nac_params is not None:
self.nac_params = nac_params
@property
def Gonze_nac_dataset(self):
"""Return Gonze-Lee NAC dataset."""
return (
self._Gonze_force_constants,
self._dd_q0,
self._G_cutoff,
self._G_list,
self._Lambda,
)
def get_Gonze_nac_dataset(self):
"""Return Gonze-Lee NAC dataset."""
warnings.warn(
"DynamicalMatrixGL.get_Gonze_nac_dataset() is deprecated."
"Use DynamicalMatrixGL.Gonze_nac_dataset attribute instead.",
DeprecationWarning,
)
return self.Gonze_nac_dataset
def _set_nac_params(self, nac_params):
"""Set and prepare NAC parameters.
This is called via DynamicalMatrixNAC.nac_params.
"""
self._set_basic_nac_params(nac_params)
if "G_cutoff" in nac_params:
self._G_cutoff = nac_params["G_cutoff"]
else:
self._G_cutoff = (
3 * self._num_G_points / (4 * np.pi) / self._pcell.volume
) ** (1.0 / 3)
self._G_list = self._get_G_list(self._G_cutoff)
if "Lambda" in nac_params:
self._Lambda = nac_params["Lambda"]
else:
exp_cutoff = 1e-10
GeG = self._G_cutoff ** 2 * np.trace(self._dielectric) / 3
self._Lambda = np.sqrt(-GeG / 4 / np.log(exp_cutoff))
# self._H = self._get_H()
def make_Gonze_nac_dataset(self):
"""Prepare Gonze-Lee force constants.
Dipole-dipole interaction contribution is subtracted from
supercell force constants.
"""
try:
import phonopy._phonopy as phonoc # noqa F401
self._run_c_recip_dipole_dipole_q0()
except ImportError:
print(
"Python version of dipole-dipole calculation is not well "
"implemented."
)
sys.exit(1)
fc_shape = self._force_constants.shape
d2f = DynmatToForceConstants(
self._pcell, self._scell, is_full_fc=(fc_shape[0] == fc_shape[1])
)
dynmat = []
num_q = len(d2f.commensurate_points)
for i, q_red in enumerate(d2f.commensurate_points):
if self._log_level > 2:
print("%d/%d %s" % (i + 1, num_q, q_red))
self._run(q_red)
dm_dd = self._get_Gonze_dipole_dipole(q_red, None)
self._dynamical_matrix -= dm_dd
dynmat.append(self._dynamical_matrix)
d2f.dynamical_matrices = dynmat
d2f.run()
self._Gonze_force_constants = d2f.force_constants
self._Gonze_count = 0
def show_nac_message(self):
"""Show message on Gonze-Lee NAC method."""
print(
"Use NAC by Gonze et al. (no real space sum in current " "implementation)"
)
print(" PRB 50, 13035(R) (1994), PRB 55, 10355 (1997)")
print(
" G-cutoff distance: %4.2f, Number of G-points: %d, "
"Lambda: %4.2f" % (self._G_cutoff, len(self._G_list), self._Lambda)
)
def show_Gonze_nac_message(self):
"""Show message on Gonze-Lee NAC method."""
warnings.warn(
"DynamicalMatrixGL.show_Gonze_nac_message() is deprecated."
"Use DynamicalMatrixGL.show_nac_message instead.",
DeprecationWarning,
)
self.show_nac_message()
def _compute_dynamical_matrix(self, q_red, q_direction):
if self._Gonze_force_constants is None:
self.make_Gonze_nac_dataset()
if self._log_level > 2:
print("%d %s" % (self._Gonze_count + 1, q_red))
self._Gonze_count += 1
fc = self._force_constants
self._force_constants = self._Gonze_force_constants
self._run(q_red)
self._force_constants = fc
dm_dd = self._get_Gonze_dipole_dipole(q_red, q_direction)
self._dynamical_matrix += dm_dd
def _get_Gonze_dipole_dipole(self, q_red, q_direction):
rec_lat = np.linalg.inv(self._pcell.cell) # column vectors
q_cart = np.array(np.dot(q_red, rec_lat.T), dtype="double")
if q_direction is None:
q_dir_cart = None
else:
q_dir_cart = np.array(np.dot(q_direction, rec_lat.T), dtype="double")
try:
import phonopy._phonopy as phonoc # noqa F401
C_recip = self._get_c_recip_dipole_dipole(q_cart, q_dir_cart)
except ImportError:
print(
"Python version of dipole-dipole calculation is not well "
"implemented."
)
sys.exit(1)
# Mass weighted
mass = self._pcell.masses
num_atom = len(self._pcell)
for i in range(num_atom):
for j in range(num_atom):
C_recip[i, :, j, :] *= 1.0 / np.sqrt(mass[i] * mass[j])
C_dd = C_recip.reshape(num_atom * 3, num_atom * 3)
return C_dd
def _get_c_recip_dipole_dipole(self, q_cart, q_dir_cart):
"""Reciprocal part of Eq.(71) on the right hand side.
This is subtracted from supercell force constants to create
short-range force constants. Only once at commensurate points.
This is added to interpolated short range force constants
to create full force constants. Called many times.
"""
import phonopy._phonopy as phonoc
pos = self._pcell.positions
num_atom = len(pos)
volume = self._pcell.volume
dd = np.zeros((num_atom, 3, num_atom, 3), dtype=self._dtype_complex, order="C")
phonoc.recip_dipole_dipole(
dd.view(dtype="double"),
self._dd_q0.view(dtype="double"),
self._G_list,
q_cart,
q_dir_cart,
self._born,
self._dielectric,
np.array(pos, dtype="double", order="C"),
self._unit_conversion * 4.0 * np.pi / volume,
self._Lambda,
self._symprec,
)
return dd
def _run_c_recip_dipole_dipole_q0(self):
"""Reciprocal part of Eq.(71) second term on the right hand side.
Computed only once.
"""
import phonopy._phonopy as phonoc
pos = self._pcell.positions
self._dd_q0 = np.zeros((len(pos), 3, 3), dtype=self._dtype_complex, order="C")
phonoc.recip_dipole_dipole_q0(
self._dd_q0.view(dtype="double"),
self._G_list,
self._born,
self._dielectric,
np.array(pos, dtype="double", order="C"),
self._Lambda,
self._symprec,
)
# Limiting contribution
# inv_eps = np.linalg.inv(self._dielectric)
# sqrt_det_eps = np.sqrt(np.linalg.det(self._dielectric))
# coef = (4.0 / 3 / np.sqrt(np.pi) * inv_eps
# / sqrt_det_eps * self._Lambda ** 3)
# self._dd_q0 -= coef
def _get_py_dipole_dipole(self, K_list, q, q_dir_cart):
pos = self._pcell.positions
num_atom = len(self._pcell)
volume = self._pcell.volume
C = np.zeros((num_atom, 3, num_atom, 3), dtype=self._dtype_complex, order="C")
for q_K in K_list:
if np.linalg.norm(q_K) < self._symprec:
if q_dir_cart is None:
continue
else:
dq_K = q_dir_cart
else:
dq_K = q_K
Z_mat = self._get_charge_sum(
num_atom, dq_K, self._born
) * self._get_constant_factor(
dq_K, self._dielectric, volume, self._unit_conversion
)
for i in range(num_atom):
dpos = -pos + pos[i]
phase_factor = np.exp(2j * np.pi * np.dot(dpos, q_K))
for j in range(num_atom):
C[i, :, j, :] += Z_mat[i, j] * phase_factor[j]
for q_K in K_list:
q_G = q_K - q
if np.linalg.norm(q_G) < self._symprec:
continue
Z_mat = self._get_charge_sum(
num_atom, q_G, self._born
) * self._get_constant_factor(
q_G, self._dielectric, volume, self._unit_conversion
)
for i in range(num_atom):
C_i = np.zeros((3, 3), dtype=self._dtype_complex, order="C")
dpos = -pos + pos[i]
phase_factor = np.exp(2j * np.pi * np.dot(dpos, q_G))
for j in range(num_atom):
C_i += Z_mat[i, j] * phase_factor[j]
C[i, :, i, :] -= C_i
return C
def _get_G_list(self, G_cutoff, g_rad=100):
rec_lat = np.linalg.inv(self._pcell.cell) # column vectors
# g_rad must be greater than 0 for broadcasting.
G_vec_list = self._get_G_vec_list(g_rad, rec_lat)
G_norm2 = ((G_vec_list) ** 2).sum(axis=1)
return np.array(G_vec_list[G_norm2 < G_cutoff ** 2], dtype="double", order="C")
def _get_G_vec_list(self, g_rad, rec_lat):
pts = np.arange(-g_rad, g_rad + 1)
grid = np.meshgrid(pts, pts, pts)
for i in range(3):
grid[i] = grid[i].ravel()
return np.dot(rec_lat, grid).T
def _get_H(self):
lat = self._scell.cell
cart_vecs = np.dot(self._svecs, lat)
Delta = np.dot(cart_vecs, np.linalg.inv(self._dielectric).T)
D = np.sqrt((cart_vecs * Delta).sum(axis=3))
x = self._Lambda * Delta
y = self._Lambda * D
y2 = y ** 2
y3 = y ** 3
exp_y2 = np.exp(-y2)
eps_inv = np.linalg.inv(self._dielectric)
try:
from scipy.special import erfc
erfc_y = erfc(y)
except ImportError:
from math import erfc
erfc_y = np.zeros_like(y)
for i in np.ndindex(y.shape):
erfc_y[i] = erfc(y[i])
with np.errstate(divide="ignore", invalid="ignore"):
A = (3 * erfc_y / y3 + 2 / np.sqrt(np.pi) * exp_y2 * (3 / y2 + 2)) / y2
A[A == np.inf] = 0
A = np.nan_to_num(A)
B = erfc_y / y3 + 2 / np.sqrt(np.pi) * exp_y2 / y2
B[B == np.inf] = 0
B = np.nan_to_num(B)
H = np.zeros((3, 3) + y.shape, dtype="double", order="C")
for i, j in np.ndindex((3, 3)):
H[i, j] = x[:, :, :, i] * x[:, :, :, j] * A - eps_inv[i, j] * B
return H
class DynamicalMatrixWang(DynamicalMatrixNAC):
"""Non analytical term correction (NAC) by Wang et al."""
_method = "wang"
def __init__(
self,
supercell: PhonopyAtoms,
primitive: Primitive,
force_constants,
nac_params=None,
decimals=None,
symprec=1e-5,
log_level=0,
):
"""Init method.
Parameters
----------
supercell : PhonopyAtoms
Supercell.
primitive : Primitive
Primitive cell.
force_constants : array_like
Supercell force constants. Full and compact shapes of arrays are
supported.
shape=(supercell atoms, supercell atoms, 3, 3) for full FC.
shape=(primitive atoms, supercell atoms, 3, 3) for compact FC.
dtype='double'
symprec : float, optional, default=1e-5
Symmetry tolerance.
decimals : int, optional, default=None
Number of decimals. Use like dm.round(decimals).
log_level : int, optional, default=0
Log level.
"""
super().__init__(
supercell,
primitive,
force_constants,
symprec=symprec,
decimals=decimals,
log_level=log_level,
)
self._symprec = symprec
if nac_params is not None:
self.nac_params = nac_params
def show_nac_message(self):
"""Show Wang et al.'s paper reference."""
if self._log_level:
print("NAC by Wang et al., J. Phys. Condens. Matter 22, " "202201 (2010)")
def _set_nac_params(self, nac_params):
"""Set NAC parameters.
This is called via DynamicalMatrixNAC.nac_params.
"""
self._set_basic_nac_params(nac_params)
def _compute_dynamical_matrix(self, q_red, q_direction):
# Wang method (J. Phys.: Condens. Matter 22 (2010) 202201)
rec_lat = np.linalg.inv(self._pcell.cell) # column vectors
if q_direction is None:
q = np.dot(q_red, rec_lat.T)
else:
q = np.dot(q_direction, rec_lat.T)
constant = self._get_constant_factor(
q, self._dielectric, self._pcell.volume, self._unit_conversion
)
try:
import phonopy._phonopy as phonoc # noqa F401
self._run_c_Wang_dynamical_matrix(q_red, q, constant)
except ImportError:
num_atom = len(self._pcell)
fc_backup = self._force_constants.copy()
nac_q = self._get_charge_sum(num_atom, q, self._born) * constant
self._run_py_Wang_force_constants(self._force_constants, nac_q)
self._run(q_red)
self._force_constants = fc_backup
def _run_c_Wang_dynamical_matrix(self, q_red, q, factor):
import phonopy._phonopy as phonoc
fc = self._force_constants
mass = self._pcell.masses
size_prim = len(mass)
dm = np.zeros((size_prim * 3, size_prim * 3), dtype=self._dtype_complex)
if fc.shape[0] == fc.shape[1]: # full fc
phonoc.nac_dynamical_matrix(
dm.view(dtype="double"),
fc,
np.array(q_red, dtype="double"),
self._svecs,
self._multi,
mass,
self._s2p_map,
self._p2s_map,
np.array(q, dtype="double"),
self._born,
factor,
)
else:
phonoc.nac_dynamical_matrix(
dm.view(dtype="double"),
fc,
np.array(q_red, dtype="double"),
self._svecs,
self._multi,
mass,
self._s2pp_map,
np.arange(len(self._p2s_map), dtype="int_"),
np.array(q, dtype="double"),
self._born,
factor,
)
self._dynamical_matrix = dm
def _run_py_Wang_force_constants(self, fc, nac_q):
N = len(self._scell) // len(self._pcell)
for s1 in range(len(self._scell)):
# This if-statement is the trick.
# When constructing the dynamical matrix, phonopy only uses
# fc elements whose left index satisfies s1 == self._s2p_map[s1],
# so the NAC contribution only needs to be added to those rows.
if s1 != self._s2p_map[s1]:
continue
p1 = self._s2pp_map[s1]
for s2 in range(len(self._scell)):
p2 = self._s2pp_map[s2]
fc[s1, s2] += nac_q[p1, p2] / N
def get_dynamical_matrix(
fc2,
supercell: PhonopyAtoms,
primitive: Primitive,
nac_params=None,
frequency_scale_factor=None,
decimals=None,
symprec=1e-5,
log_level=0,
):
"""Return dynamical matrix.
The instance of a class inherited from DynamicalMatrix will be returned
depending on the parameters.
"""
if frequency_scale_factor is None:
_fc2 = fc2
else:
_fc2 = fc2 * frequency_scale_factor ** 2
if nac_params is None:
dm = DynamicalMatrix(supercell, primitive, _fc2, decimals=decimals)
else:
if "method" not in nac_params:
method = "gonze"
else:
method = nac_params["method"]
DM_cls: Union[Type[DynamicalMatrixGL], Type[DynamicalMatrixWang]]
if method == "wang":
DM_cls = DynamicalMatrixWang
else:
DM_cls = DynamicalMatrixGL
dm = DM_cls(
supercell,
primitive,
_fc2,
decimals=decimals,
symprec=symprec,
log_level=log_level,
)
dm.nac_params = nac_params
return dm
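# Hedged usage sketch (assumes fc2, supercell and primitive were prepared
# elsewhere, e.g. through the Phonopy API; the q-point is a placeholder):
# dm = get_dynamical_matrix(fc2, supercell, primitive)
# dm.run([0.1, 0.0, 0.0])
# eigvals = np.linalg.eigvalsh(dm.dynamical_matrix)  # squared frequencies up to unit conversion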
|
atztogo/phonopy
|
phonopy/harmonic/dynamical_matrix.py
|
Python
|
bsd-3-clause
| 35,625
|
[
"phonopy"
] |
51cc9a20e2281de17b055c8d21155a50957b0c77a3798ce56c145d076fa98f11
|
import numpy as np
from itertools import count
from ase import Atom, Atoms
from ase.data import atomic_numbers
from ase.calculators.calculator import Calculator
from ase import units
COUL_COEF = 14.399651725922272
def _tup2str(tup):
""" Convert tuple of atomic symbols to string
i.e. ('Ce','O') -> 'Ce-O' """
return '-'.join(tup)
def _str2tup(s):
""" Parse string and return tuple of atomic symbols
(not used atm) """
return tuple(s.split('-'))
class Buck(Calculator):
"""Buckinham potential calculator"""
implemented_properties = ['energy', 'forces']
def __init__(self, parameters):
"""
Calculator for the Buckingham potential:
E_B = A exp(-r/rho) - C/r^6
with Coulomb interaction:
E_C = q_i * q_j * e^2/(4 pi eps_0 r)
charges are read from atoms object
Parameters:
parameters: dict
Mapping from pair of atoms to tuple containing A, rho and C
for that pair. A in eV, rho in Angstrom, C in eV*Angstrom^6
Example:
calc = Buck({('O', 'O'): (A, rho, C)})
"""
Calculator.__init__(self)
self.parameters = {}
for (symbol1, symbol2), (A, rho, C) in parameters.items():
self.parameters[_tup2str((symbol1, symbol2))] = A, rho, C
self.parameters[_tup2str((symbol2, symbol1))] = A, rho, C
def calculate(self, atoms, properties, system_changes):
Calculator.calculate(self, atoms, properties, system_changes)
if system_changes is not None:
self.update_atoms()
species = set(atoms.numbers)
charges = [atom.charge for atom in atoms]
energy = 0.0
forces = np.zeros((len(atoms), 3))
chems = atoms.get_chemical_symbols()
for i, R1, symb1, Q1 in zip(count(), atoms.positions, chems, charges):
for j, R2, symb2, Q2 in zip(count(), atoms.positions, chems, charges):
if j<=i:
continue
pair = _tup2str((symb1,symb2))
if pair not in self.parameters:
continue
A, rho, C = self.parameters[pair]
D = R1 - R2
d2 = (D**2).sum()
if (d2 > 0.001):
d = np.sqrt(d2)
coul = COUL_COEF*Q1*Q2/d
Aexp = A*np.exp(-d/rho)
Cr6 = C/d2**3
energy += Aexp - Cr6 + coul
force = (1/rho*Aexp - 6/d*Cr6 + coul/d)*D/d
forces[i, :] += force
forces[j, :] += -force
if 'energy' in properties:
self.results['energy'] = energy
if 'forces' in properties:
self.results['forces'] = forces
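# Note: the pair force above follows from F = -dE/dr * (R1 - R2)/r, i.e.
# (A/rho * exp(-r/rho) - 6*C/r^7 + COUL_COEF*q_i*q_j/r^2) along the bond.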
def check_state(self, atoms, tol=1e-15):
return True
def update_atoms(self):
pass
def set_atoms(self, atoms):
self.atoms = atoms
self.update_atoms()
if __name__ == '__main__':
from ase import Atoms
atoms = Atoms(symbols='CeO', cell=[2, 2, 5], positions=np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.4935832]]))
atoms[0].charge = +4
atoms[1].charge = -2
calc = Buck({('Ce', 'O'): (1176.3, 0.381, 0.0)})
#calc = Buck( {('Ce', 'O'): (0.0, 0.149, 0.0)} )
atoms.set_calculator(calc)
print('Epot = ', atoms.get_potential_energy())
print('Force = ', atoms.get_forces())
from ase.md.verlet import VelocityVerlet
dyn = VelocityVerlet(atoms, dt=0.1*units.fs, trajectory='test.traj', logfile='-')
dyn.run(1000)
print('coords = ', atoms.get_positions())
from ase.visualize import view
view(atoms)
|
lavakyan/ase-bimetall
|
calculators/buck.py
|
Python
|
gpl-2.0
| 3,849
|
[
"ASE"
] |
505e08f858c04dd8ec69928d29dd6752ac4a8fc3d47b7d640ec7c2316b08d75d
|
# tests 10341 weighted paths of TNFalpha pathway
plRead = open("TNFalpha-weighted-paths-pathlinker.txt", "r")
csRead = open("TNFalpha-weighted-paths-cytoscape.txt", "r")
# dict that stores path: weight
pl = {}
epsilon = 0.0001
def sameFloat(a, b):
return abs(a - b) <= epsilon
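# e.g. sameFloat(0.50001, 0.5) -> True, sameFloat(0.51, 0.5) -> False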
for line in plRead:
words = line.split( )
weight = float(words[0])
path = words[1]
pl[path] = weight
errors = 0
for line in csRead:
words = line.split( )
weight = float(words[0])
path = words[1]
if path not in pl:
print(path + " " + str(weight))
errors += 1
else:
if not sameFloat(weight, pl[path]):
print(path + " " + str(weight))
errors += 1
print(errors == 0)
|
tmmurali/PathLinker
|
data/TNFalpha/test-files/TNFalpha-NLT-weighted-paths-comparison.py
|
Python
|
gpl-3.0
| 673
|
[
"Cytoscape"
] |
581afd6ee7e1bad7575320d4c77b5cf3b4a21411ac36e80dc9df27b28d954f96
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
import logging
logger = logging.getLogger( 'camelot.core.files.storage' )
from camelot.view.model_thread import model_function
class StoredFile( object ):
"""Helper class for the File field type.
Stored file objects can be used within the GUI thread, as none of
its methods should block.
"""
def __init__( self, storage, name ):
"""
:param storage: the storage in which the file is stored
:param name: the key by which the file is known in the storage"""
self.storage = storage
self.name = name
@property
def verbose_name( self ):
"""The name of the file, as it is to be displayed in the GUI"""
return self.name
def __unicode__( self ):
return self.verbose_name
class StoredImage( StoredFile ):
"""Helper class for the Image field type Class linking an image and the
location and filename where the image is stored"""
def __init__( self, storage, name ):
super(StoredImage, self).__init__( storage, name )
self._thumbnails = dict()
@model_function
def checkout_image( self ):
"""Checkout the image from the storage, and return a QImage"""
from PyQt4.QtGui import QImage
p = self.storage.checkout( self )
return QImage( p )
@model_function
def checkout_thumbnail( self, width, height ):
"""Checkout a thumbnail for this image from the storage
:return: a QImage"""
key = (width, height)
try:
thumbnail_image = self._thumbnails[key]
return thumbnail_image
except KeyError:
pass
from PyQt4.QtCore import Qt
original_image = self.checkout_image()
thumbnail_image = original_image.scaled( width, height, Qt.KeepAspectRatio )
self._thumbnails[key] = thumbnail_image
return thumbnail_image
class Storage( object ):
"""Helper class that opens and saves StoredFile objects
The default implementation stores files in the settings.CAMELOT_MEDIA_ROOT
directory. The storage object should only be used within the model thread,
as all of its methods might block.
The methods of this class don't verify if they are called on the model
thread, because these classes can be used on the server as well.
"""
def __init__( self, upload_to = '',
stored_file_implementation = StoredFile,
root = None ):
"""
:param upload_to: the sub directory in which to put files
:param stored_file_implementation: the subclass of StoredFile to be used when
checking out files from the storage
:param root: the root directory in which to put files, this may be a callable that
takes no arguments. if root is a callable, it will be called in the model thread
to get the actual root of the media store.
The actual files will be put in root + upload_to. If None is given as root,
the settings.CAMELOT_MEDIA_ROOT will be taken as the root directory.
"""
import settings
self._root = (root or settings.CAMELOT_MEDIA_ROOT)
self._subfolder = upload_to
self._upload_to = None
self.stored_file_implementation = stored_file_implementation
#
# don't do anything here that might reduce the startup time, like verifying the
# availability of the storage, since the path might be on a slow network share
#
@property
def upload_to(self):
if self._upload_to is None:
import os
if callable( self._root ):
root = self._root()
else:
root = self._root
self._upload_to = os.path.join( root, self._subfolder )
return self._upload_to
def available(self):
"""Verify if the storage is available
"""
import os
try:
if not os.path.exists( self.upload_to ):
os.makedirs( self.upload_to )
return True
except Exception, e:
logger.warn( 'Could not access or create path %s, files will be unreachable' % self.upload_to, exc_info = e )
return False
def exists( self, name ):
"""True if a file exists given some name"""
if self.available():
import os
return os.path.exists( self.path( name ) )
return False
def list(self, prefix='*', suffix='*'):
"""Lists all files with a given prefix and or suffix available in this storage
:return: a iterator of StoredFile objects
"""
import glob
import os
return (StoredFile(self, os.path.basename(name) ) for name in glob.glob( os.path.join( self.upload_to, u'%s*%s'%(prefix, suffix) ) ) )
def path( self, name ):
"""The local filesystem path where the file can be opened using Python standard open"""
import os
return os.path.join( self.upload_to, name )
def checkin( self, local_path, filename=None ):
"""Check the file pointed to by local_path into the storage, and
return a StoredFile
:param local_path: the path to the local file that needs to be checked in
:param filename: a hint for the filename to be given to the checked in file, if None
is given, the filename from the local path will be taken.
The stored file is not guaranteed to have the filename asked, since the
storage might not support this filename, or another file might be named
like that. In each case the storage will choose the filename.
"""
self.available()
import tempfile
import shutil
import os
to_path = os.path.join( self.upload_to, filename or os.path.basename( local_path ) )
if os.path.exists(to_path):
# only if the default to_path exists, we'll give it a new name
root, extension = os.path.splitext( filename or os.path.basename( local_path ) )
( handle, to_path ) = tempfile.mkstemp( suffix = extension, prefix = root, dir = self.upload_to, text = 'b' )
os.close( handle )
logger.debug( u'copy file from %s to %s', local_path, to_path )
shutil.copy( local_path, to_path )
return self.stored_file_implementation( self, os.path.basename( to_path ) )
def checkin_stream( self, prefix, suffix, stream ):
"""Check the datastream in as a file into the storage
:param prefix: the prefix to use for generating a file name
:param suffix: the suffix to use for generating a file name, e.g. '.png'
:return: a StoredFile"""
self.available()
import tempfile
import os
( handle, to_path ) = tempfile.mkstemp( suffix = suffix, prefix = prefix, dir = self.upload_to, text = 'b' )
logger.debug(u'checkin stream to %s'%to_path)
file = os.fdopen( handle, 'wb' )
file.write( stream.read() )
file.flush()
file.close()
return self.stored_file_implementation( self, os.path.basename( to_path ) )
def checkout( self, stored_file ):
"""Check the file pointed to by the local_path out of the storage and return
a local filesystem path where the file can be opened"""
self.available()
import os
return os.path.join( self.upload_to, stored_file.name )
def checkout_stream( self, stored_file ):
"""Check the file stored_file out of the storage as a datastream
:return: a file object
"""
self.available()
import os
return open( os.path.join( self.upload_to, stored_file.name ), 'rb' )
def delete( self, name ):
pass
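# Hedged usage sketch (assumes settings.CAMELOT_MEDIA_ROOT is configured;
# the paths below are placeholders):
# storage = Storage(upload_to='attachments')
# stored_file = storage.checkin('/tmp/report.pdf')
# local_path = storage.checkout(stored_file)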
class S3Storage( object ):
"""Helper class that opens and saves StoredFile objects into Amazon S3.
These attributes need to be set in your settings for S3Storage to work:
* AWS_ACCESS_KEY_ID = '<INSERT YOUR AWS ACCESS KEY ID HERE>'
* AWS_SECRET_ACCESS_KEY = '<INSERT YOUR AWS SECRET ACCESS KEY HERE>'
* AWS_BUCKET_NAME = 'camelot'
* AWS_LOCATION = S3.Location.DEFAULT
Using this Storage requires the availability of S3.py on your PYTHONPATH.
S3.py can be found on the amazon.com website
"""
def __init__( self, upload_to = '', stored_file_implementation = StoredFile ):
# try to work around bug S3 code which uses bad names of days
# http://code.google.com/p/boto/issues/detail?id=140
# but workaround doesn't work :(
#import locale
# locale.setlocale(locale.LC_TIME, 'en_US.utf8')
# print 'create S3 storage'
import settings
import S3
self.upload_to = upload_to
conn = S3.AWSAuthConnection( settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY )
# _generator = S3.QueryStringAuthGenerator( settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY )
if ( conn.check_bucket_exists( settings.AWS_BUCKET_NAME ).status == 200 ):
pass
else:
conn.create_located_bucket( settings.AWS_BUCKET_NAME, settings.AWS_LOCATION ).message
|
kurtraschke/camelot
|
camelot/core/files/storage.py
|
Python
|
gpl-2.0
| 10,189
|
[
"VisIt"
] |
93977b246d1447430ff08bc08231f093106ed89fd60f1de59be1f5930bd0987f
|
"""
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
|
RPGOne/Skynet
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/mixture/plot_gmm.py
|
Python
|
bsd-3-clause
| 2,817
|
[
"Gaussian"
] |
dce93cc6481c7fb2c742c65a0128b2c0cb14aa9a0ba81974ff87be9e73fef75f
|
import pysam
import sys
import re
import subprocess
import os
from collections import defaultdict
from Bio import AlignIO
# reverse complement a sequence
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
def revcomp(seq):
reverse_complement = "".join(complement.get(base, base) for base in reversed(seq))
return reverse_complement
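# e.g. revcomp("AAGC") -> "GCTT"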
# parse an LAshow read index string into a numeric ID
# the IDs are 1-based and contain commas
def lashow_idstr2idx(s):
return int(s.replace(',', '')) - 1
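# e.g. lashow_idstr2idx("1,234") -> 1233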
# remove non-numeric characters from a string
def remove_nonnumeric(s):
return re.sub("[^0-9]", "", s)
def remove_commas(s):
return re.sub(",", "", s)
# parse an LAshow output file and build a map from a read index
# to the sequences that align to it
def parse_lashow(fn):
fh = open(fn, 'r')
out = defaultdict(list)
# this is how to parse the LAshow output.
pattern = re.compile("\D*(\d+)\s+(\d+)\s+(\w)\D+(\d+)\D+(\d+)\D+(\d+)\D+(\d+).*")
for line in fh:
# Strip commas from the numbers to make the regex easier
line = remove_commas(line)
m = pattern.match(line)
if m is None:
continue
id1 = lashow_idstr2idx(m.group(1))
id2 = lashow_idstr2idx(m.group(2))
strand = m.group(3)
#
s = int(m.group(6))
e = int(m.group(7))
out[id1].append((id2, strand, s, e))
return out
# write a fasta file for input into POA
def write_poa_input(overlaps, read_idx):
fn = "poa.input.%d.fa" % (read_idx)
fh = open(fn, "w")
read_id1 = ref.references[read_idx]
seq1 = ref.fetch(read_id1)
fh.write(">%s\n%s\n" % ("poabaseread", seq1))
n_reads = 0
for o in overlaps[read_idx]:
read_id2 = ref.references[o[0]]
seq2 = ref.fetch(read_id2)
# strand
if o[1] == "c":
seq2 = revcomp(seq2)
# restrict to the part of the sequence that matches read1
seq2 = seq2[o[2]:o[3]]
fh.write(">%s\n%s\n" % (read_id2, seq2))
n_reads += 1
fh.close()
return (fn, n_reads)
def clustal2consensus(fn):
alignment = AlignIO.read(fn, "clustal")
min_coverage = 3
read_row = -1
consensus_row = -1
for (i, record) in enumerate(alignment):
if record.id == 'poabaseread':
read_row = i
if record.id == 'CONSENS0':
consensus_row = i
if consensus_row == -1:
return ""
# Work out the first and last columns that contains
# bases of the read we are correcting
(first_col, last_col) = get_sequence_coords(alignment[read_row].seq)
# Calculate a vector of depths along the consensus
depths = [0] * len(alignment[read_row].seq)
for record in alignment:
(aln_first_col, aln_last_col) = get_sequence_coords(record.seq)
for i in xrange(aln_first_col, aln_last_col):
if aln_first_col >= first_col and aln_last_col <= last_col \
and not record.id.startswith('CONSENS'):
depths[i] += 1
# Change the boundaries to only include high-depth bases
while first_col != last_col:
if depths[first_col] >= min_coverage:
break
first_col += 1
while last_col != first_col:
if depths[last_col] >= min_coverage:
break
last_col -= 1
# Extract the consensus sequence
consensus = str(alignment[consensus_row].seq[first_col:last_col])
consensus = consensus.replace('-', '')
return consensus
# Return the first and last column of the multiple alignment
# that contains a base for the given sequence row
def get_sequence_coords(seq):
first_col = -1
last_col = -1
for (i, s) in enumerate(seq):
if s != '-' and first_col == -1:
first_col = i
if s != '-':
last_col = i
return (first_col, last_col)
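# e.g. get_sequence_coords("--ACG-T--") -> (2, 6)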
#
def run_poa_and_consensus(overlaps, read_idx):
(in_fn, n_reads) = write_poa_input(overlaps, read_idx)
out_fn = "clustal-%d.out" % (read_idx)
DEVNULL = open(os.devnull, 'wb')
blosum_file = "poa-blosum80.mat"
if not os.path.exists(blosum_file):
# use the blosum file located next to nanocorrect.py when no local copy is available.
blosum_file = os.path.join(os.path.dirname(__file__), blosum_file)
if not os.path.exists(blosum_file):
sys.stderr.write("error: poa-blosum80.mat not found\n")
sys.exit(1)
cmd = "poa -read_fasta %s -clustal %s -hb %s" % (in_fn, out_fn, blosum_file)
p = subprocess.Popen(cmd, shell=True, stderr=DEVNULL)
p.wait()
if p.returncode != 0:
sys.stderr.write("error: failed to run poa - is it on your PATH?\n")
sys.exit(1)
consensus = clustal2consensus(out_fn)
os.remove(in_fn)
os.remove(out_fn)
return (consensus, n_reads)
def run_lashow(name, start, end):
out_fn = "lashow.%s-%s.out" % (start, end)
out_fh = open(out_fn, 'w')
cmd = "LAshow %s.db %s.las %s-%s" % (name, name, start, end)
p = subprocess.Popen(cmd, shell=True, stdout=out_fh)
p.wait()
if p.returncode != 0:
sys.stderr.write("error: failed to run LAshow - is it on your PATH?\n")
sys.exit(1)
out_fh.close()
return out_fn
# Args
if len(sys.argv) != 3:
sys.stderr.write("error: received %d arguments instead of 2\n" % (len(sys.argv) - 1))
sys.stderr.write("usage: python nanocorrect.py <db name> <read range>\n")
sys.exit(1)
name = sys.argv[1]
read_range = sys.argv[2]
# Open reference file
ref_fn = "%s.pp.fasta" % (name)
ref = pysam.Fastafile(ref_fn)
# Parse the range of read ids to correct
start = 0
end = 0
range_max = ref.nreferences
if read_range == "all":
end = range_max
elif read_range == "{}":
sys.stderr.write("error: {} is an invalid read range.\n")
sys.stderr.write("Please check that your version of gnu parallel is functioning correctly\n")
sys.exit(1)
else:
(start, end) = [ int(x) for x in read_range.split(':') ]
if start < 0 or end > range_max:
sys.stderr.write("error: %d:%d is an invalid read range - read range limits are [0 %d])\n" % (start, end, range_max))
sys.exit(1)
# Generate the LAshow file indicating overlaps
# The indices that nanocorrect takes are zero-based exclusive ends but
# LAshow is 1-based inclusive ends. Translate between the indexing
# schemes here.
lashow_fn = run_lashow(name, start + 1, end)
# Make a dictionary of overlaps
overlaps = parse_lashow(lashow_fn)
# Correct each read with POA
for read_idx in xrange(start, end):
(seq, n_reads) = run_poa_and_consensus(overlaps, read_idx)
if seq != "":
print ">%d n_reads=%d\n%s" % (read_idx, n_reads, seq)
os.remove(lashow_fn)
|
jts/nanocorrect
|
nanocorrect.py
|
Python
|
mit
| 6,748
|
[
"pysam"
] |
871120fd1c6a1ea1c50d4e837c203b7bcb11b82334c4b5c4d63338b9592dd66f
|
"""Unrolls neighbor loops and InputElementZeroOffset nodes in a StencilModel.
The second stage in stencil kernel processing, after
stencil_python_front_end and before stencil_convert. This stage is
done once per call because the dimensions of the input are needed.
"""
from stencil_model import *
from stencil_grid import *
import ast
from assert_utils import *
from copy import deepcopy
class StencilUnrollNeighborIter(ast.NodeTransformer):
def __init__(self, stencil_model, input_grids, output_grid, inject_failure=None):
assert_has_type(stencil_model, StencilModel)
assert len(input_grids) == len(stencil_model.input_grids), 'Incorrect number of input grids'
self.model = stencil_model
self.input_grids = input_grids
self.output_grid = output_grid
self.inject_failure = inject_failure
super(StencilUnrollNeighborIter, self).__init__()
class NoNeighborIterChecker(ast.NodeVisitor):
def __init__(self):
super(StencilUnrollNeighborIter.NoNeighborIterChecker, self).__init__()
def visit_StencilNeighborIter(self, node):
assert False, 'Encountered StencilNeighborIter but all should have been removed'
def visit_InputElementZeroOffset(self, node):
assert False, 'Encountered InputElementZeroOffset but all should have been removed'
def visit_NeighborDistance(self, node):
assert False, 'Encountered NeighborDistance but all should have been removed'
def run(self):
self.visit(self.model)
StencilModelChecker().visit(self.model)
StencilUnrollNeighborIter.NoNeighborIterChecker().visit(self.model)
return self.model
def visit_StencilModel(self, node):
self.input_dict = dict()
for i in range(len(node.input_grids)):
self.input_dict[node.input_grids[i].name] = self.input_grids[i]
self.generic_visit(node)
def visit_Kernel(self, node):
body = []
for statement in node.body:
if type(statement) is StencilNeighborIter:
body.extend(self.visit_StencilNeighborIter_return_list(statement))
else:
body.append(self.visit(statement))
return Kernel(body)
def visit_StencilNeighborIter_return_list(self, node):
grid = self.input_dict[node.grid.name]
neighbors_id = node.neighbors_id.value
zero_point = tuple([0 for x in range(grid.dim)])
result = []
self.current_neighbor_grid_id = node.grid
for x in grid.neighbors(zero_point, neighbors_id):
self.offset_list = list(x)
for statement in node.body:
result.append(self.visit(deepcopy(statement)))
self.offset_list = None
self.current_neighbor_grid_id = None
return result
def visit_Neighbor(self, node):
return InputElement(self.current_neighbor_grid_id, self.offset_list)
def visit_InputElementZeroOffset(self, node):
grid = self.input_dict[node.grid.name]
zero_point = tuple([0 for x in range(grid.dim)])
return InputElement(node.grid, zero_point)
def visit_InputElementExprIndex(self, node):
grid = self.input_dict[node.grid.name]
assert grid.dim == 1, 'Grid \'%s\' has dimension %s but expected dimension 1 because this kernel indexes into it using an expression' % (grid, grid.dim)
self.generic_visit(node)
return node
def visit_NeighborDistance(self, node):
zero_point = tuple([0 for x in range(len(self.offset_list))])
if self.inject_failure=='manhattan_distance':
return Constant(manhattan_distance(zero_point, self.offset_list))
else:
return Constant(distance(zero_point, self.offset_list))
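# Hedged illustration (hypothetical stencil model): a StencilNeighborIter over
# the four von-Neumann neighbors of a 2D grid unrolls each body statement into
# four copies, with Neighbor nodes replaced by InputElement nodes carrying
# offsets such as (-1, 0), (1, 0), (0, -1) and (0, 1).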
|
pbirsinger/aspNew
|
tools/debugger/stencil/stencil_unroll_neighbor_iter.py
|
Python
|
bsd-3-clause
| 3,792
|
[
"VisIt"
] |
1919c0f576916a6111684344935a064e93cceb2b9c2dcf78c3e1f0eecc793644
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 17 13:17:30 2016
@author: nn31
"""
import pandas as pd
import pickle
#read in cmmi data
#import our data based on the file 'DataDictionary for QDACT on V
cmmi = pd.read_csv("/Volumes/DCCRP_projects/CMMI-Pro62342/data/QDACT 05-03-2016.csv",
parse_dates=['AssessmentDate','AdmissionDate','DischargeDate','PalliativeDischargeDate'])
dd = pickle.load(open("/Users/nn31/Dropbox/40-githubRrepos/qdact-basic-analysis/notebooks/python_scripts/02_data_dictionary_dict.p", "rb" ))
cmmi['counts'] = 1
four_seasons = cmmi.loc[cmmi.site_name.isin(['Four Seasons'])]
catawba = cmmi.loc[cmmi.site_name.isin(['Catawba'])]
greenville = cmmi.loc[cmmi.site_name.isin(['Greenville Health Systems (GHS)'])]
# did we get everything?
print(cmmi.shape[0] == (four_seasons.shape[0] + catawba.shape[0] + greenville.shape[0]))
overall = cmmi.groupby('internalid').sum()['counts'].tolist()
four_seasonsC = four_seasons.groupby('internalid').sum()['counts'].tolist()
catawbaC = catawba.groupby('internalid').sum()['counts'].tolist()
greenvilleC = greenville.groupby('internalid').sum()['counts'].tolist()
visit_freq = dict()
visit_freq['overall'] = overall
visit_freq['four_seasonsC'] = four_seasonsC
visit_freq['catawbaC'] = catawbaC
visit_freq['greenvilleC'] = greenvilleC
pickle.dump( visit_freq, open( "/Users/nn31/Dropbox/40-githubRrepos/qdact-basic-analysis/notebooks/python_scripts/visitfreq.p", "wb" ) )
new = pickle.load( open( "/Users/nn31/Dropbox/40-githubRrepos/qdact-basic-analysis/notebooks/python_scripts/visitfreq.p", "rb" ) )
#create some histograms
import numpy as np
from matplotlib import pyplot
bins = np.linspace(0, 30, 30)
pyplot.hist(new.get('overall'), bins, alpha=0.25, label='CMMI - Overall')
pyplot.hist(new.get('four_seasonsC'), bins, alpha=0.25, label='Four Seasons')
pyplot.hist(new.get('catawbaC'), bins, alpha=0.25, label='Catawba')
pyplot.hist(new.get('greenvilleC'), bins, alpha=0.25, label='Greenville Health Systems')
pyplot.legend(loc='upper right')
pyplot.show()
#Create the visit-sequence file(s) that our d3 graphs depend on
def recode(z):
if z == 10000:
return('Cancer')
if z == 20000:
return('Cardiovascular')
if z == 30000:
return('Pulmonary')
if z == 40000:
return('Gastrointestinal')
if z == 50000:
return('Renal')
if z == 60000:
return('Neurologic')
if z == 70000:
return('Infectious')
if z == 997:
return('Other Diagnosis')
if z == 998:
return('Missing')
if z == 999:
return('Unknown')
cmmi['PrimaryDiagnosis2'] = cmmi.PrimaryDiagnosis.apply(lambda x: recode(x))
def createString(*args):
return('-'.join(args))
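# For each patient (internalid), join the per-visit diagnoses into a single
# dash-delimited sequence string, then count how often each sequence occurs.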
internalidGroup = cmmi.groupby('internalid')
zz = internalidGroup.PrimaryDiagnosis2.apply(lambda x: createString(*list(x)))
zz_vc = zz.value_counts()
out = zz_vc[zz_vc.index.sort_values(ascending=False)]
out.to_csv('/Users/nn31/Dropbox/40-githubRrepos/qdact-basic-analysis/notebooks/visit-sequences_primdiag.csv')
#Create the visit-sequence file(s) that our d3 graphs depend on
def recode2(z):
if z == 1:
return('Hospital(Gen)')
if z == 2:
return('Hospital(ICU)')
if z == 3:
return('Outpat(Clin)')
if z == 4:
return('LongTermCare')
if z == 5:
return('Home')
if z == 6:
return('ED')
if z == 7:
return('PalliativeCareUnit')
if z == 997:
return('Other')
if np.isnan(z):
return('Missing')
cmmi['ConsultLoc2'] = cmmi.ConsultLoc.apply(lambda x: recode2(x))
internalidGroup = cmmi.groupby('internalid')
zzz = internalidGroup.ConsultLoc2.apply(lambda x: createString(*list(x)))
zzz_vc = zzz.value_counts()
out2 = zzz_vc[zzz_vc.index.sort_values(ascending=False)]
out2.to_csv('/Users/nn31/Dropbox/40-githubRrepos/qdact-basic-analysis/notebooks/visit-sequences_cl.csv')
|
benneely/qdact-basic-analysis
|
notebooks/python_scripts/03_follow-up_viz.py
|
Python
|
gpl-3.0
| 3,939
|
[
"VisIt"
] |
257e4c4ff5cde7280b9e10b038a031b9dec3c093489d397e48a8597c0c2024e4
|
import random
import hashlib
from django.conf import settings
NAMES = [u'3_d_man',
u'alars',
u'aardwolf',
u'abdul_alhazred',
u'abe_brown',
u'abigail_brand',
u'abner_jenkins',
u'abner_little',
u'abominable_snowman',
u'abomination',
u'abominatrix',
u'abraham_cornelius',
u'abraxas',
u'absalom',
u'absorbing_man',
u'abyss',
u'access',
u'achebe',
u'achelous',
u'achilles',
u'acrobat',
u'adam_ii',
u'adam_warlock',
u'adam_x',
u'adaptoid',
u'administrator',
u'adonis',
u'adrenazon',
u'adri_nital',
u'adrian_corbo',
u'adrian_toomes',
u'adrienne_frost',
u'adversary',
u'advisor',
u'aegis',
u'aelfyre_whitemane',
u'aero',
u'aftershock',
u'agamemnon',
u'agamotto',
u'agatha_harkness',
u'aged_genghis',
u'agent',
u'agent_axis',
u'agent_cheesecake',
u'agent_x',
u'agent_zero',
u'aggamon',
u'aginar',
u'agon',
u'agony',
u'agron',
u'aguja',
u'ahab',
u'ahmet_abdol',
u'ahura',
u'air_walker',
u'airborne',
u'aireo',
u'airstrike',
u'ajak',
u'ajax',
u'ajaxis',
u'akasha',
u'akhenaten',
u'al_mackenzie',
u'alaris',
u'albert',
u'albino',
u'albion',
u'alchemy',
u'alcmena',
u'aldebron',
u'aleksander_lukin',
u'aleksei_sytsevich',
u'aleta_ogord',
u'alex',
u'alex_hayden',
u'alex_power',
u'alex_wilder',
u'alexander_bont',
u'alexander_goodwin_pierce',
u'alexander_lexington',
u'alexander_summers',
u'alfie_omeggan',
u'algrim_the_strong',
u'alibar',
u'alicia_masters',
u'alistair_smythe',
u'alistaire_stuart',
u'aliyah_bishop',
u'alkhema',
u'all_american',
u'allatou',
u'allison_blaire',
u'alpha_ray',
u'alpha_the_ultimate_mutant',
u'alyosha_kravinoff',
u'alysande_stuart',
u'alyssa_moy',
u'amahl_farouk',
u'amalgam',
u'amanda_sefton',
u'amatsu_mikaboshi',
u'amazon',
u'amber_hunt',
u'amelia_voght',
u'amergin',
u'american_ace',
u'american_dream',
u'american_eagle',
u'american_samurai',
u'americop',
u'ameridroid',
u'amiko_kobayashi',
u'amina_synge',
u'aminedi',
u'ammo',
u'amphibian',
u'amphibion',
u'amphibius',
u'amun',
u'anaconda',
u'anais',
u'analyzer',
u'anarchist',
u'ancient_one',
u'andreas_von_strucker',
u'andrew_chord',
u'andrew_gervais',
u'android_man',
u'andromeda',
u'anelle',
u'angar_the_screamer',
u'angel',
u'angel_dust',
u'angel_face',
u'angel_salvadore',
u'angela_cairn',
u'angela_del_toro',
u'angelica_jones',
u'angelo_unuscione',
u'angler',
u'ani_mator',
u'animus',
u'ankhi',
u'annalee',
u'anne_marie_cortez',
u'annex',
u'annie_ghazikhanian',
u'annihilus',
u'anole',
u'anomalito',
u'anomaloco',
u'anomaly',
u'answer',
u'ant_man',
u'anthropomorpho',
u'anti_cap',
u'anti_phoenix_force',
u'anti_venom',
u'anti_vision',
u'antimatter',
u'antiphon_the_overseer',
u'antonio',
u'anubis',
u'anvil',
u'anything',
u'apache_kid',
u'apalla',
u'ape',
u'ape_man',
u'ape_x',
u'apocalypse',
u'apollo',
u'apryll',
u'aquarian',
u'aquarius',
u'aqueduct',
u'arabian_knight',
u'arachne',
u'aragorn',
u'araki',
u'aralune',
u'arana',
u'arc',
u'arcade',
u'arcademan',
u'arcanna',
u'archangel',
u'archenemy',
u'archer',
u'archie_corrigan',
u'archimage',
u'architect',
u'arclight',
u'arcturus_rann',
u'ardina',
u'ardroman',
u'arena',
u'ares',
u'argo',
u'argus',
u'ariann',
u'arides',
u'ariel',
u'aries',
u'arishem_the_judge',
u'arize',
u'arizona_annie',
u'arkady_rossovich',
u'arkon',
u'arkus',
u'arlette_truffaut',
u'arlok',
u'armadillo',
u'armageddon',
u'armand_martel',
u'armor',
u'armory',
u'arnim_zola',
u'arno_stark',
u'arranger',
u'arsenal',
u'arsenic',
u'artemis',
u'arthur_parks',
u'artie',
u'artie_maddicks',
u'arturo_falcones',
u'asbestos_lady',
u'asbestos_man',
u'ashcan',
u'asmodeus',
u'asp',
u'assassin',
u'asteroth',
u'astra',
u'astrid_bloom',
u'astron',
u'astronomer',
u'asylum',
u'atalanta',
u'atalon',
u'athena',
u'atlas',
u'atleza',
u'atom_bob',
u'atom_smasher',
u'att_lass',
u'attuma',
u'atum',
u'aunt_may_parker',
u'auntie_freeze',
u'auric',
u'aurora',
u'authority',
u'autolycus',
u'avalanche',
u'avarrish',
u'awesome_android',
u'axum',
u'azazel',
u'baal',
u'balder',
u'balor',
u'balthakk',
u'bandit',
u'banshee',
u'bantam',
u'baphomet',
u'barbarus',
u'barnacle',
u'baron_blood',
u'baron_brimstone',
u'baron_macabre',
u'baron_mordo',
u'baron_samedi',
u'baron_strucker',
u'baron_von_blitzschlag',
u'baron_zemo',
u'baroness_blood',
u'barracuda',
u'bart_hamilton',
u'base',
u'basil_sandhurst',
u'basilisk',
u'bast',
u'bastion',
u'batragon',
u'batroc_the_leaper',
u'battering_ram',
u'battleaxe',
u'battlestar',
u'battletide',
u'batwing',
u'beast',
u'beautiful_dreamer',
u'bedlam',
u'bedlam_ii',
u'beetle',
u'beetle_ii',
u'behemoth',
u'bela',
u'belasco',
u'belathauzer',
u'bella_donna',
u'ben_parker',
u'ben_reilly',
u'ben_urich',
u'benazir_kaur',
u'benedict_kine',
u'bengal',
u'benjamin_jacob_grimm',
u'bennet_du_paris',
u'benny_beckley',
u'bentley_wittman',
u'bereet',
u'berzerker',
u'bes',
u'beta_ray_bill',
u'bethany_cabe',
u'betty_brant',
u'betty_brant_leeds',
u'betty_ross_banner',
u'bevatron',
u'beyonder',
u'bi_beast',
u'bible_john',
u'big_bertha',
u'big_man',
u'big_wheel',
u'bill_foster',
u'binary',
u'bird_brain',
u'bird_man',
u'bishop',
u'bison',
u'bizarnage',
u'black_bolt',
u'black_box',
u'black_cat',
u'black_crow',
u'black_death',
u'black_dragon',
u'black_fox',
u'black_goliath',
u'black_jack_tarr',
u'black_king',
u'black_knight',
u'black_lama',
u'black_mamba',
u'black_marvel',
u'black_panther',
u'black_queen',
u'black_talon',
u'black_tarantula',
u'black_tom_cassidy',
u'black_widow',
u'blackbird',
u'blackheart',
u'blackheath',
u'blacklash',
u'blackout',
u'blackwing',
u'blackwulf',
u'blade',
u'blaquesmith',
u'blastaar',
u'blaze',
u'blazing_skull',
u'blind_faith',
u'blind_justice',
u'blindside',
u'blindspot',
u'bling',
u'blink',
u'blistik',
u'blitziana',
u'blitzkrieger',
u'blizzard',
u'blizzard_ii',
u'blob',
u'blockbuster',
u'bloke',
u'blonde_phantom',
u'blood_brothers',
u'blood_rose',
u'blood_spider',
u'bloodaxe',
u'bloodhawk',
u'bloodlust',
u'bloodlust_ii',
u'bloodscream',
u'bloodshed',
u'bloodsport',
u'bloodstorm',
u'bloodtide',
u'bloodwraith',
u'blowhard',
u'blue_bullet',
u'blue_diamond',
u'blue_marvel',
u'blue_shield',
u'blue_streak',
u'blur',
u'bob',
u'bob_diamond',
u'bobster',
u'bogeyman',
u'bombshell',
u'boneyard',
u'bonita_juarez',
u'boobytrap',
u'book',
u'boom_boom',
u'boom_boy',
u'boomer',
u'boomerang',
u'boomslang',
u'boost',
u'bora',
u'bounty',
u'bounty_hunter',
u'bova',
u'box',
u'box_iv',
u'brain_cell',
u'brain_drain',
u'brain_child',
u'brainchild',
u'bram_velsing',
u'brass',
u'bres',
u'brian_braddock',
u'brian_falsworth',
u'brigade',
u'briquette',
u'brother_nature',
u'brother_tode',
u'brother_voodoo',
u'brothers_grimm',
u'bruiser',
u'brunnhilda',
u'brutacus',
u'brute_i',
u'brute_ii',
u'brute_iii',
u'brynocki',
u'bucky',
u'bucky_iii',
u'bug',
u'bulldozer',
u'bullet',
u'bullseye',
u'burner',
u'burstarr',
u'bushman',
u'bushmaster',
u'bushwacker',
u'butterball',
u'buzz',
u'buzzard',
u'byrrah',
u'caber',
u'cable',
u'cadaver',
u'cagliostro',
u'caiera',
u'caiman',
u'cain',
u'cain_marko',
u'caleb_alexander',
u'caliban',
u'callisto',
u'calvin_rankin',
u'calypso',
u'cameron_hodge',
u'canasta',
u'cancer',
u'candra',
u'cannonball',
u'cannonball_i',
u'cap_n_hawk',
u'caprice',
u'capricorn',
u'captain_america',
u'captain_atlas',
u'captain_barracuda',
u'captain_britain',
u'captain_fate',
u'captain_germany',
u'captain_marvel',
u'captain_omen',
u'captain_savage',
u'captain_uk',
u'captain_ultra',
u'captain_universe',
u'captain_wings',
u'captain_zero',
u'cardiac',
u'cardinal',
u'caregiver',
u'caretaker',
u'carl_crusher_creel',
u'carlos_lobo',
u'carmella_unuscione',
u'carmilla_black',
u'carnage',
u'carnivore',
u'carolyn_parmenter',
u'carolyn_trainer',
u'carrie_alexander',
u'carrion',
u'carter_ghazikhanian',
u'cassandra_nova',
u'cassie_lang',
u'cassiopea',
u'cat',
u'cat_man',
u'catiana',
u'cayman',
u'cecelia_reyes',
u'cecilia_reyes',
u'celestial_madonna',
u'centennial',
u'centurion',
u'centurious',
u'centurius',
u'cerberus',
u'cerebra',
u'cerise',
u'cessily_kincaid',
u'cethlann',
u'chod',
u'chaka',
u'challenger',
u'chamber',
u'chameleon',
u'champion_of_the_universe',
u'chan_luichow',
u'chance',
u'changeling',
u'chaos',
u'charcoal',
u'charles_xavier',
u'charlie_27',
u'charon',
u'chase_stein',
u'cheetah',
u'chemistro',
u'chen_lu',
u'chi_demon',
u'chief_examiner',
u'chimera',
u'chloe_tran',
u'choice',
u'chondu_the_mystic',
u'christopher_summers',
u'chrome',
u'chronos',
u'chthon',
u'chtylok',
u'citizen_v',
u'claire_voyant',
u'claudette_st_croix',
u'clea',
u'clearcut',
u'cletus_kasady',
u'clint_barton',
u'clive',
u'cloak',
u'cloud',
u'cloud_9',
u'clown',
u'coach',
u'coachwhip',
u'cobalt_man',
u'cobra',
u'cody_mushumanski_gun_man',
u'cold_war',
u'coldblood',
u'coldfire',
u'collective_man',
u'collector',
u'colleen_wing',
u'colonel',
u'colonel_america',
u'colossus',
u'comet',
u'comet_man',
u'commander_kraken',
u'commando',
u'conan_the_barbarian',
u'condor',
u'conquer_lord',
u'conquest',
u'conquistador',
u'conrad_josten',
u'constrictor',
u'contemplator',
u'contessa',
u'contrary',
u'controller',
u'copperhead',
u'copycat',
u'coral',
u'cordelia_frost',
u'cornelius_van_lunt',
u'corona',
u'corruptor',
u'corsair',
u'cottonmouth',
u'count_abyss',
u'count_nefaria',
u'courier',
u'cowgirl',
u'crazy_eight',
u'crime_master',
u'crime_buster',
u'crimebuster',
u'crimson',
u'crimson_cavalier',
u'crimson_commando',
u'crimson_cowl',
u'crimson_craig',
u'crimson_daffodil',
u'crimson_dynamo',
u'crimson_dynamo_v',
u'crimson_and_the_raven',
u'crippler',
u'crooked_man',
u'crossbones',
u'crossfire',
u'crown',
u'crucible',
u'crusader',
u'crusher',
u'crystal',
u'curtis_connors',
u'cutthroat',
u'cybele',
u'cybelle',
u'cyber',
u'cyborg_x',
u'cyclone',
u'cyclops',
u'cypher',
u'dken',
u'dspayre',
u'd_man',
u'dj',
u'dagger',
u'daisy_johnson',
u'dakimh_the_enchanter',
u'dakota_north',
u'damballah',
u'damion_hellstrom',
u'damon_dran',
u'dan_ketch',
u'danger',
u'daniel_rand',
u'danielle_moonstar',
u'dansen_macabre',
u'danvers_carol',
u'daredevil',
u'dark_angel',
u'dark_beast',
u'dark_phoenix',
u'dark_crawler',
u'darkdevil',
u'darkhawk',
u'darkoth',
u'darkstar',
u'david_cannon',
u'daytripper',
u'dazzler',
u'deacon_frost',
u'dead_girl',
u'deadhead',
u'deadly_ernest',
u'deadpool',
u'death',
u'death_adder',
u'deaths_head',
u'deaths_head_ii',
u'death_stalker',
u'deathbird',
u'deathlok',
u'deathstroke',
u'deathurge',
u'deathwatch',
u'deborah_ritter',
u'debra_whitman',
u'decay',
u'decay_ii',
u'defensor',
u'delilah',
u'delphi',
u'delphine_courtney',
u'dementia',
u'demiurge',
u'demogoblin',
u'demogorge_the_god_eater',
u'demolition_man',
u'derrick_slegers_speed',
u'desmond_pitt',
u'destiny',
u'destroyer',
u'destroyer_of_demons',
u'devastator',
u'devil_dinosaur',
u'devil_hunter_gabriel',
u'devil_slayer',
u'devos_the_devastator',
u'diablo',
u'diamanda_nero',
u'diamond_lil',
u'diamondback',
u'diamondhead',
u'digitek',
u'dionysus',
u'dirtnap',
u'discus',
u'dittomaster',
u'dmitri_bukharin',
u'dmitri_smerdyakov',
u'doc_samson',
u'doctor_anthony_droom',
u'doctor_arthur_nagan',
u'doctor_bong',
u'doctor_demonicus',
u'doctor_doom',
u'doctor_dorcas',
u'doctor_droom',
u'doctor_druid',
u'doctor_faustus',
u'doctor_glitternight',
u'doctor_leery',
u'doctor_minerva',
u'doctor_octopus',
u'doctor_spectrum',
u'doctor_strange',
u'doctor_sun',
u'domina',
u'dominic_fortune',
u'dominic_petros',
u'domino',
u'dominus',
u'domo',
u'don_fortunato',
u'donald_donny_gill',
u'donald_pierce',
u'donald_ritter',
u'doop',
u'doorman',
u'doppelganger',
u'doppleganger',
u'dorma',
u'dormammu',
u'double_helix',
u'doug_ramsey',
u'doug_and_jerry',
u'dougboy',
u'doughboy',
u'douglas_birely',
u'douglas_ramsey',
u'douglock',
u'dr_john_grey',
u'dr_lemuel_dorcas',
u'dr_marla_jameson',
u'dr_otto_octavius',
u'dracula',
u'dragon_lord',
u'dragon_man',
u'dragon_of_the_moon',
u'dragoness',
u'dragonfly',
u'dragonwing',
u'drax_the_destroyer',
u'dreadknight',
u'dreadnought',
u'dream_weaver',
u'dreaming_celestial',
u'dreamqueen',
u'dredmund_druid',
u'dromedan',
u'druid',
u'druig',
u'dum_dum_dugan',
u'dusk',
u'dust',
u'dweller_in_darkness',
u'dyna_mite',
u'earth_lord',
u'earthquake',
u'ebenezer_laughton',
u'ebon_seeker',
u'echo',
u'ecstasy',
u'ectokid',
u'eddie_brock',
u'edward_ned_buckman',
u'edwin_jarvis',
u'eel',
u'egghead',
u'ego_the_living_planet',
u'el_aguila',
u'el_muerto',
u'elaine_grey',
u'elathan',
u'electric_eve',
u'electro',
u'electrocute',
u'electron',
u'eleggua',
u'elektra',
u'elektra_natchios',
u'elektro',
u'elf_with_a_gun',
u'elfqueen',
u'elias_bogan',
u'eliminator',
u'elixir',
u'elizabeth_betsy_braddock',
u'elizabeth_twoyoungmen',
u'ellie_phimster',
u'elsie_dee',
u'elven',
u'elysius',
u'emil_blonsky',
u'emma_frost',
u'empath',
u'empathoid',
u'emplate',
u'en_sabah_nur',
u'enchantress',
u'energizer',
u'enforcer',
u'enigma',
u'ent',
u'entropic_man',
u'eon',
u'epoch',
u'equilibrius',
u'equinox',
u'ereshkigal',
u'erg',
u'eric_slaughter',
u'eric_williams',
u'eric_the_red',
u'erik_josten',
u'erik_killmonger',
u'erik_magnus_lehnsherr',
u'ernst',
u'eros',
u'eshu',
u'eson_the_searcher',
u'eternal_brain',
u'eternity',
u'ethan_edwards',
u'eugene_judd',
u'ev_teel_urizen',
u'evangeline_whedon',
u'ever',
u'everett_thomas',
u'everyman',
u'evilhawk',
u'executioner',
u'exodus',
u'exploding_man',
u'exterminator',
u'ezekiel',
u'ezekiel_sims',
u'ezekiel_stane',
u'fabian_cortez',
u'fafnir',
u'fagin',
u'falcon',
u'fallen_one',
u'famine',
u'fan_boy',
u'fandral',
u'fang',
u'fantasia',
u'fantastic_four',
u'fantomex',
u'farallah',
u'fasaud',
u'fashima',
u'fatale',
u'fateball',
u'father_time',
u'fault_zone',
u'fearmaster',
u'feedback',
u'felicia_hardy',
u'feline',
u'fenris',
u'fenris_wolf',
u'fer_de_lance',
u'feral',
u'feron',
u'fever_pitch',
u'fight_man',
u'fin',
u'fin_fang_foom',
u'firearm',
u'firebird',
u'firebolt',
u'firebrand',
u'firefrost',
u'firelord',
u'firepower',
u'firestar',
u'fixer',
u'fixx',
u'flag_smasher',
u'flambe',
u'flash_thompson',
u'flatman',
u'flex',
u'flint_marko',
u'flubber',
u'fly',
u'flygirl',
u'flying_tiger',
u'foggy_nelson',
u'fontanelle',
u'foolkiller',
u'forbush_man',
u'force',
u'forearm',
u'foreigner',
u'forge',
u'forgotten_one',
u'foxfire',
u'frank_castle',
u'frank_drake',
u'frank_payne',
u'frank_simpson',
u'frankensteins_monster',
u'frankie_raye',
u'frankie_and_victoria',
u'franklin_hall',
u'franklin_richards',
u'franklin_storm',
u'freak',
u'freak_of_science',
u'freakmaster',
u'freakshow',
u'fred_myers',
u'frederick_slade',
u'free_spirit',
u'freedom_ring',
u'frenzy',
u'frey',
u'frigga',
u'frog_man',
u'fury',
u'fusion',
u'futurist',
u'g_force',
u'gabe_jones',
u'gabriel_summers',
u'gabriel_the_air_walker',
u'gaea',
u'gaia',
u'gailyn_bailey',
u'galactus',
u'galaxy_master',
u'gambit',
u'gammenon_the_gatherer',
u'gamora',
u'ganymede',
u'gardener',
u'gargantua',
u'gargantus',
u'gargouille',
u'gargoyle',
u'garokk_the_petrified_man',
u'garrison_kane',
u'gatecrasher',
u'gateway',
u'gauntlet',
u'gavel',
u'gaza',
u'gazelle',
u'gazer',
u'geb',
u'gee',
u'geiger',
u'geirrodur',
u'gemini',
u'general_orwell_taylor',
u'genis_vell',
u'george_stacy',
u'george_tarleton',
u'george_washington_bridge',
u'georgianna_castleberry',
u'gertrude_yorkes',
u'ghaur',
u'ghost',
u'ghost_dancer',
u'ghost_girl',
u'ghost_maker',
u'ghost_rider',
u'ghost_rider_2099',
u'ghoul',
u'giant_man',
u'gibbon',
u'gibborim',
u'gideon',
u'gideon_mace',
u'giganto',
u'gigantus',
u'gin_genie',
u'gladiator',
u'gladiatrix',
u'glamor',
u'glenn_talbot',
u'glitch',
u'glob',
u'glob_herman',
u'gloom',
u'glorian',
u'goblin_queen',
u'goblyn',
u'godfrey_calthrop',
u'gog',
u'goldbug',
u'golden_archer',
u'golden_girl',
u'golden_oldie',
u'goldeneye',
u'golem',
u'goliath',
u'gomi',
u'googam',
u'gorgeous_george',
u'gorgilla',
u'gorgon',
u'gorilla_girl',
u'gorilla_man',
u'gorr',
u'gosamyr',
u'grand_director',
u'grandmaster',
u'grappler',
u'grasshopper',
u'grasshopper_ii',
u'graviton',
u'gravity',
u'graydon_creed',
u'great_gambonnos',
u'great_video',
u'green_goblin',
u'green_goblin_iv',
u'greer_grant',
u'greer_grant_nelson',
u'gregor_shapanka',
u'gregory_gideon',
u'gremlin',
u'grenade',
u'grey_gargoyle',
u'grey_king',
u'griffin',
u'grim_hunter',
u'grim_reaper',
u'grizzly',
u'grog_the_god_crusher',
u'gronk',
u'grotesk',
u'groundhog',
u'growing_man',
u'guardsman',
u'guido_carosella',
u'gunthar_of_rigel',
u'gwen_stacy',
u'gypsy_moth',
u'herbie',
u'hack',
u'hag',
u'hairbag',
u'halflife',
u'halloween_jack',
u'hamilton_slade',
u'hammer_harrison',
u'hammer_and_anvil',
u'hammerhead',
u'hangman',
u'hank_mccoy',
u'hank_pym',
u'hanna_levy',
u'hannah_levy',
u'hannibal_king',
u'harald_jaekelsson',
u'hardcase',
u'hardcore',
u'hardnose',
u'hardshell',
u'hardwire',
u'hargen_the_measurer',
u'harmonica',
u'harness',
u'harold_happy_hogan',
u'harold_h_harold',
u'harpoon',
u'harpy',
u'harrier',
u'harry_leland',
u'harry_osborn',
u'hate_monger',
u'haven',
u'havok',
u'hawkeye',
u'hawkeye_ii',
u'hawkshaw',
u'haywire',
u'hazard',
u'hazmat',
u'headknocker',
u'headlok',
u'heart_attack',
u'heather_cameron',
u'hebe',
u'hecate',
u'hector',
u'heimdall',
u'heinrich_zemo',
u'hela',
u'helio',
u'hellcat',
u'helleyes',
u'hellfire',
u'hellion',
u'hellrazor',
u'helmut_zemo',
u'henry_hank_mccoy',
u'henry_peter_gyrich',
u'hensley_fargus',
u'hephaestus',
u'hepzibah',
u'her',
u'hera',
u'herbert_edgar_wyndham',
u'hercules',
u'herman_schultz',
u'hermes',
u'hermod',
u'hero',
u'hero_for_hire',
u'herr_kleiser',
u'hideko_takata',
u'high_evolutionary',
u'high_tech',
u'hijacker',
u'hildegarde',
u'him',
u'hindsight_lad',
u'hippolyta',
u'hisako_ichiki',
u'hit_maker',
u'hitman',
u'hobgoblin',
u'hobgoblin_ii',
u'hoder',
u'hogun',
u'holly',
u'honcho',
u'honey_lemon',
u'hood',
u'hornet',
u'horus',
u'howard_the_duck',
u'hrimhari',
u'hub',
u'hugh_jones',
u'hulk',
u'hulk_2099',
u'hulkling',
u'human_cannonball',
u'human_fly',
u'human_robot',
u'human_top',
u'human_top_ii',
u'human_torch',
u'human_torch_ii',
u'humbug',
u'humus_sapien',
u'huntara',
u'hurricane',
u'husk',
u'hussar',
u'hybrid',
u'hybrid_ii',
u'hyde',
u'hydro',
u'hydro_man',
u'hydron',
u'hyperion',
u'hyperkind',
u'hyperstorm',
u'hypnotia',
u'hyppokri',
u'isaac',
u'icarus',
u'iceman',
u'icemaster',
u'idunn',
u'iguana',
u'ikaris',
u'ikonn',
u'ikthalon',
u'illusion',
u'illyana_rasputin',
u'immortus',
u'impala',
u'imperial_hydra',
u'impossible_man',
u'impulse',
u'in_betweener',
u'indech',
u'indra',
u'inertia',
u'infamnia',
u'infant_terrible',
u'infectia',
u'inferno',
u'infinity',
u'interloper',
u'invisible_girl',
u'invisible_woman',
u'inza',
u'ion',
u'iridia',
u'iron_cross',
u'iron_fist',
u'iron_lad',
u'iron_maiden',
u'iron_man',
u'iron_man_2020',
u'iron_monger',
u'ironclad',
u'isaac_christians',
u'isaiah_bradley',
u'isbisa',
u'isis',
u'ivan_kragoff',
u'j_jonah_jameson',
u'j2',
u'jack_flag',
u'jack_frost',
u'jack_kirby',
u'jack_olantern',
u'jack_power',
u'jack_of_hearts',
u'jack_in_the_box',
u'jackal',
u'jackdaw',
u'jackhammer',
u'jackpot',
u'jackson_arvad',
u'jacob_jake_fury',
u'jacqueline_falsworth',
u'jacques_duquesne',
u'jade_dragon',
u'jaeger',
u'jaguar',
u'jamal_afari',
u'james_jimmy_marks',
u'james_dr_power',
u'james_howlett',
u'james_jaspers',
u'james_madrox',
u'james_proudstar',
u'james_rhodes',
u'james_sanders',
u'jamie_braddock',
u'jane_foster',
u'jane_kincaid',
u'janet_van_dyne',
u'jann',
u'janus',
u'jared_corbo',
u'jarella',
u'jaren',
u'jason',
u'jawynn_dueck_the_iron_christian_of_faith',
u'jazz',
u'jean_dewolff',
u'jean_grey',
u'jean_grey_summers',
u'jean_paul_beaubier',
u'jeanne_marie_beaubier',
u'jebediah_guthrie',
u'jeffrey_mace',
u'jekyll',
u'jennifer_kale',
u'jennifer_walters',
u'jens_meilleur_slap_shot',
u'jericho_drumm',
u'jerome_beechman',
u'jerry_jaxon',
u'jessica_drew',
u'jessica_jones',
u'jester',
u'jigsaw',
u'jim_hammond',
u'jimaine_szardos',
u'jimmy_woo',
u'jocasta',
u'joe_cartelli',
u'joe_fixit',
u'joey_bailey',
u'johann_schmidt',
u'john_doe',
u'john_falsworth',
u'john_jameson',
u'john_proudstar',
u'john_ryker',
u'john_sublime',
u'john_walker',
u'johnny_blaze',
u'johnny_ohm',
u'johnny_storm',
u'jolt',
u'jon_spectre',
u'jonas_harrow',
u'jonathan_john_garrett',
u'jonathan_richards',
u'jonothon_starsmore',
u'jordan_seberius',
u'joseph',
u'joshua_guthrie',
u'joystick',
u'jubilee',
u'judas_traveller',
u'jude_the_entropic_man',
u'juggernaut',
u'julie_power',
u'jumbo_carnation',
u'junkpile',
u'junta',
u'justice',
u'justin_hammer',
u'justine_hammer',
u'ka_zar',
u'kaine',
u'kala',
u'kaluu',
u'kamal',
u'kamo_tharnn',
u'kamuu',
u'kang_the_conqueror',
u'kangaroo',
u'karen_page',
u'karima_shapandar',
u'karkas',
u'karl_lykos',
u'karl_malus',
u'karl_mordo',
u'karla_sofen',
u'karma',
u'karnak',
u'karnilla',
u'karolina_dean',
u'karthon_the_quester',
u'kasper_cole',
u'kate_bishop',
u'kate_neville',
u'katherine_kitty_pryde',
u'katherine_reynolds',
u'katie_power',
u'katrina_luisa_van_horne',
u'katu',
u'keen_marlow',
u'kehl_of_tauran',
u'keith_kilham',
u'kem_horkus',
u'kenneth_crichton',
u'key',
u'khaos',
u'khonshu',
u'khoryphos',
u'kiber_the_cruel',
u'kick_ass',
u'kid_colt',
u'kid_nova',
u'kiden_nixon',
u'kierrok',
u'killer_shrike',
u'killpower',
u'killraven',
u'kilmer',
u'kimura',
u'king_bedlam',
u'kingo_sunen',
u'kingpin',
u'kirigi',
u'kirtsyn_perrin_short_stop',
u'kismet',
u'kismet_deadly',
u'kiss',
u'kiwi_black',
u'kkallakku',
u'klrt',
u'klaatu',
u'klaw',
u'kleinstocks',
u'knickknack',
u'kofi_whitemane',
u'kogar',
u'kohl_harder_boulder_man',
u'korath_the_pursuer',
u'korg',
u'kormok',
u'korrek',
u'korvac',
u'korvus',
u'kosmos',
u'kraken',
u'krakkan',
u'krang',
u'kraven_the_hunter',
u'krista_marwan',
u'kristoff_vernard',
u'kristoff_von_doom',
u'kro',
u'krystalin',
u'kubik',
u'kukulcan',
u'kurse',
u'kurt_wagner',
u'kwannon',
u'kyle_gibney',
u'kylun',
u'kymaera',
u'la_lunatica',
u'la_nuit',
u'lacuna',
u'lady_deathstrike',
u'lady_jacqueline_falsworth_crichton',
u'lady_killer',
u'lady_lark',
u'lady_lotus',
u'lady_mandarin',
u'lady_mastermind',
u'lady_octopus',
u'lament',
u'lancer',
u'landslide',
u'larry_bodine',
u'lasher',
u'laura_dean',
u'layla_miller',
u'lazarus',
u'leader',
u'leap_frog',
u'leash',
u'lee_forrester',
u'leech',
u'left_hand',
u'left_winger',
u'legacy',
u'legion',
u'leila_davis',
u'leir',
u'lemuel_dorcas',
u'leo',
u'leonard_samson',
u'leonus',
u'letha',
u'levan',
u'lianda',
u'libra',
u'lifeforce',
u'lifeguard',
u'lifter',
u'lightbright',
u'lighting_rod',
u'lightmaster',
u'lightspeed',
u'lila_cheney',
u'lilandra_neramani',
u'lilith_the_daughter_of_dracula',
u'lin_sun',
u'link',
u'lionheart',
u'live_wire',
u'living_brain',
u'living_colossus',
u'living_diamond',
u'living_eraser',
u'living_hulk',
u'living_laser',
u'living_lightning',
u'living_monolith',
u'living_mummy',
u'living_pharaoh',
u'living_planet',
u'living_totem',
u'living_tribunal',
u'liz_allan',
u'lizard',
u'llan_the_sorcerer',
u'lloigoroth',
u'llyra',
u'llyron',
u'loa',
u'lockdown',
u'lockheed',
u'lockjaw',
u'locksmith',
u'locus',
u'locust',
u'lodestone',
u'logan',
u'loki',
u'longneck',
u'longshot',
u'lonnie_thompson_lincoln',
u'looter',
u'lord_chaos',
u'lord_dark_wind',
u'lord_pumpkin',
u'lorelei',
u'lorelei_ii',
u'lorelei_travis',
u'lorna_dane',
u'lorvex',
u'loss',
u'louise_mason',
u'lucas_brand',
u'luchino_nefaria',
u'lucifer',
u'ludi',
u'luke_cage',
u'luna',
u'lunatica',
u'lunatik',
u'lupa',
u'lupo',
u'lurking_unknown',
u'lyja',
u'lynx',
u'm',
u'm_twins',
u'mn_e_ultraverse',
u'modam',
u'modok',
u'mac_gargan',
u'mach_iv',
u'machine_man',
u'machine_teen',
u'machinesmith',
u'mad_dog_rassitano',
u'mad_jack',
u'mad_jim_jaspers',
u'mad_thinker',
u'mad_thinkers_awesome_android',
u'mad_dog',
u'madam_slay',
u'madame_hydra',
u'madame_macevil',
u'madame_masque',
u'madame_menace',
u'madame_web',
u'madcap',
u'madeline_joyce',
u'madelyne_pryor',
u'madison_jeffries',
u'maelstrom',
u'maestro',
u'magdalena',
u'magdalene',
u'maggott',
u'magician',
u'magik',
u'magilla',
u'magma',
u'magneto',
u'magnum',
u'magnus',
u'magus',
u'maha_yogi',
u'mahkizmo',
u'major_mapleleaf',
u'makkari',
u'malekith_the_accursed',
u'malice',
u'mammomax',
u'man_mountain_marko',
u'man_ape',
u'man_beast',
u'man_brute',
u'man_bull',
u'man_eater',
u'man_elephant',
u'man_killer',
u'man_spider',
u'man_thing',
u'man_wolf',
u'manbot',
u'mandarin',
u'mandrill',
u'mandroid',
u'mangle',
u'mangog',
u'manikin',
u'manslaughter',
u'manta',
u'mantis',
u'mantra',
u'mar_vell',
u'marc_spector',
u'marduk_kurios',
u'margali_szardos',
u'margaret_power',
u'margo_damian',
u'maria_hill',
u'mariko_yashida',
u'marius_st_croix',
u'mark_gervaisnight_shade',
u'mark_raxton',
u'mark_scarlotti',
u'mark_todd',
u'marlene_alraune',
u'marrina',
u'marrina_smallwood',
u'marrow',
u'marsha_rosenberg',
u'martha_johansson',
u'martin_gold',
u'martin_preston',
u'martinex',
u'marvel_boy',
u'marvel_girl',
u'marvel_man',
u'marvin_flumm',
u'mary_skeeter_macpherran',
u'mary_jane_parker',
u'mary_jane_watson',
u'mary_walker',
u'mary_zero',
u'masked_marauder',
u'masked_marvel',
u'masked_rose',
u'masque',
u'mass_master',
u'master_khan',
u'master_man',
u'master_menace',
u'master_mold',
u'master_order',
u'master_pandemonium',
u'master_of_vengeance',
u'mastermind',
u'mastermind_of_the_uk',
u'matador',
u'match',
u'matsuo_tsurayaba',
u'matt_murdock',
u'mauler',
u'maur_konn',
u'mauvais',
u'maverick',
u'max',
u'maxam',
u'maximus',
u'maxwell_dillon',
u'may_mayday_parker',
u'may_parker',
u'mayhem',
u'maynard_tiboldt',
u'meanstreak',
u'meathook',
u'mechamage',
u'medusa',
u'meggan',
u'meggan_braddock',
u'mekano',
u'meld',
u'melee',
u'melissa_gold',
u'melody_guthrie',
u'meltdown',
u'melter',
u'mentallo',
u'mentor',
u'mentus',
u'mephisto',
u'mercurio',
u'mercury',
u'mercy',
u'merlin',
u'mesmero',
u'metal_master',
u'metalhead',
u'meteor_man',
u'meteorite',
u'meteorite_ii',
u'michael_nowman',
u'michael_twoyoungmen',
u'micro',
u'microchip',
u'micromax',
u'midas',
u'midgard_serpent',
u'midnight',
u'midnight_man',
u'midnight_sun',
u'miek',
u'miguel_espinosa',
u'miguel_ohara',
u'miguel_santos',
u'mikado',
u'mikey',
u'mikhail_rasputin',
u'mikula_golubev',
u'milan',
u'miles_warren',
u'milos_masaryk',
u'mimic',
u'mimir',
u'mindmeld',
u'mindworm',
u'miracle_man',
u'mirage',
u'mirage_ii',
u'misfit',
u'miss_america',
u'missing_link',
u'mist_mistress',
u'mister_buda',
u'mister_doll',
u'mister_fear',
u'mister_hyde',
u'mister_jip',
u'mister_machine',
u'mister_one',
u'mister_sensitive',
u'mister_sinister',
u'mister_two',
u'mister_x',
u'misty_knight',
u'mockingbird',
u'modred_the_mystic',
u'mogul_of_the_mystic_mountain',
u'moira_brandon',
u'moira_mactaggert',
u'mojo',
u'mole_man',
u'molecule_man',
u'molly_hayes',
u'molten_man',
u'mondo',
u'monet_st_croix',
u'mongoose',
u'monica_rappaccini',
u'monsoon',
u'monstra',
u'monstro_the_mighty',
u'moon_knight',
u'moon_boy',
u'moondark',
u'moondragon',
u'moonhunter',
u'moonstone',
u'mop_man',
u'morbius',
u'mordred',
u'morg',
u'morgan_le_fay',
u'morlun',
u'morning_star',
u'morph',
u'morpheus',
u'morris_bench',
u'mortimer_toynbee',
u'moses_magnum',
u'mosha',
u'mother_earth',
u'mother_nature',
u'mother_night',
u'mother_superior',
u'motormouth',
u'mountjoy',
u'mr_fish',
u'mr_justice',
u'mr_m',
u'mr_wu',
u'ms_modok',
u'ms_marvel',
u'ms_steed',
u'multiple_man',
u'murmur',
u'murmur_ii',
u'mutant_master',
u'mutant_x',
u'myron_maclain',
u'mys_tech',
u'mysterio',
u'mystique',
u'ngabthoth',
u'ngarai',
u'nastirh',
u'nfl_superpro',
u'naga',
u'nameless_one',
u'namor_mckenzie',
u'namor_the_sub_mariner',
u'namora',
u'namorita',
u'nanny',
u'nate_grey',
u'nathaniel_essex',
u'nathaniel_richards',
u'native',
u'nebula',
u'nebulo',
u'nebulon',
u'nebulos',
u'necrodamus',
u'necromantra',
u'ned_horrocks',
u'ned_leeds',
u'needle',
u'nefarius',
u'negasonic_teenage_warhead',
u'nekra',
u'nekra_sinclar',
u'nemesis',
u'neophyte',
u'neptune',
u'network',
u'neuronne',
u'neurotap',
u'new_goblin',
u'nezarr_the_calculator',
u'nicholas_maunder',
u'nicholas_scratch',
u'nick_fury',
u'nico_minoru',
u'nicole_st_croix',
u'night_nurse',
u'night_rider',
u'night_thrasher',
u'nightcrawler',
u'nighthawk',
u'nightmare',
u'nightshade',
u'nightside',
u'nightwatch',
u'nightwind',
u'nikki',
u'niles_van_roekel',
u'nimrod',
u'ningal',
u'nitro',
u'nobilus',
u'nocturne',
u'noh_varr',
u'nomad',
u'norman_osborn',
u'norns',
u'norrin_radd',
u'northstar',
u'nosferata',
u'nova',
u'nova_prime',
u'novs',
u'nox',
u'nth_man',
u'nth_man_the_ultimate_ninja',
u'nuke_frank_simpson',
u'nuke_squadron_supreme_member',
u'nuklo',
u'numinus',
u'nut',
u'obadiah_stane',
u'obituary',
u'obliterator',
u'oblivion',
u'occulus',
u'ocean',
u'ocelot',
u'oddball',
u'odin',
u'ogre',
u'ogress',
u'omega',
u'omega_red',
u'omega_the_unknown',
u'omen',
u'omerta',
u'one_above_all',
u'oneg_the_prober',
u'onslaught',
u'onyxx',
u'ooze',
u'optoman',
u'oracle',
u'orator',
u'orb',
u'orbit',
u'orchid',
u'ord',
u'order',
u'orikal',
u'orka',
u'ororo_munroe',
u'orphan',
u'orphan_maker',
u'osiris',
u'outlaw',
u'outrage',
u'overkill',
u'overmind',
u'overrider',
u'owl',
u'ox',
u'ozone',
u'ozymandias',
u'paibo',
u'paige_guthrie',
u'paladin',
u'paradigm',
u'paragon',
u'paralyzer',
u'paris',
u'pasco',
u'paste_pot_pete',
u'patch',
u'pathway',
u'patriot',
u'patriot_ii',
u'patsy_hellstrom',
u'patsy_walker',
u'paul_bailey',
u'paul_norbert_ebersol',
u'paul_patterson',
u'payback',
u'peace_monger',
u'peepers',
u'peggy_carter',
u'penance',
u'penance_ii',
u'peregrine',
u'perfection',
u'perseus',
u'persuader',
u'persuasion',
u'perun',
u'pete_wisdom',
u'peter_criss',
u'peter_noble',
u'peter_parker',
u'peter_petruski',
u'phade',
u'phage',
u'phalanx',
u'phantazia',
u'phantom_blonde',
u'phantom_eagle',
u'phantom_rider',
u'phastos',
u'phat',
u'phil_urich',
u'philip_fetter',
u'phineas_t_horton',
u'phoenix',
u'photon',
u'phyla_vell',
u'pietro_maximoff',
u'piledriver',
u'piotr_rasputin',
u'pip_the_troll',
u'pipeline',
u'piper',
u'piranha',
u'pisces',
u'pistol',
u'pixie',
u'pixx',
u'plague',
u'plantman',
u'plasma',
u'plazm',
u'plug',
u'plunderer',
u'pluto',
u'poison',
u'polaris',
u'poltergeist',
u'porcupine',
u'portal',
u'possessor',
u'postman',
u'postmortem',
u'poundcakes',
u'powderkeg',
u'power_broker',
u'power_man',
u'power_princess',
u'power_skrull',
u'powerhouse',
u'powerpax',
u'presence',
u'pressure',
u'prester_john',
u'pretty_persuasions',
u'preview',
u'primal',
u'prime',
u'prime_mover',
u'primevil',
u'primus',
u'princess_python',
u'proctor',
u'prodigy',
u'professor_power',
u'professor_x',
u'projector',
u'prometheus',
u'protector',
u'proteus',
u'prototype',
u'prowler',
u'psi_lord',
u'psyche',
u'psycho_man',
u'psyklop',
u'psylocke',
u'puck',
u'puff_adder',
u'puishannt',
u'pulse',
u'puma',
u'punchout',
u'punisher',
u'punisher_2099',
u'puppet_master',
u'purge',
u'purple_girl',
u'purple_man',
u'pyre',
u'pyro',
u'quagmire',
u'quantum',
u'quasar',
u'quasar_ii',
u'quasimodo',
u'quentin_beck',
u'quentin_quire',
u'quicksand',
u'quicksilver',
u'quincy_harker',
u'raa_of_the_caves',
u'rachel_grey',
u'rachel_summers',
u'rachel_van_helsing',
u'radian',
u'radioactive_man',
u'radion_the_atomic_man',
u'radius',
u'rafferty',
u'rage',
u'raggadorr',
u'rahne_sinclair',
u'rainbow',
u'rama_tut',
u'raman',
u'ramrod',
u'ramshot',
u'rancor',
u'randall_shire',
u'random',
u'ranger',
u'ransak_the_reject',
u'rattler',
u'ravage_2099',
u'raving_beauty',
u'rawhide_kid',
u'rax',
u'raymond_sikorsky',
u'raza',
u'razor_fist',
u'razorback',
u'reaper',
u'rebel',
u'recorder',
u'red_claw',
u'red_ghost',
u'red_guardian',
u'red_lotus',
u'red_nine',
u'red_raven',
u'red_ronin',
u'red_shift',
u'red_skull',
u'red_skull_ii',
u'red_wolf',
u'redeemer',
u'redneck',
u'redwing',
u'reeva_payge',
u'reignfire',
u'reject',
u'remnant',
u'remy_lebeau',
u'reptyl',
u'revanche',
u'rex_mundi',
u'rhiannon',
u'rhino',
u'ricadonna',
u'richard_fisk',
u'richard_parker',
u'richard_rider',
u'rick_jones',
u'ricochet',
u'rictor',
u'rigellian_recorder',
u'right_winger',
u'ringer',
u'ringleader',
u'ringmaster',
u'ringo_kid',
u'rintrah',
u'riot',
u'riot_grrl',
u'ripfire',
u'ritchie_gilmore',
u'rlnnd',
u'robbie_robertson',
u'robert_bobby_drake',
u'robert_bruce_banner',
u'robert_hunter',
u'robert_kelly',
u'robert_da_costa',
u'rock',
u'rock_python',
u'rocket_raccoon',
u'rocket_racer',
u'rodstvow',
u'rogue',
u'rom_the_spaceknight',
u'roma',
u'romany_wisdom',
u'ronan_the_accuser',
u'rose',
u'roughhouse',
u'roulette',
u'royal_roy',
u'ruby_thursday',
u'ruckus',
u'rumiko_fujikawa',
u'rune',
u'runner',
u'rush',
u'rusty_collins',
u'ruth_bat_seraph',
u'ryder',
u'sbyll',
u'sym',
u'sabra',
u'sabreclaw',
u'sabretooth',
u'sack',
u'sage',
u'sagittarius',
u'saint_anna',
u'saint_elmo',
u'sally_blevins',
u'sally_floyd',
u'salvo',
u'sam_sawyer',
u'sam_wilson',
u'samuel_starr_saxon',
u'samuel_guthrie',
u'samuel_silke',
u'samuel_smithers',
u'sandman',
u'sangre',
u'sara_grey',
u'sasquatch',
u'satana',
u'satannish',
u'saturnyne',
u'sauron',
u'savage_steel',
u'sayge',
u'scaleface',
u'scalphunter',
u'scanner',
u'scarecrow',
u'scarecrow_ii',
u'scarlet_beetle',
u'scarlet_centurion',
u'scarlet_scarab',
u'scarlet_spider',
u'scarlet_spiders',
u'scarlet_witch',
u'schemer',
u'scimitar',
u'scintilla',
u'scorcher',
u'scorpia',
u'scorpio',
u'scorpion',
u'scott_summers',
u'scott_washington',
u'scourge_of_the_underworld',
u'scrambler',
u'scream',
u'screaming_mimi',
u'screech',
u'scrier',
u'sea_urchin',
u'seamus_mellencamp',
u'sean_cassidy',
u'sean_garrison',
u'sebastian_shaw',
u'seeker',
u'sekhmet',
u'selene',
u'senator_robert_kelly',
u'senor_muerte',
u'sentry',
u'sepulchre',
u'sergeant_fury',
u'sergei_kravinoff',
u'serpentina',
u'sersi',
u'set',
u'seth',
u'shadow_king',
u'shadow_slasher',
u'shadow_hunter',
u'shadowcat',
u'shadowmage',
u'shadrac',
u'shalla_bal',
u'shaman',
u'shamrock',
u'shang_chi',
u'shanga',
u'shanna_the_she_devil',
u'shaper_of_worlds',
u'shard',
u'sharon_carter',
u'sharon_friedlander',
u'sharon_ventura',
u'shathra',
u'shatter',
u'shatterfist',
u'shatterstar',
u'she_hulk',
u'she_thing',
u'she_venom',
u'shellshock',
u'shen_kuei',
u'shiar_gladiator',
u'shinchuko_lotus',
u'shingen_harada',
u'shinobi_shaw',
u'shirow_ishihara',
u'shiva',
u'shiver_man',
u'shocker',
u'shockwave',
u'shola_inkosi',
u'shooting_star',
u'shotgun',
u'shriek',
u'shriker',
u'shroud',
u'shrunken_bones',
u'shuma_gorath',
u'sidewinder',
u'siege',
u'siena_blaze',
u'sif',
u'sigmar',
u'sigyn',
u'sikorsky',
u'silhouette',
u'silly_seal',
u'silver',
u'silver_dagger',
u'silver_fox',
u'silver_sable',
u'silver_samurai',
u'silver_scorpion',
u'silver_squire',
u'silver_surfer',
u'silverclaw',
u'silvermane',
u'simon_williams',
u'sin',
u'sin_eater',
u'sinister',
u'sir_steel',
u'siryn',
u'sise_neg',
u'skein',
u'skids',
u'skin',
u'skinhead',
u'skull_the_slayer',
u'skullcrusher',
u'skullfire',
u'skunge_the_laxidazian_troll',
u'skyhawk',
u'skywalker',
u'slab',
u'slapstick',
u'sleek',
u'sleeper',
u'sleepwalker',
u'slick',
u'sligguth',
u'slipstream',
u'slither',
u'sludge',
u'slug',
u'sluggo',
u'sluk',
u'slyde',
u'smart_alec',
u'smartship_friday',
u'smasher',
u'smuggler',
u'smuggler_ii',
u'snowbird',
u'snowfall',
u'solara',
u'solarman',
u'solarr',
u'soldier_x',
u'solitaire',
u'solo',
u'solomon_osullivan',
u'son_of_satan',
u'songbird',
u'soulfire',
u'space_phantom',
u'space_turnip',
u'specialist',
u'spectra',
u'spectral',
u'speed',
u'speed_demon',
u'speedball',
u'speedo',
u'spellbinder',
u'spellcheck',
u'spencer_smythe',
u'sphinx',
u'sphinxor',
u'spider_doppelganger',
u'spider_girl',
u'spider_ham',
u'spider_man',
u'spider_slayer',
u'spider_woman',
u'spidercide',
u'spike',
u'spike_freeman',
u'spinnerette',
u'spiral',
u'spirit_of_76',
u'spitfire',
u'spoilsport',
u'spoor',
u'spot',
u'sprite',
u'sputnik',
u'spyder',
u'spymaster',
u'spyne',
u'squidboy',
u'squirrel_girl',
u'st_john_allerdyce',
u'stacy_x',
u'stained_glass_scarlet',
u'stakar',
u'stallior',
u'stanley_stewart',
u'star_stalker',
u'star_thief',
u'star_dancer',
u'star_lord',
u'starbolt',
u'stardust',
u'starfox',
u'starhawk',
u'starlight',
u'starr_the_slayer',
u'starshine',
u'starstreak',
u'stature',
u'steel_raven',
u'steel_serpent',
u'steel_spider',
u'stegron',
u'stellaris',
u'stem_cell',
u'stentor',
u'stephen_colbert',
u'stephen_strange',
u'steve_rogers',
u'steven_lang',
u'stevie_hunter',
u'stick',
u'stiletto',
u'stilt_man',
u'stinger',
u'stingray',
u'stitch',
u'stone',
u'stonecutter',
u'stonewall',
u'storm',
u'stranger',
u'stratosfire',
u'straw_man',
u'strobe',
u'strong_guy',
u'strongarm',
u'stryfe',
u'stunner',
u'stuntmaster',
u'stygorr',
u'stygyro',
u'styx_and_stone',
u'sub_mariner',
u'sugar_man',
u'suicide',
u'sultan',
u'sun_girl',
u'sunder',
u'sundragon',
u'sunfire',
u'sunpyre',
u'sunset_bain',
u'sunspot',
u'sunstreak',
u'sunstroke',
u'sunturion',
u'super_rabbit',
u'super_sabre',
u'super_adaptoid',
u'super_nova',
u'super_skrull',
u'superpro',
u'supercharger',
u'superia',
u'supernalia',
u'suprema',
u'supreme_intelligence',
u'supremor',
u'surge',
u'surtur',
u'susan_richards',
u'susan_storm',
u'sushi',
u'svarog',
u'swarm',
u'sweetface',
u'swordsman',
u'sybil_dorn',
u'sybil_dvorak',
u'synch',
u't_ray',
u'tabitha_smith',
u'tag',
u'tagak_the_leopard_lord',
u'tailhook',
u'taj_nital',
u'talia_josephine_wagner',
u'talisman',
u'tamara_rahn',
u'tana_nile',
u'tantra',
u'tanya_anderssen',
u'tarantula',
u'tarot',
u'tartarus',
u'taskmaster',
u'tatterdemalion',
u'tattletale',
u'tattoo',
u'taurus',
u'techno',
u'tefral_the_surveyor',
u'tempest',
u'tempo',
u'tempus',
u'temugin',
u'tenpin',
u'termagaira',
u'terminator',
u'terminatrix',
u'terminus',
u'terrax_the_tamer',
u'terraxia',
u'terror',
u'tess_one',
u'tessa',
u'tether',
u'tethlam',
u'tex_dawson',
u'texas_twister',
u'thakos',
u'thane_ector',
u'thanos',
u'the_amazing_tanwir_ahmed',
u'the_angel',
u'the_blank',
u'the_destroyer',
u'the_entity',
u'the_grip',
u'the_night_man',
u'the_profile',
u'the_russian',
u'the_stepford_cuckoos',
u'the_symbiote',
u'the_wink',
u'thena',
u'theresa_cassidy',
u'thermo',
u'thin_man',
u'thing',
u'thinker',
u'thirty_three',
u'thog',
u'thomas_halloway',
u'thor',
u'thor_girl',
u'thornn',
u'threnody',
u'thumbelina',
u'thunderball',
u'thunderbird',
u'thunderbolt',
u'thunderclap',
u'thunderfist',
u'thunderstrike',
u'thundra',
u'tiboro',
u'tiger_shark',
u'tigra',
u'timberius',
u'time_bomb',
u'timeshadow',
u'timeslip',
u'tinkerer',
u'titan',
u'titania',
u'titanium_man',
u'tito_bohusk',
u'toad',
u'toad_in_waiting',
u'todd_arliss',
u'tom_cassidy',
u'tom_corsi',
u'tom_foster',
u'tom_thumb',
u'tomazooma',
u'tombstone',
u'tommy',
u'tommy_lightning',
u'tomorrow_man',
u'tony_stark',
u'topaz',
u'topspin',
u'torgo_of_mekka',
u'torgo_the_vampire',
u'toro',
u'torpedo',
u'torrent',
u'torso',
u'tower',
u'toxin',
u'trader',
u'trapper',
u'trapster',
u'tremolo',
u'trevor_fitzroy',
u'tri_man',
u'triathlon',
u'trick_shot',
u'trioccula',
u'trip_monroe',
u'triton',
u'troll',
u'trump',
u'tuc',
u'tugun',
u'tumbler',
u'tundra',
u'turac',
u'turbo',
u'turner_century',
u'turner_d_century',
u'tusk',
u'tutinax_the_mountain_mover',
u'two_gun_kid',
u'tyger_tiger',
u'typeface',
u'typhoid',
u'typhoid_mary',
u'typhon',
u'tyr',
u'tyrak',
u'tyrannosaur',
u'tyrannus',
u'tyrant',
u'tzabaoth',
u'u_go_girl',
u'u_man',
u'usagent',
u'uatu',
u'ulik',
u'ultimo',
u'ultimus',
u'ultra_marine',
u'ultragirl',
u'ultron',
u'ulysses',
u'umar',
u'umbo',
u'uncle_ben_parker',
u'uni_mind',
u'unicorn',
u'union_jack',
u'unseen',
u'unthinnk',
u'unus_the_untouchable',
u'unuscione',
u'ursa_major',
u'urthona',
u'utgard_loki',
u'vagabond',
u'vague',
u'vakume',
u'valentina_allegra_de_la_fontaine',
u'valerie_cooper',
u'valinor',
u'valkin',
u'valkyrie',
u'valtorr',
u'vamp',
u'vampire_by_night',
u'vance_astro',
u'vance_astrovik',
u'vanguard',
u'vanisher',
u'vapor',
u'vargas',
u'varnae',
u'vashti',
u'vavavoom',
u'vector',
u'vegas',
u'veil',
u'vengeance',
u'venom',
u'venomm',
u'venus',
u'venus_dee_milo',
u'veritas',
u'vermin',
u'vertigo',
u'vesta',
u'vibraxas',
u'vibro',
u'victor_creed',
u'victor_mancha',
u'victor_strange',
u'victor_von_doom',
u'victorius',
u'vidar',
u'vincente',
u'vindaloo',
u'vindicator',
u'viper',
u'virako',
u'virginia_pepper_potts',
u'virgo',
u'vishanti',
u'visimajoris',
u'vision',
u'vivisector',
u'vixen',
u'volcana',
u'volla',
u'volpan',
u'volstagg',
u'vulcan',
u'vulture',
u'wade_wilson',
u'wallflower',
u'walter_newell',
u'wanda_maximoff',
u'war',
u'war_eagle',
u'war_machine',
u'war_v',
u'warbird',
u'warhawk',
u'warlock',
u'warpath',
u'warren_iii_worthington',
u'warrior_woman',
u'warstar',
u'warstrike',
u'warwolves',
u'washout',
u'wasp',
u'watcher',
u'water_wizard',
u'watoomb',
u'weapon_x',
u'wendell_vaughn',
u'wendigo',
u'werewolf_by_night',
u'western_kid',
u'whiplash',
u'whirlwind',
u'whistler',
u'white_fang',
u'white_pilgrim',
u'white_queen',
u'white_rabbit',
u'white_tiger',
u'whiteout',
u'whizzer',
u'wiccan',
u'wicked',
u'widget',
u'wilbur_day',
u'wild_child',
u'wild_thing',
u'wildboys',
u'wildpride',
u'wildside',
u'will_o_the_wisp',
u'william_baker',
u'william_stryker',
u'willie_lumpkin',
u'wilson_fisk',
u'wind_dancer',
u'wind_warrior',
u'windeagle',
u'windshear',
u'winky_man',
u'winter_soldier',
u'witchfire',
u'wiz_kid',
u'wizard',
u'wolf',
u'wolfsbane',
u'wolverine',
u'wonder_man',
u'wong',
u'woodgod',
u'worm',
u'wraith',
u'wrath',
u'wreckage',
u'wrecker',
u'wundarr_the_aquarian',
u'wyatt_wingfoot',
u'wysper',
u'x_23',
u'x_cutioner',
u'x_man',
u'x_ray',
u'x_treme',
u'xandu',
u'xavin',
u'xemnu_the_titan',
u'xemu',
u'xian_chi_xan',
u'xorn',
u'xorr_the_god_jewel',
u'ygaron',
u'yandroth',
u'yellow_claw',
u'yellowjacket',
u'yeti',
u'yith',
u'ymir',
u'yondu',
u'yrial',
u'yukio',
u'yukon_jack',
u'yuri_topolov',
u'yuriko_oyama',
u'zabu',
u'zach',
u'zaladane',
u'zarathos',
u'zarek',
u'zartra',
u'zebediah_killgrave',
u'zeitgeist',
u'zero',
u'zero_g',
u'zeus',
u'ziggy_pig',
u'zip_zap',
u'zodiak',
u'zom',
u'zombie',
u'zuras',
u'zzzax',
u'gen_harada',
u'the_living_colossus_it',
u'the_living_darkness_null',
u'the_renegade_watcher_aron',
u'the_tomorrow_man_zarrko'
]
def get_float_from_string(seed):
''' Probably bad way to get a float from secret key'''
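    # 2**256 - 1, the largest possible SHA-256 digest value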
max_sha_float = float(115792089237316195423570985008687907853269984665640564039457584007913129639935)
h = hashlib.sha256(seed.encode('utf-8'))
return int(h.hexdigest(), 16) / max_sha_float
def shuffle_list(original, seed):
    '''Shuffle `original` in place; the same seed yields the same order.'''
    # random.shuffle() works in place and returns None, so nothing useful
    # can be returned here (its `random` argument was removed in Python 3.11)
    float_seed = get_float_from_string(seed)
    random.shuffle(original, lambda: float_seed)
shuffle_list(NAMES, settings.SECRET_KEY)
def get_name_from_number(num):
x = num % len(NAMES)
return NAMES[x]
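# Hypothetical usage sketch; these calls are not in the original module and
# assume Django settings are configured, since the shuffle above runs at
# import time using settings.SECRET_KEY:
#
#   get_name_from_number(42)               # deterministic for a fixed SECRET_KEY
#   get_name_from_number(42 + len(NAMES))  # same name: the index wraps modulo len(NAMES)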
|
LilithWittmann/froide
|
froide/helper/name_generator.py
|
Python
|
mit
| 41,989
|
[
"CRYSTAL",
"Jaguar"
] |
9c40aa93d2bcc9923fc9f48e459c246ba6fad1bbc46163ab6ffc9f381e9b2f81
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.clustering import GaussianMixture
# $example off$
from pyspark.sql import SparkSession
"""
A simple example demonstrating Gaussian Mixture Model (GMM).
Run with:
bin/spark-submit examples/src/main/python/ml/gaussian_mixture_example.py
"""
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("GaussianMixtureExample")\
.getOrCreate()
# $example on$
# loads data
dataset = spark.read.format("libsvm").load("data/mllib/sample_kmeans_data.txt")
gmm = GaussianMixture().setK(2).setSeed(538009335)
model = gmm.fit(dataset)
print("Gaussians shown as a DataFrame: ")
model.gaussiansDF.show(truncate=False)
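    # Illustrative extra output (not in the original example): the fitted
    # mixing weights, a list of k probabilities summing to 1.
    print("Weights: ", model.weights)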
# $example off$
spark.stop()
|
chgm1006/spark-app
|
src/main/python/ml/gaussian_mixture_example.py
|
Python
|
apache-2.0
| 1,570
|
[
"Gaussian"
] |
e2fc6cf695a2ddd723196a65213cdb651337112a6d8df8c729a4318b26945189
|
# -*- coding: utf-8 -*-
import os
import random
import pygame.display
import core
import engine
import platformsbuilder
class Level0(core.BaseLevel):
BACKGROUND_PATH = engine.background_path("wall.png")
def create_ennemies(self):
self.create_slow_fantom(250, 150)
def create_platforms(self):
platformsbuilder.platform0_builder(self)
class Level1(core.BaseLevel):
BACKGROUND_PATH = engine.background_path("bridge.png")
def create_ennemies(self):
self.create_fast_fantom(50, 50)
def create_platforms(self):
platformsbuilder.platform1_builder(self)
class Level2(core.BaseLevel):
BACKGROUND_PATH = engine.background_path("desert-1.png")
def create_ennemies(self):
self.create_slow_fantom(250, 50)
self.create_slow_fantom(450, 150)
def create_platforms(self):
platformsbuilder.platform2_builder(self)
class Level3(core.BaseLevel):
BACKGROUND_PATH = engine.background_path("desert-2.png")
def create_ennemies(self):
self.create_octopus(450, 50)
self.create_slow_fantom(250, 50)
self.create_slow_fantom(450, 150)
def create_platforms(self):
platformsbuilder.platform3_builder(self)
class Level4(core.BaseLevel):
BACKGROUND_PATH = engine.background_path("wall.png")
def create_ennemies(self):
self.create_octopus(450, 50)
self.create_left_bird(250)
self.create_right_bird(320)
def create_platforms(self):
platformsbuilder.platform4_builder(self)
LEVELS = {0: Level0,
1: Level1,
2: Level2,
3: Level3,
4: Level4,
}
class RandomLevel(core.BaseLevel):
def __init__(self, game):
backgrounds = os.listdir(engine.background_dir())
background_filename = random.choice(backgrounds)
self.BACKGROUND_PATH = engine.background_path(background_filename)
super(RandomLevel, self).__init__(game)
def create_platforms(self):
platformsbuilder.random_builder(self)
def create_ennemies(self):
for ennemy_id, qty in self.game.random_ennemies.items():
for i in range(qty):
self._create_ennemy(ennemy_id)
        ennemy_id = random.choice(list(self.game.random_ennemies.keys()))
self._create_ennemy(ennemy_id)
self.game.random_ennemies[ennemy_id] += 1
def _create_ennemy(self, ennemy_id):
ennemy_creator = {
self.game.SLOW_FANTOM: self._create_slow_fantom,
self.game.FAST_FANTOM: self._create_fast_fantom,
self.game.OCTOPUS: self._create_octopus,
self.game.LEFT_BIRD: self._create_left_bird,
self.game.RIGHT_BIRD: self._create_right_bird,
}
ennemy_creator[ennemy_id]()
def _create_slow_fantom(self):
x = random.randint(0, self.screen.get_width() - 50)
y = random.randint(0, self.screen.get_height() - 200)
self.create_slow_fantom(x, y)
def _create_fast_fantom(self):
x = random.randint(0, self.screen.get_width() - 50)
y = random.randint(0, self.screen.get_height() - 150)
self.create_fast_fantom(x, y)
def _create_octopus(self):
x = random.randint(0, self.screen.get_width() - 50)
y = random.randint(125, 375)
self.create_octopus(x, y)
def _create_left_bird(self):
y = random.randint(0, self.screen.get_height() - 100)
self.create_left_bird(y)
def _create_right_bird(self):
y = random.randint(0, self.screen.get_height() - 100)
self.create_right_bird(y)
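# Hypothetical usage sketch, not part of the original module: pick a level
# class by number, falling back to RandomLevel for numbers past the last
# hand-built level. `game` stands for whatever object core.BaseLevel expects.
#
#   level_class = LEVELS.get(level_number, RandomLevel)
#   level = level_class(game)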
|
sblondon/jumpjump
|
src/levels/progress.py
|
Python
|
gpl-3.0
| 3,624
|
[
"Octopus"
] |
1efb721b2bdcba2de27102edbad472598224117585d22b132972cd6a1de9cb02
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
import torch.autograd as autograd
import torch.optim as optim
from torch.distributions import transform_to
import pyro.contrib.gp as gp
import pyro.optim
from pyro.infer import TraceEnum_ELBO
class GPBayesOptimizer(pyro.optim.multi.MultiOptimizer):
"""Performs Bayesian Optimization using a Gaussian Process as an
emulator for the unknown function.
"""
def __init__(self, constraints, gpmodel, num_acquisitions, acquisition_func=None):
"""
:param torch.constraint constraints: constraints defining the domain of `f`
:param gp.models.GPRegression gpmodel: a (possibly initialized) GP
regression model. The kernel, etc is specified via `gpmodel`.
:param int num_acquisitions: number of points to acquire at each step
:param function acquisition_func: a function to generate acquisitions.
It should return a torch.Tensor of new points to query.
"""
if acquisition_func is None:
acquisition_func = self.acquire_thompson
self.constraints = constraints
self.gpmodel = gpmodel
self.num_acquisitions = num_acquisitions
self.acquisition_func = acquisition_func
def update_posterior(self, X, y):
X = torch.cat([self.gpmodel.X, X])
y = torch.cat([self.gpmodel.y, y])
self.gpmodel.set_data(X, y)
optimizer = torch.optim.Adam(self.gpmodel.parameters(), lr=0.001)
gp.util.train(
self.gpmodel,
optimizer,
loss_fn=TraceEnum_ELBO(
strict_enumeration_warning=False
).differentiable_loss,
retain_graph=True,
)
def find_a_candidate(self, differentiable, x_init):
"""Given a starting point, `x_init`, takes one LBFGS step
to optimize the differentiable function.
:param function differentiable: a function amenable to torch
autograd
:param torch.Tensor x_init: the initial point
"""
# transform x to an unconstrained domain
unconstrained_x_init = transform_to(self.constraints).inv(x_init)
unconstrained_x = unconstrained_x_init.detach().clone().requires_grad_(True)
        # TODO: Use LBFGS with line search once pytorch #8824 is merged
minimizer = optim.LBFGS([unconstrained_x], max_eval=20)
def closure():
minimizer.zero_grad()
if (torch.log(torch.abs(unconstrained_x)) > 25.0).any():
return torch.tensor(float("inf"))
x = transform_to(self.constraints)(unconstrained_x)
y = differentiable(x)
autograd.backward(
unconstrained_x, autograd.grad(y, unconstrained_x, retain_graph=True)
)
return y
minimizer.step(closure)
# after finding a candidate in the unconstrained domain,
# convert it back to original domain.
x = transform_to(self.constraints)(unconstrained_x)
opt_y = differentiable(x)
return x.detach(), opt_y.detach()
def opt_differentiable(self, differentiable, num_candidates=5):
"""Optimizes a differentiable function by choosing `num_candidates`
initial points at random and calling :func:`find_a_candidate` on
each. The best candidate is returned with its function value.
:param function differentiable: a function amenable to torch autograd
:param int num_candidates: the number of random starting points to
use
:return: the minimiser and its function value
:rtype: tuple
"""
candidates = []
values = []
for j in range(num_candidates):
x_init = torch.empty(
1, dtype=self.gpmodel.X.dtype, device=self.gpmodel.X.device
).uniform_(self.constraints.lower_bound, self.constraints.upper_bound)
x, y = self.find_a_candidate(differentiable, x_init)
if torch.isnan(y):
continue
candidates.append(x)
values.append(y)
mvalue, argmin = torch.min(torch.cat(values), dim=0)
return candidates[argmin.item()], mvalue
def acquire_thompson(self, num_acquisitions=1, **opt_params):
"""Selects `num_acquisitions` query points at which to query the
original function by Thompson sampling.
:param int num_acquisitions: the number of points to generate
:param dict opt_params: additional parameters for optimization
routines
:return: a tensor of points to evaluate `loss` at
:rtype: torch.Tensor
"""
# Initialize the return tensor
X = self.gpmodel.X
X = torch.empty(num_acquisitions, *X.shape[1:], dtype=X.dtype, device=X.device)
for i in range(num_acquisitions):
sampler = self.gpmodel.iter_sample(noiseless=False)
x, _ = self.opt_differentiable(sampler, **opt_params)
X[i, ...] = x
return X
def get_step(self, loss, params, verbose=False):
X = self.acquisition_func(num_acquisitions=self.num_acquisitions)
y = loss(X)
if verbose:
print("Acquire at: X")
print(X)
print("y")
print(y)
self.update_posterior(X, y)
return self.opt_differentiable(lambda x: self.gpmodel(x)[0])
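# Hypothetical usage sketch, not part of the original module. All names are
# illustrative: `f` is assumed to be a 1-D objective on [0, 1] that maps a
# tensor of query points to a tensor of values.
def _bayes_opt_sketch(f, num_steps=10):
    from torch.distributions import constraints
    X = torch.rand(5)
    y = f(X)
    gpmodel = gp.models.GPRegression(X, y, gp.kernels.Matern52(input_dim=1),
                                     noise=torch.tensor(0.1))
    optimizer = GPBayesOptimizer(constraints.interval(0.0, 1.0), gpmodel,
                                 num_acquisitions=2)
    for _ in range(num_steps):
        xmin, fmin = optimizer.get_step(f, None)  # `params` is unused by this optimizer
    return xmin, fmin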
|
uber/pyro
|
examples/contrib/oed/gp_bayes_opt.py
|
Python
|
apache-2.0
| 5,456
|
[
"Gaussian"
] |
75c2d6eb5c224b219bb67a5ef25d8eb6c084232d66968541b841a9c36f75056b
|
import os, sys
import numpy as np
from ase.units import Hartree, Bohr
import inspect
class ORCA:
"""class for performing ORCA calculations, without inclusion of the
ORCA python wrapper"""
def __init__(self,label='orca',**kwargs):
"""input file hack will be put in here right now"""
self.label = label
self.converged = False
self.stress = np.empty((3, 3))
self.base_dir = os.getcwd()
        # optional keyword arguments with their defaults
        self.pathtoorca = kwargs.pop('pathtoorca',
                                     '/home/frank/prog/orca/orca_x86_64_exe_r2360/orca')
        if 'calc_dir' in kwargs:
            self.calc_dir = self.base_dir + '/' + kwargs.pop('calc_dir')
        else:
            self.calc_dir = self.base_dir
        self.prefix = kwargs.pop('prefix', 'tmp')
        self.basis_set = kwargs.pop('basis_set', 'TZVP')
        #nwchem has no stress ;-)
        self.method = kwargs.pop('method', 'DFT')
        self.riapprox = kwargs.pop('riapprox', False)
        self.printoptions = kwargs.pop('printoptions', '')
        self.ncpu = int(kwargs.pop('ncpu', 1))
        self.charge = int(kwargs.pop('charge', 0))
        self.multiplicity = int(kwargs.pop('multiplicity', 1))
        self.dummies = kwargs.pop('dummies', None)
        self.stuff = kwargs.pop('stuff', '')
        self.spindens = kwargs.pop('spindens', False)
def update(self, atoms):
if (not self.converged or
len(self.numbers) != len(atoms) or
(self.numbers != atoms.get_atomic_numbers()).any()):
self.initialize(atoms)
self.calculate(atoms)
elif ((self.positions != atoms.get_positions()).any() or
(self.pbc != atoms.get_pbc()).any() or
(self.cell != atoms.get_cell()).any()):
self.calculate(atoms)
def initialize(self, atoms):
self.numbers = atoms.get_atomic_numbers().copy()
self.species = []
for a, Z in enumerate(self.numbers):
self.species.append(Z)
self.converged = False
def get_potential_energy(self, atoms):
self.energy = False
self.gradient = True
#self.gradient = False
self.update(atoms)
return self.etotal
def get_total_energy(self, atoms):
self.energy = True
self.gradient = False
self.update(atoms)
return self.etotal
def get_forces(self, atoms):
self.energy = False
self.gradient = True
self.update(atoms)
return self.forces.copy()
def get_stress(self, atoms):
self.update(atoms)
return self.stress.copy()
def calculate(self, atoms):
self.positions = atoms.get_positions().copy()
self.cell = atoms.get_cell().copy()
self.pbc = atoms.get_pbc().copy()
        if self.calc_dir != self.base_dir:
            try:
                os.mkdir(self.calc_dir)
            except OSError:
                print "dir '%s' already exists - continuing" % self.calc_dir
if not self.spindens:
os.chdir(self.calc_dir)
self.write_orca_input(atoms)
os.system('%s %s.input > %s.output' %(self.pathtoorca, self.prefix, self.prefix))
os.system('cat %s.output >> all.output' %self.prefix)
os.chdir(self.base_dir)
else:
self.calculate_spin_density(atoms, "sdens.cube")
os.chdir(self.calc_dir)
self.read_energy()
if self.gradient:
self.read_forces(atoms)
os.chdir(self.base_dir)
self.converged = True
def calculate_spin_density(self, atoms, filename):
self.positions = atoms.get_positions().copy()
self.cell = atoms.get_cell().copy()
self.pbc = atoms.get_pbc().copy()
if self.calc_dir != self.base_dir:
            try:
                os.mkdir(self.calc_dir)
            except OSError:
                print "dir '%s' already exists - continuing" % self.calc_dir
os.chdir(self.calc_dir)
interm = self.stuff
self.stuff += '\n%%plots\nFormat Gaussian_Cube\nSpinDens("%s");\nend\n' %filename
self.write_orca_input(atoms)
os.system('%s %s.input > %s.output' %(self.pathtoorca, self.prefix, self.prefix))
self.stuff = interm
os.chdir(self.base_dir)
def read_energy(self):
f = open('%s.output' %self.prefix,'r')
lines = f.readlines()
f.close()
for line in range(len(lines)):
if lines[line].startswith('FINAL SINGLE POINT ENERGY'):
self.etotal = float(lines[line].split()[-1])*Hartree
def read_forces(self,atoms): #todo
f = open('%s.output' %self.prefix,'r')
lines = f.readlines()
f.close()
self.forces = np.array([],dtype='float64')
        if (self.method == 'HF') or (self.method == 'DFT'):
            grad = lines.index('CARTESIAN GRADIENT\n')
        elif self.method.endswith('MP2'):
            grad = lines.index('The final MP2 gradient\n')-2
        else:
            raise NotImplementedError(
                'gradient parsing not implemented for method %s' % self.method)
forces = lines[grad+3:(grad+3+np.shape(atoms.positions)[0])]
#print forces[0].split()
self.forces = np.empty((len(forces),3))
i = 0
for line in forces:
if (self.method == 'HF') or (self.method == 'DFT'):
grads = line.split()[3:6]
elif self.method.endswith('MP2'):
grads = line.split()[1:4]
#print grads
            gradx = float(grads[0])
            grady = float(grads[1])
            gradz = float(grads[2])
self.forces[i,:] = -(np.array([gradx, grady, gradz]))*Hartree/Bohr
#self.forces[i,:] = np.array([gradx, grady, gradz]) #pure forces from ORCA
i += 1
#print self.forces
#check for units of forces and sign (prefactor TM=Hartree/Bohr)
    def read(self):
        """Dummy stress; this ORCA wrapper does not compute one"""
        self.stress = np.empty((3, 3))
    def write_orca_input(self, atoms):
        """writes the ORCA input file consisting of method, basis set and print options"""
inputfile = open('%s.input' %self.prefix, 'w')
if self.riapprox:
riprefix = 'RI'
else:
riprefix = ''
inputfile.write('! %s %s %s\n' %(riprefix, self.method, self.basis_set))
        # the gradient is always requested here; the original conditional is disabled
        print 'running gradient'
        inputfile.write('! EnGrad TightSCF\n')
if self.ncpu > 1:
inputfile.write('%s \n nprocs %s \n end \n' %(str('%pal'), self.ncpu))
inputfile.write('\n'+self.stuff+'\n')
symbols = atoms.get_chemical_symbols()
inputfile.write('*xyz %s %s \n' %(str(self.charge), str(self.multiplicity)))
for i in range(np.shape(atoms.positions)[0]):
inputfile.write('%s %f %f %f\n' %(symbols[i], atoms.positions[i,0], atoms.positions[i,1], atoms.positions[i,2]))
        if self.dummies is not None:
inputfile.write(self.dummies)
inputfile.write('*')
inputfile.close()
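# Minimal usage sketch for the calculator above, assuming an old-style ASE
# where atoms.get_potential_energy() delegates to calc.get_potential_energy(atoms).
# The ORCA binary path is machine-specific, so everything stays commented out.
#
#   from ase import Atoms
#   atoms = Atoms('H2O', positions=[[0., 0., 0.], [0., 0., 0.96], [0.93, 0., -0.24]])
#   calc = ORCA(method='DFT', basis_set='TZVP', ncpu=1, pathtoorca='/path/to/orca')
#   atoms.set_calculator(calc)
#   energy = atoms.get_potential_energy()   # runs ORCA and parses the output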
|
PHOTOX/fuase
|
ase/ase/calculators/old-orca.py
|
Python
|
gpl-2.0
| 8,101
|
[
"ASE",
"NWChem",
"ORCA"
] |
9792412fc7d6c2a64f1695553e97254e8c7110a10976511b6cffcf4cb3d3e17d
|
from math import exp
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
return gauss / gauss.sum()
def create_window(window_size, sigma, channel):
_1D_window = gaussian(window_size, sigma).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
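# Sanity-check sketch for the helpers above: gaussian() normalizes the 1-D
# kernel, so each channel's separable 2-D window should sum to ~1. Kept as a
# comment so importing this module stays side-effect free.
#
#   w = create_window(window_size=11, sigma=1.5, channel=3)
#   assert tuple(w.data.size()) == (3, 1, 11, 11)
#   assert abs(float(w.data[0].sum()) - 1.0) < 1e-5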
class MS_SSIM(torch.nn.Module):
def __init__(self, size_average=True, max_val=255):
super(MS_SSIM, self).__init__()
self.size_average = size_average
self.channel = 3
self.max_val = max_val
def _ssim(self, img1, img2, size_average=True):
_, c, w, h = img1.size()
window_size = min(w, h, 11)
sigma = 1.5 * window_size / 11
window = create_window(window_size, sigma, self.channel).cuda(img1.get_device())
mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=self.channel)
mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=self.channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=self.channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=self.channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=self.channel) - mu1_mu2
C1 = (0.01 * self.max_val) ** 2
C2 = (0.03 * self.max_val) ** 2
V1 = 2.0 * sigma12 + C2
V2 = sigma1_sq + sigma2_sq + C2
ssim_map = ((2 * mu1_mu2 + C1) * V1) / ((mu1_sq + mu2_sq + C1) * V2)
mcs_map = V1 / V2
        if size_average:
            return ssim_map.mean(), mcs_map.mean()
        return ssim_map, mcs_map
def ms_ssim(self, img1, img2, levels=5):
weight = Variable(torch.Tensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).cuda(img1.get_device()))
msssim = Variable(torch.Tensor(levels, ).cuda(img1.get_device()))
mcs = Variable(torch.Tensor(levels, ).cuda(img1.get_device()))
for i in range(levels):
ssim_map, mcs_map = self._ssim(img1, img2)
msssim[i] = ssim_map
mcs[i] = mcs_map
filtered_im1 = F.avg_pool2d(img1, kernel_size=2, stride=2)
filtered_im2 = F.avg_pool2d(img2, kernel_size=2, stride=2)
img1 = filtered_im1
img2 = filtered_im2
value = (torch.prod(mcs[0:levels - 1] ** weight[0:levels - 1]) * (msssim[levels - 1] ** weight[levels - 1]))
return value
def forward(self, img1, img2):
return self.ms_ssim(img1, img2)
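# Minimal usage sketch for MS_SSIM above, assuming CUDA tensors of shape
# (N, 3, H, W) with values in [0, max_val]; H and W must be large enough to
# survive 5 rounds of 2x average pooling (the hard-coded levels=5).
#
#   criterion = MS_SSIM(max_val=255)
#   score = criterion(img1.cuda(), img2.cuda())   # scalar, higher is better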
class AverageMeter(object):
"""
Keeps track of most recent, average, sum, and count of a metric.
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
    def update(self, val, n=1):
        self.val = val
        self.sum += val * n  # weight by count so avg is a true per-sample mean
        self.count += n
        self.avg = self.sum / self.count
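# Usage sketch for AverageMeter: feed per-batch means with their batch sizes
# and read the running per-sample mean.
#
#   meter = AverageMeter()
#   meter.update(0.5, n=32)   # batch loss 0.5 averaged over 32 samples
#   meter.update(0.7, n=32)
#   meter.avg                 # -> 0.6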
|
ECP-CANDLE/Benchmarks
|
examples/image-vae/utils.py
|
Python
|
mit
| 3,249
|
[
"Gaussian"
] |
9698c3426e2db69570903752b22415194c598992b3aa547fe1f087411672c2e4
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
Contains classes and functions for working with the various RMG databases. In
particular, this module is devoted to functionality that is common across all
components of the RMG database.
"""
import os
import logging
import re
import codecs
try:
from collections import OrderedDict
except ImportError:
logging.warning("Upgrade to Python 2.7 or later to ensure your database entries are read and written in the same order each time!")
OrderedDict = dict
from rmgpy.molecule import Molecule, Group
from rmgpy.molecule.adjlist import InvalidAdjacencyListError
from reference import Reference, Article, Book, Thesis
################################################################################
class DatabaseError(Exception):
"""
A exception that occurs when working with an RMG database. Pass a string
giving specifics about the exceptional behavior.
"""
pass
################################################################################
class Entry(object):
"""
A class for representing individual records in an RMG database. Each entry
in the database associates a chemical item (generally a species, functional
group, or reaction) with a piece of data corresponding to that item. A
significant amount of metadata can also be stored with each entry.
The attributes are:
=================== ========================================================
Attribute Description
=================== ========================================================
`index` A unique nonnegative integer index for the entry
`label` A unique string identifier for the entry (or '' if not used)
`item` The item that this entry represents
`parent` The parent of the entry in the hierarchy (or ``None`` if not used)
`children` A list of the children of the entry in the hierarchy (or ``None`` if not used)
`data` The data to associate with the item
`reference` A :class:`Reference` object containing bibliographic reference information to the source of the data
`referenceType` The way the data was determined: ``'theoretical'``, ``'experimental'``, or ``'review'``
`shortDesc` A brief (one-line) description of the data
`longDesc` A long, verbose description of the data
`rank` An integer indicating the degree of confidence in the entry data, or ``None`` if not used
=================== ========================================================
"""
def __init__(self,
index=-1,
label='',
item=None,
parent=None,
children=None,
data=None,
reference=None,
referenceType='',
shortDesc='',
longDesc='',
rank=None,
):
self.index = index
self.label = label
self.item = item
self.parent = parent
self.children = children or []
self.data = data
self.reference = reference
self.referenceType = referenceType
self._shortDesc = unicode(shortDesc)
self._longDesc = unicode(longDesc)
self.rank = rank
def __str__(self):
return self.label
def __repr__(self):
return '<Entry index={0:d} label="{1}">'.format(self.index, self.label)
@property
def longDesc(self):
return self._longDesc
@longDesc.setter
def longDesc(self, value):
if value is None:
self._longDesc = None
else:
self._longDesc = unicode(value)
@property
def shortDesc(self):
return self._shortDesc
@shortDesc.setter
def shortDesc(self, value):
if value is None:
self._shortDesc = None
else:
self._shortDesc = unicode(value)
################################################################################
class Database:
"""
An RMG-style database, consisting of a dictionary of entries (associating
items with data), and an optional tree for assigning a hierarchy to the
entries. The use of the tree enables the database to be easily extensible
as more parameters are available.
In constructing the tree, it is important to develop a hierarchy such that
siblings are mutually exclusive, to ensure that there is a unique path of
descent down a tree for each structure. If non-mutually exclusive siblings
are encountered, a warning is raised and the parent of the siblings is
returned.
There is no requirement that the children of a node span the range of
more specific permutations of the parent. As the database gets more complex,
attempting to maintain complete sets of children for each parent in each
database rapidly becomes untenable, and is against the spirit of
extensibility behind the database development.
You must derive from this class and implement the :meth:`loadEntry`,
:meth:`saveEntry`, :meth:`processOldLibraryEntry`, and
:meth:`generateOldLibraryEntry` methods in order to load and save from the
new and old database formats.
"""
local_context = {}
local_context['Reference'] = Reference
local_context['Article'] = Article
local_context['Book'] = Book
local_context['Thesis'] = Thesis
def __init__(self,
entries=None,
top=None,
label='',
name='',
solvent=None,
shortDesc='',
longDesc='',
):
self.entries = OrderedDict(entries or {})
self.top = top or []
self.label = label
self.name = name
self.solvent = solvent
self.shortDesc = shortDesc
self.longDesc = longDesc
def load(self, path, local_context=None, global_context=None):
"""
Load an RMG-style database from the file at location `path` on disk.
The parameters `local_context` and `global_context` are used to
provide specialized mapping of identifiers in the input file to
corresponding functions to evaluate. This method will automatically add
a few identifiers required by all data entries, so you don't need to
provide these.
"""
# Collision efficiencies are in SMILES format, so we'll need RDKit
# to convert them to Molecule objects
# Do the import here to ensure it is imported from a pure Python
# environment (as opposed to a Cythonized environment, which is not
# allowed during an exec() call)
from rdkit import Chem
# Clear any previously-loaded data
self.entries = OrderedDict()
self.top = []
# Set up global and local context
if global_context is None: global_context = {}
global_context['__builtins__'] = None
global_context['True'] = True
global_context['False'] = False
if local_context is None: local_context = {}
local_context['__builtins__'] = None
local_context['entry'] = self.loadEntry
local_context['tree'] = self.__loadTree
local_context['name'] = self.name
local_context['solvent'] = self.solvent
local_context['shortDesc'] = self.shortDesc
local_context['longDesc'] = self.longDesc
# add in anything from the Class level dictionary.
for key, value in Database.local_context.iteritems():
local_context[key]=value
# Process the file
f = open(path, 'r')
try:
exec f in global_context, local_context
except Exception, e:
logging.error('Error while reading database {0!r}.'.format(path))
raise
f.close()
# Extract the database metadata
self.name = local_context['name']
self.solvent = local_context['solvent']
self.shortDesc = local_context['shortDesc']
self.longDesc = local_context['longDesc'].strip()
# Return the loaded database (to allow for Database().load() syntax)
return self
def getEntriesToSave(self):
"""
Return a sorted list of the entries in this database that should be
saved to the output file.
"""
entries = self.top[:]
if len(self.top) > 0:
# Save the entries in the same order as the tree (so that it saves
# in the same order each time)
for entry in self.top:
entries.extend(self.descendants(entry))
# It may be that a logical or is defined such that its children
# are not in the tree; this ensures that they still get saved
index = 0
while index < len(entries):
entry = entries[index]
if isinstance(entry.item, LogicOr):
descendants = self.descendants(entry)
for child in entry.item.components:
if self.entries[child] not in descendants:
entries.append(self.entries[child])
index += 1
else:
# Otherwise save the entries sorted by index, if defined
entries = self.entries.values()
entries.sort(key=lambda x: (x.index))
return entries
def getSpecies(self, path):
"""
Load the dictionary containing all of the species in a kinetics library or depository.
"""
from rmgpy.species import Species
speciesDict = {}
with open(path, 'r') as f:
adjlist = ''
for line in f:
if line.strip() == '' and adjlist.strip() != '':
# Finish this adjacency list
species = Species().fromAdjacencyList(adjlist)
species.generateResonanceIsomers()
label = species.label
if label in speciesDict:
raise DatabaseError('Species label "{0}" used for multiple species in {1}.'.format(label, str(self)))
speciesDict[label] = species
adjlist = ''
else:
adjlist += line
return speciesDict
def saveDictionary(self, path):
"""
Extract species from all entries associated with a kinetics library or depository and save them
to the path given.
"""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
# Extract species from all the entries
speciesDict = {}
entries = self.entries.values()
for entry in entries:
for reactant in entry.item.reactants:
if reactant.label not in speciesDict:
speciesDict[reactant.label] = reactant
for product in entry.item.products:
if product.label not in speciesDict:
speciesDict[product.label] = product
with open(path, 'w') as f:
for label in speciesDict.keys():
f.write(speciesDict[label].molecule[0].toAdjacencyList(label=label, removeH=False))
f.write('\n')
def save(self, path):
"""
Save the current database to the file at location `path` on disk.
"""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
entries = self.getEntriesToSave()
f = codecs.open(path, 'w', 'utf-8')
f.write('#!/usr/bin/env python\n')
f.write('# encoding: utf-8\n\n')
f.write('name = "{0}"\n'.format(self.name))
f.write('shortDesc = u"{0}"\n'.format(self.shortDesc))
f.write('longDesc = u"""\n')
f.write(self.longDesc.strip() + '\n')
f.write('"""\n')
for entry in entries:
self.saveEntry(f, entry)
# Write the tree
if len(self.top) > 0:
f.write('tree(\n')
f.write('"""\n')
f.write(self.generateOldTree(self.top, 1))
f.write('"""\n')
f.write(')\n\n')
f.close()
def loadOld(self, dictstr, treestr, libstr, numParameters, numLabels=1, pattern=True):
"""
Load a dictionary-tree-library based database. The database is stored
in three files: `dictstr` is the path to the dictionary, `treestr` to
the tree, and `libstr` to the library. The tree is optional, and should
be set to '' if not desired.
"""
# Load dictionary, library, and (optionally) tree
try:
self.loadOldDictionary(dictstr, pattern)
except Exception, e:
logging.error('Error while reading database {0!r}.'.format(os.path.dirname(dictstr)))
raise
try:
if treestr != '': self.loadOldTree(treestr)
except Exception, e:
logging.error('Error while reading database {0!r}.'.format(os.path.dirname(treestr)))
raise
try:
self.loadOldLibrary(libstr, numParameters, numLabels)
except Exception, e:
logging.error('Error while reading database {0!r}.'.format(os.path.dirname(libstr)))
raise
return self
def loadOldDictionary(self, path, pattern):
"""
Parse an old-style RMG database dictionary located at `path`. An RMG
dictionary is a list of key-value pairs of a one-line string key and a
multi-line string value. Each record is separated by at least one empty
line. Returns a ``dict`` object with the values converted to
:class:`Molecule` or :class:`Group` objects depending on the
value of `pattern`.
"""
# The dictionary being loaded
self.entries = {}
# The current record
record = ''
fdict=None
# Process the dictionary file
try:
fdict = open(path, 'r')
for line in fdict:
line = line.strip()
# If at blank line, end of record has been found
if len(line) == 0 and len(record) > 0:
# Label is first line of record
lines = record.splitlines()
label = lines[0]
# Add record to dictionary
self.entries[label] = Entry(label=label, item=record)
# Clear record in preparation for next iteration
record = ''
# Otherwise append line to record (if not empty and not a comment line)
else:
line = removeCommentFromLine(line).strip()
if len(line) > 0:
record += line + '\n'
# process the last record! (after end of for loop)
# Label is first line of record
if record:
label = record.splitlines()[0]
# Add record to dictionary
self.entries[label] = Entry(label=label, item=record)
except DatabaseError, e:
logging.exception(str(e))
raise
except IOError, e:
logging.exception('Database dictionary file "' + e.filename + '" not found.')
raise
finally:
if fdict: fdict.close()
# Convert the records in the dictionary to Molecule, Group, or
# logical objects
try:
for label in self.entries:
record = self.entries[label].item
lines = record.splitlines()
# If record is a logical node, make it into one.
if re.match("(?i)\s*(NOT\s)?\s*(OR|AND|UNION)\s*(\{.*\})", lines[1]):
self.entries[label].item = makeLogicNode(' '.join(lines[1:]) )
# Otherwise convert adjacency list to molecule or pattern
elif pattern:
self.entries[label].item = Group().fromAdjacencyList(record)
else:
self.entries[label].item = Molecule().fromAdjacencyList(record,saturateH=True)
except InvalidAdjacencyListError, e:
logging.error('Error while loading old-style dictionary "{0}"'.format(path))
logging.error('Error occurred while parsing adjacency list "{0}"'.format(label))
raise
def __loadTree(self, tree):
"""
Parse an old-style RMG tree located at `tree`. An RMG tree is an n-ary
tree representing the hierarchy of items in the dictionary.
"""
if len(self.entries) == 0:
raise DatabaseError("Load the dictionary before you load the tree.")
# should match ' L3 : foo_bar ' and 'L3:foo_bar'
parser = re.compile('^\s*L(?P<level>\d+)\s*:\s*(?P<label>\S+)')
parents = [None]
for line in tree.splitlines():
line = removeCommentFromLine(line).strip()
if len(line) > 0:
# Extract level
match = parser.match(line)
if not match:
raise DatabaseError("Couldn't parse line '{0}'".format(line.strip()))
level = int(match.group('level'))
label = match.group('label')
# Find immediate parent of the new node
parent = None
if len(parents) < level:
raise DatabaseError("Invalid level specified in line '{0}'".format(line.strip()))
else:
while len(parents) > level:
parents.remove(parents[-1])
if len(parents) > 0:
parent = parents[level-1]
if parent is not None: parent = self.entries[parent]
try:
entry = self.entries[label]
except KeyError:
raise DatabaseError('Unable to find entry "{0}" from tree in dictionary.'.format(label))
if isinstance(parent, str):
raise DatabaseError('Unable to find parent entry "{0}" of entry "{1}" in tree.'.format(parent, label))
# Update the parent and children of the nodes accordingly
if parent is not None:
entry.parent = parent
parent.children.append(entry)
else:
entry.parent = None
self.top.append(entry)
# Add node to list of parents for subsequent iteration
parents.append(label)
def loadOldTree(self, path):
"""
Parse an old-style RMG database tree located at `path`. An RMG
tree is an n-ary tree representing the hierarchy of items in the
dictionary.
"""
        tree = ''
        ftree = None
        try:
            ftree = open(path, 'r')
            tree = ftree.read()
        except IOError, e:
            logging.exception('Database tree file "' + e.filename + '" not found.')
        finally:
            if ftree: ftree.close()
self.__loadTree(tree)
def loadOldLibrary(self, path, numParameters, numLabels=1):
"""
Parse an RMG database library located at `path`.
"""
if len(self.entries) == 0:
raise DatabaseError("Load the dictionary before you load the library.")
entries = self.parseOldLibrary(path, numParameters, numLabels)
# Load the parsed entries into the database, skipping duplicate entries
skippedCount = 0
for index, label, parameters, comment in entries:
if label not in self.entries:
raise DatabaseError('Entry {0!r} in library was not found in dictionary.'.format(label))
if self.entries[label].index != -1:
# The entry is a duplicate, so skip it
logging.debug("There was already something labeled {0} in the {1} library. Ignoring '{2}' ({3})".format(label, self.label, index, parameters))
skippedCount += 1
else:
# The entry is not a duplicate
self.entries[label].index = index
self.entries[label].data = parameters
self.entries[label].shortDesc = comment
if skippedCount > 0:
logging.warning("Skipped {0:d} duplicate entries in {1} library.".format(skippedCount, self.label))
# Make sure each entry with data has a nonnegative index
entries2 = self.entries.values()
entries2.sort(key=lambda entry: entry.index)
index = entries2[-1].index + 1
if index < 1: index = 1
for index0, label, parameters, comment in entries:
if self.entries[label].index < 0:
self.entries[label].index = index
index += 1
def parseOldLibrary(self, path, numParameters, numLabels=1):
"""
Parse an RMG database library located at `path`, returning the loaded
entries (rather than storing them in the database). This method does
not discard duplicate entries.
"""
entries = []
flib = None
try:
flib = codecs.open(path, 'r', 'utf-8', errors='replace')
for line in flib:
line = removeCommentFromLine(line).strip()
if len(line) > 0:
info = line.split()
# Skip if the number of items on the line is invalid
if len(info) < 2:
continue
# Determine if the first item is an index
# This index is optional in the old library format
index = -1
offset = 0
try:
index = int(float(info[0]))
offset = 1
except ValueError:
pass
# Extract label(s)
label = self.__hashLabels(info[offset:offset+numLabels])
offset += numLabels
# Extract numeric parameter(s) or label of node with data to use
if numParameters < 0:
parameters = self.processOldLibraryEntry(info[offset:])
comment = ''
else:
try:
parameters = self.processOldLibraryEntry(info[offset:offset+numParameters])
offset += numParameters
except (IndexError, ValueError), e:
parameters = info[offset]
offset += 1
# Remaining part of string is comment
comment = ' '.join(info[offset:])
comment = comment.strip('"')
entries.append((index, label, parameters, comment))
except DatabaseError, e:
logging.exception(str(e))
logging.exception("path = '{0}'".format(path))
logging.exception("line = '{0}'".format(line))
raise
except IOError, e:
logging.exception('Database library file "' + e.filename + '" not found.')
raise
finally:
if flib: flib.close()
return entries
def saveOld(self, dictstr, treestr, libstr):
"""
Save the current database to a set of text files using the old-style
syntax.
"""
self.saveOldDictionary(dictstr)
if treestr != '':
self.saveOldTree(treestr)
# RMG-Java does not require a frequencies_groups/Library.txt file to
# operate, but errors are raised upon importing to Py if this file is
# not found. This check prevents the placeholder from being discarded.
if 'StatesGroups' not in self.__class__.__name__:
self.saveOldLibrary(libstr)
def saveOldDictionary(self, path):
"""
Save the current database dictionary to a text file using the old-style
syntax.
"""
entries = []
entriesNotInTree = []
# If we have tree information, save the dictionary in the same order as
# the tree (so that it saves in the same order each time)
def getLogicNodeComponents(entry_or_item):
"""
If we want to save an entry, but that is a logic node, we also want
to save its components, recursively. This is a horribly complicated way
to *not* save in the dictionary any things which are not accessed from
(or needed to define things that are accessed from) the tree.
"""
if isinstance(entry_or_item, Entry):
entry = entry_or_item
item = entry.item
nodes = [entry]
else:
entry = None
item = entry_or_item
nodes = []
if isinstance(item, LogicNode):
for child in item.components:
if isinstance(child, LogicNode):
nodes.extend(getLogicNodeComponents(child))
else:
nodes.extend(getLogicNodeComponents(self.entries[child]))
return nodes
else:
return [entry]
if len(self.top) > 0:
for entry in self.top:
entries.extend(getLogicNodeComponents(entry))
for descendant in self.descendants(entry):
for entry2 in getLogicNodeComponents(descendant):
if entry2 not in entries:
entries.append(entry2)
# Don't forget entries that aren't in the tree
for entry in self.entries.values():
if entry not in entries:
entriesNotInTree.append(entry)
entriesNotInTree.sort(key=lambda x: (x.index, x.label))
# Otherwise save the dictionary in any order
else:
# Save the library in order by index
entries = self.entries.values()
entries.sort(key=lambda x: (x.index, x.label))
def comment(s):
"Return the string, with each line prefixed with '// '"
return '\n'.join('// ' + line if line else '' for line in s.split('\n'))
try:
f = open(path, 'w')
f.write('////////////////////////////////////////////////////////////////////////////////\n')
f.write('//\n')
f.write('// {0} dictionary\n'.format(self.name))
f.write('//\n')
f.write('////////////////////////////////////////////////////////////////////////////////\n')
f.write('\n')
for entry in entries:
f.write(entry.label + '\n')
if isinstance(entry.item, Molecule):
try:
f.write(entry.item.toAdjacencyList(removeH=True, oldStyle=True) + '\n')
except InvalidAdjacencyListError:
f.write("// Couldn't save in old syntax adjacency list. Here it is in new syntax:\n")
f.write(comment(entry.item.toAdjacencyList(removeH=False, oldStyle=False) + '\n'))
elif isinstance(entry.item, Group):
f.write(entry.item.toAdjacencyList(oldStyle=True).replace('{2S,2T}', '2') + '\n')
elif isinstance(entry.item, LogicOr):
f.write('{0}\n\n'.format(entry.item).replace('OR{', 'Union {'))
elif entry.label[0:7] == 'Others-':
assert isinstance(entry.item, LogicNode)
f.write('{0}\n\n'.format(entry.item))
else:
raise DatabaseError('Unexpected item with label {0} encountered in dictionary while attempting to save.'.format(entry.label))
if entriesNotInTree:
f.write(comment("These entries do not appear in the tree:\n\n"))
for entry in entriesNotInTree:
f.write(comment(entry.label + '\n'))
if isinstance(entry.item, Molecule):
f.write(comment(entry.item.toAdjacencyList(removeH=False) + '\n'))
elif isinstance(entry.item, Group):
f.write(comment(entry.item.toAdjacencyList().replace('{2S,2T}','2') + '\n'))
elif isinstance(entry.item, LogicOr):
f.write(comment('{0}\n\n'.format(entry.item).replace('OR{', 'Union {')))
elif entry.label[0:7] == 'Others-':
assert isinstance(entry.item, LogicNode)
f.write(comment('{0}\n\n'.format(entry.item)))
else:
raise DatabaseError('Unexpected item with label {0} encountered in dictionary while attempting to save.'.format(entry.label))
f.close()
except IOError, e:
logging.exception('Unable to save old-style dictionary to "{0}".'.format(os.path.abspath(path)))
raise
def generateOldTree(self, entries, level):
"""
Generate a multi-line string representation of the current tree using
the old-style syntax.
"""
string = ''
for entry in entries:
# Write current node
string += '{0}L{1:d}: {2}\n'.format(' ' * (level-1), level, entry.label)
# Recursively descend children (depth-first)
string += self.generateOldTree(entry.children, level+1)
return string
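    # Example of the emitted format (one extra leading space per level), which
    # is exactly what __loadTree's regex parses back in:
    #
    #   L1: Root
    #    L2: Child
    #     L3: Grandchild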
def saveOldTree(self, path):
"""
Save the current database tree to a text file using the old-style
syntax.
"""
try:
f = open(path, 'w')
f.write('////////////////////////////////////////////////////////////////////////////////\n')
f.write('//\n')
f.write('// {0} tree\n'.format(self.name))
f.write('//\n')
f.write('////////////////////////////////////////////////////////////////////////////////\n')
f.write('\n')
f.write(self.generateOldTree(self.top, 1))
f.close()
except IOError, e:
logging.exception('Unable to save old-style tree to "{0}".'.format(os.path.abspath(path)))
raise
def saveOldLibrary(self, path):
"""
Save the current database library to a text file using the old-style
syntax.
"""
try:
# Save the library in order by index
entries = self.entries.values()
entries.sort(key=lambda x: x.index)
f = codecs.open(path, 'w', 'utf-8')
records = []
for entry in entries:
if entry.data is not None:
data = entry.data
if not isinstance(data, str):
data = self.generateOldLibraryEntry(data)
records.append((entry.index, [entry.label], data, entry.shortDesc))
records.sort()
f.write('////////////////////////////////////////////////////////////////////////////////\n')
f.write('//\n')
f.write('// {0} library\n'.format(self.name))
f.write('//\n')
f.write('////////////////////////////////////////////////////////////////////////////////\n')
f.write('\n')
for index, labels, data, comment in records:
f.write('{:<6d} '.format(index))
for label in labels:
f.write('{:<32s} '.format(label))
if isinstance(data, basestring):
f.write('{:s} '.format(data))
else:
f.write('{:s} '.format(' '.join(['{:<10g}'.format(d) for d in data])))
f.write(u' {:s}\n'.format(comment))
f.close()
except IOError, e:
logging.exception('Unable to save old-style library to "{0}".'.format(os.path.abspath(path)))
raise
    def __hashLabels(self, labels):
        """
        Convert a list of string `labels` into a single hashable string, with
        the individual labels joined by semicolons::
            >>> hashLabels(['a','b'])
            'a;b'
        """
        return ';'.join(labels)
def ancestors(self, node):
"""
Returns all the ancestors of a node, climbing up the tree to the top.
"""
if isinstance(node, str): node = self.entries[node]
ancestors = []
parent = node.parent
if parent is not None:
ancestors = [parent]
ancestors.extend(self.ancestors(parent))
return ancestors
def descendants(self, node):
"""
Returns all the descendants of a node, climbing down the tree to the bottom.
"""
if isinstance(node, str): node = self.entries[node]
descendants = []
for child in node.children:
descendants.append(child)
descendants.extend(self.descendants(child))
return descendants
def matchNodeToNode(self, node, nodeOther):
"""
Return `True` if `node` and `nodeOther` are identical. Otherwise, return `False`.
Both `node` and `nodeOther` must be Entry types with items containing Group or LogicNode types.
"""
if isinstance(node.item, Group) and isinstance(nodeOther.item, Group):
return self.matchNodeToStructure(node,nodeOther.item, atoms=nodeOther.item.getLabeledAtoms()) and self.matchNodeToStructure(nodeOther,node.item,atoms=node.item.getLabeledAtoms())
elif isinstance(node.item,LogicOr) and isinstance(nodeOther.item,LogicOr):
return node.item.matchLogicOr(nodeOther.item)
else:
# Assume nonmatching
return False
def matchNodeToChild(self, parentNode, childNode):
"""
Return `True` if `parentNode` is a parent of `childNode`. Otherwise, return `False`.
Both `parentNode` and `childNode` must be Entry types with items containing Group or LogicNode types.
If `parentNode` and `childNode` are identical, the function will also return `False`.
"""
if isinstance(parentNode.item, Group) and isinstance(childNode.item, Group):
if self.matchNodeToStructure(parentNode,childNode.item, atoms=childNode.item.getLabeledAtoms()) is True:
if self.matchNodeToStructure(childNode,parentNode.item, atoms=parentNode.item.getLabeledAtoms()) is False:
return True
return False
        #If the parentNode is a Group and the childNode is a LogicOr, there is nothing to
        #check beyond the parent/child attributes themselves. However, we do need to check
        #that everything down this family line is consistent, which is done in the
        #databaseTest unit test
elif isinstance(parentNode.item, Group) and isinstance(childNode.item, LogicOr):
return childNode.parent is parentNode
elif isinstance(parentNode.item,LogicOr):
return childNode.label in parentNode.item.components
def matchNodeToStructure(self, node, structure, atoms, strict=False):
"""
Return :data:`True` if the `structure` centered at `atom` matches the
structure at `node` in the dictionary. The structure at `node` should
have atoms with the appropriate labels because they are set on loading
and never change. However, the atoms in `structure` may not have the
correct labels, hence the `atoms` parameter. The `atoms` parameter may
include extra labels, and so we only require that every labeled atom in
the functional group represented by `node` has an equivalent labeled
atom in `structure`.
Matching to structure is more strict than to node. All labels in structure must
be found in node. However the reverse is not true, unless `strict` is set to True.
=================== ========================================================
Attribute Description
=================== ========================================================
`node` Either an Entry or a key in the self.entries dictionary which has a Group or LogicNode as its Entry.item
`structure` A Group or a Molecule
`atoms` Dictionary of {label: atom} in the structure. A possible dictionary is the one produced by structure.getLabeledAtoms()
        `strict`            If set to ``True``, ensures that all the node's atomLabels are matched by atoms in the structure
=================== ========================================================
"""
if isinstance(node, str): node = self.entries[node]
group = node.item
if isinstance(group, LogicNode):
return group.matchToStructure(self, structure, atoms, strict)
else:
# try to pair up labeled atoms
centers = group.getLabeledAtoms()
initialMap = {}
for label in centers.keys():
# Make sure the labels are in both group and structure.
if label not in atoms:
logging.log(0, "Label {0} is in group {1} but not in structure".format(label, node))
if strict:
# structure must match all labeled atoms in node if strict is set to True
return False
continue # with the next label - ring structures might not have all labeled atoms
center = centers[label]
atom = atoms[label]
# Make sure labels actually point to atoms.
if center is None or atom is None:
return False
if isinstance(center, list):
# Currently, no node in the database should have duplicate labels
# The capability to have duplicate labels in Group() exists but does not have any functionality.
raise DatabaseError('Nodes in the database should not have duplicate labels. Node {0} does.'.format(node))
# Semantic check #1: atoms with same label are equivalent
if not atom.isSpecificCaseOf(center):
return False
# Semantic check #2: labeled atoms that share bond in the group (node)
# also share equivalent (or more specific) bond in the structure
for atom2, atom1 in initialMap.iteritems():
if group.hasBond(center, atom1) and structure.hasBond(atom, atom2):
bond1 = group.getBond(center, atom1) # bond1 is group
bond2 = structure.getBond(atom, atom2) # bond2 is structure
if not bond2.isSpecificCaseOf(bond1):
return False
elif group.hasBond(center, atom1): # but structure doesn't
return False
# but we don't mind if...
elif structure.hasBond(atom, atom2): # but group doesn't
logging.debug("We don't mind that structure "+ str(structure) +
" has bond but group {0} doesn't".format(node))
# Passed semantic checks, so add to maps of already-matched atoms
initialMap[atom] = center
# Labeled atoms in the structure that are not in the group should
# not be considered in the isomorphism check, so flag them temporarily
# Without this we would hit a lot of nodes that are ambiguous
flaggedAtoms = [atom for label, atom in structure.getLabeledAtoms().iteritems() if label not in centers]
for atom in flaggedAtoms: atom.ignore = True
# use mapped (labeled) atoms to try to match subgraph
result = structure.isSubgraphIsomorphic(group, initialMap)
# Restore atoms flagged in previous step
for atom in flaggedAtoms: atom.ignore = False
return result
def descendTree(self, structure, atoms, root=None, strict=False):
"""
Descend the tree in search of the functional group node that best
matches the local structure around `atoms` in `structure`.
If root=None then uses the first matching top node.
Returns None if there is no matching root.
Set strict to ``True`` if all labels in final matched node must match that of the
structure. This is used in kinetics groups to find the correct reaction template, but
not generally used in other GAVs due to species generally not being prelabeled.
"""
if root is None:
for root in self.top:
if self.matchNodeToStructure(root, structure, atoms, strict):
break # We've found a matching root
else: # didn't break - matched no top nodes
return None
elif not self.matchNodeToStructure(root, structure, atoms, strict):
return None
next = []
for child in root.children:
if self.matchNodeToStructure(child, structure, atoms, strict):
next.append(child)
if len(next) == 1:
return self.descendTree(structure, atoms, next[0], strict)
elif len(next) == 0:
if len(root.children) > 0 and root.children[-1].label.startswith('Others-'):
return root.children[-1]
else:
return root
else:
#logging.warning('For {0}, a node {1} with overlapping children {2} was encountered in tree with top level nodes {3}. Assuming the first match is the better one.'.format(structure, root, next, self.top))
return self.descendTree(structure, atoms, next[0], strict)
def areSiblings(self, node, nodeOther):
"""
Return `True` if `node` and `nodeOther` have the same parent node. Otherwise, return `False`.
Both `node` and `nodeOther` must be Entry types with items containing Group or LogicNode types.
"""
        return node.parent is nodeOther.parent
def removeGroup(self, groupToRemove):
"""
Removes a group that is in a tree from the database. In addition to deleting from self.entries,
it must also update the parent/child relationships
Returns the removed group
"""
#Don't remove top nodes or LogicOrs as this will cause lots of problems
if groupToRemove in self.top:
raise Exception("Cannot remove top node: {0} from {1} because it is a top node".format(groupToRemove, self))
        elif isinstance(groupToRemove.item, LogicOr):
            raise Exception("Cannot remove node: {0} from {1} because it is a LogicOr".format(groupToRemove, self))
        #Remove groupToRemove from entries
self.entries.pop(groupToRemove.label)
#If there is a parent, then the group exists in a tree and we should edit relatives
parentR=groupToRemove.parent
if not parentR is None:
#Remove from parent's children attribute
parentR.children.remove(groupToRemove)
#change children's parent attribute to former grandparent
for child in groupToRemove.children:
child.parent = parentR
#extend parent's children attribute with new children
parentR.children.extend(groupToRemove.children)
#A few additional changes needed if parentR is a LogicOr node
if isinstance(parentR.item, LogicOr):
parentR.item.components.remove(groupToRemove.label)
parentR.item.components.extend([child.label for child in groupToRemove.children])
return groupToRemove
class LogicNode:
"""
A base class for AND and OR logic nodes.
"""
symbol="<TBD>" # To be redefined by subclass
def __init__(self,items,invert):
self.components = []
for item in items:
if re.match("(?i)\s*(NOT\s)?\s*(OR|AND|UNION)\s*(\{.*\})",item):
component = makeLogicNode(item)
else:
component = item
self.components.append(component)
self.invert = bool(invert)
def __str__(self):
result = ''
if self.invert: result += 'NOT '
result += self.symbol
result += "{{{0}}}".format(', '.join([str(c) for c in self.components]))
return result
class LogicOr(LogicNode):
"""
A logical OR node. Structure can match any component.
Initialize with a list of component items and a boolean instruction to invert the answer.
"""
symbol = "OR"
def matchToStructure(self,database,structure,atoms,strict=False):
"""
Does this node in the given database match the given structure with the labeled atoms?
        Setting `strict` to True enforces matching of atomLabels in the structure to every
        atomLabel in the node.
"""
for node in self.components:
if isinstance(node,LogicNode):
match = node.matchToStructure(database, structure, atoms, strict)
else:
match = database.matchNodeToStructure(node, structure, atoms, strict)
if match:
return True != self.invert
return False != self.invert
def matchLogicOr(self, other):
"""
Is other the same LogicOr group as self?
"""
if len(self.components)!=len(other.components):
return False
else:
for node in self.components:
if node not in other.components:
return False
return True
def getPossibleStructures(self, entries):
"""
Return a list of the possible structures below this node.
"""
if self.invert: raise NotImplementedError("Finding possible structures of NOT OR nodes not implemented.")
structures = []
for item in self.components:
struct = entries[item].item
if isinstance(struct, LogicNode):
structures.extend(struct.getPossibleStructures(entries))
else:
structures.append(struct)
for struct in structures: # check this worked
assert isinstance(struct,Group)
return structures
class LogicAnd(LogicNode):
"""A logical AND node. Structure must match all components."""
symbol = "AND"
def matchToStructure(self,database,structure,atoms,strict=False):
"""
Does this node in the given database match the given structure with the labeled atoms?
        Setting `strict` to True enforces matching of atomLabels in the structure to every
        atomLabel in the node.
"""
for node in self.components:
if isinstance(node,LogicNode):
match = node.matchToStructure(database, structure, atoms, strict)
else:
match = database.matchNodeToStructure(node, structure, atoms, strict)
if not match:
return False != self.invert
return True != self.invert
def makeLogicNode(string):
"""
Creates and returns a node in the tree which is a logic node.
String should be of the form:
* OR{}
* AND{}
* NOT OR{}
* NOT AND{}
And the returned object will be of class LogicOr or LogicAnd
"""
match = re.match("(?i)\s*(NOT\s)?\s*(OR|AND|UNION)\s*(\{.*\})",string) # the (?i) makes it case-insensitive
if not match:
raise Exception("Unexpected string for Logic Node: {0}".format(string))
if match.group(1): invert = True
else: invert = False
logic = match.group(2) # OR or AND (or Union)
contents = match.group(3).strip()
while contents.startswith('{'):
if not contents.endswith('}'):
raise Exception("Unbalanced braces in Logic Node: {0}".format(string))
contents = contents[1:-1]
items=[]
chars=[]
brace_depth = 0
for character in contents:
if character == '{':
brace_depth += 1
if character == '}':
brace_depth -= 1
if character == ',' and brace_depth == 0:
items.append(''.join(chars).lstrip().rstrip() )
chars = []
else:
chars.append(character)
if chars: # add last item
items.append(''.join(chars).lstrip().rstrip() )
if brace_depth != 0: raise Exception("Unbalanced braces in Logic Node: {0}".format(string))
if logic.upper() in ['OR', 'UNION']:
return LogicOr(items, invert)
    if logic.upper() == 'AND':
return LogicAnd(items, invert)
raise Exception("Could not create Logic Node from {0}".format(string))
################################################################################
def removeCommentFromLine(line):
"""
Remove a C++/Java style comment from a line of text. This refers
particularly to comments that begin with a double-slash '//' and continue
to the end of the line.
"""
index = line.find('//')
if index >= 0:
line = line[0:index]
return line
def splitLineAndComment(line):
"""
Returns a tuple(line, comment) based on a '//' comment delimiter.
Either `line` or `comment` may be ''.
Does not strip whitespace, nor remove more than two slashes.
"""
split = line.split('//',1)
if len(split) == 1:
return (split[0],'')
else:
return tuple(split)
def getAllCombinations(nodeLists):
"""
Generate a list of all possible combinations of items in the list of
lists `nodeLists`. Each combination takes one item from each list
contained within `nodeLists`. The order of items in the returned lists
reflects the order of lists in `nodeLists`. For example, if `nodeLists` was
[[A, B, C], [N], [X, Y]], the returned combinations would be
[[A, N, X], [A, N, Y], [B, N, X], [B, N, Y], [C, N, X], [C, N, Y]].
"""
items = [[]]
for nodeList in nodeLists:
items = [ item + [node] for node in nodeList for item in items ]
return items
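# Quick concrete check of getAllCombinations, consistent with its docstring:
#
#   getAllCombinations([[1, 2], ['x']])  ->  [[1, 'x'], [2, 'x']]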
################################################################################
class ForbiddenStructureException(Exception):
"""
Made a forbidden structure.
"""
pass
class ForbiddenStructures(Database):
"""
A database consisting solely of structures that are forbidden
from occurring.
"""
def isMoleculeForbidden(self, molecule):
"""
Return ``True`` if the given :class:`Molecule` object `molecule`
contains forbidden functionality, or ``False`` if not. Labeled atoms
on the forbidden structures and the molecule are honored.
"""
for entry in self.entries.values():
entryLabeledAtoms = entry.item.getLabeledAtoms()
moleculeLabeledAtoms = molecule.getLabeledAtoms()
initialMap = {}
for label in entryLabeledAtoms:
# all group labels must be present in the molecule
if label not in moleculeLabeledAtoms: break
initialMap[moleculeLabeledAtoms[label]] = entryLabeledAtoms[label]
else:
if molecule.isMappingValid(entry.item, initialMap) and molecule.isSubgraphIsomorphic(entry.item, initialMap):
return True
# Until we have more thermodynamic data of molecular ions we will forbid them
molecule_charge = 0
for atom in molecule.atoms:
molecule_charge += atom.charge
if molecule_charge != 0:
return True
return False
def loadOld(self, path):
"""
Load an old forbidden structures file from the location `path` on disk.
"""
self.loadOldDictionary(path, pattern=True)
return self
def saveOld(self, path):
"""
Save an old forbidden structures file to the location `path` on disk.
"""
self.saveOldDictionary(path)
def loadEntry(self, label, molecule=None, group=None, shortDesc='', longDesc=''):
"""
Load an entry from the forbidden structures database. This method is
automatically called during loading of the forbidden structures
database.
"""
assert molecule is not None or group is not None
assert not (molecule is not None and group is not None)
if molecule is not None:
item = Molecule.fromAdjacencyList(molecule)
elif group is not None:
if ( group[0:3].upper() == 'OR{' or
group[0:4].upper() == 'AND{' or
group[0:7].upper() == 'NOT OR{' or
group[0:8].upper() == 'NOT AND{'
):
item = makeLogicNode(group)
else:
item = Group().fromAdjacencyList(group)
self.entries[label] = Entry(
label = label,
item = item,
shortDesc = shortDesc,
longDesc = longDesc.strip(),
)
def saveEntry(self, f, entry, name='entry'):
"""
Save an `entry` from the forbidden structures database. This method is
automatically called during saving of the forbidden structures
database.
"""
f.write('{0}(\n'.format(name))
f.write(' label = "{0}",\n'.format(entry.label))
if isinstance(entry.item, Molecule):
f.write(' molecule = \n')
f.write('"""\n')
f.write(entry.item.toAdjacencyList(removeH=False))
f.write('""",\n')
elif isinstance(entry.item, Group):
f.write(' group = \n')
f.write('"""\n')
f.write(entry.item.toAdjacencyList())
f.write('""",\n')
else:
f.write(' group = "{0}",\n'.format(entry.item))
f.write(' shortDesc = u"""{0}""",\n'.format(entry.shortDesc))
f.write(' longDesc = \n')
f.write('u"""\n')
f.write(entry.longDesc.strip() + "\n")
f.write('""",\n')
f.write(')\n\n')
|
nyee/RMG-Py
|
rmgpy/data/base.py
|
Python
|
mit
| 56,343
|
[
"RDKit"
] |
e241019addd624a3d69ddbe1ff1fb2e318017a41aba3732fcdb3ce10bb88c42c
|
#
# Generates random data for SYNTH dataset in paper
# N dimensions, 1 group-by dim (5 outlier, 5 normal groups), 1 value attr
# Normal points draw value from N(u_h, s_h) -- gaussian, u_h mean, s_h std
# Outlier points draw value from N(u_o, s_o)
# Outlier points fall in a 10% (variable) volume box in k<=N dimensions
#
import pdb
import sys
import random
from operator import and_, mul
random.seed(0)
def rand_box(ndim, kdim, vol, bounds=None):
"""
outlier attributes are a_1,...a_kdim
"""
if not bounds:
bounds = [(0, 100)] * ndim
bedges = [b[1]-b[0] for b in bounds]
totalvol = reduce(mul, bedges)
usedvol = reduce(mul, bedges[kdim:], 1)
availvol = max(0, vol * totalvol / usedvol)
edge = availvol ** (1. / kdim)
ret = []
for attr, bound in zip(xrange(ndim), bounds):
if attr < kdim:
lower = random.random() * (bound[1]-bound[0] - edge) + bound[0]
upper = lower + edge
else:
lower, upper = bound[0], bound[1]
ret.append((lower, upper))
return ret
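# Worked example for rand_box: with ndim=3, kdim=2, vol=0.1 and the default
# (0, 100) bounds, totalvol = 1e6 and usedvol = 100 (the one unconstrained
# dim), so the box needs 0.1 * 1e6 / 100 = 1000 units of area across the first
# two dims, i.e. an edge of 1000 ** 0.5 ~= 31.6 per constrained dimension.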
def in_box(pt, box):
return reduce(and_, [l <= v and v <= u for v, (l, u) in zip(pt, box)])
def rand_point(ndim):
return [random.random() * 100 for i in xrange(ndim)]
def gen_multi_outliers(npts, ndim, kdim, vol, uh=10, sh=5, uo=90, so=5):
    # completely reproducible
random.seed(0)
nclusters = 2
norm_gen, mid_gen, outlier_gen = make_gen(uh, sh), make_gen((uo-uh)/2+uh, so), make_gen(uo, so)
med_boxes = [rand_box(ndim, kdim, vol/2.) for i in xrange(nclusters)]
high_boxes = [rand_box(ndim, kdim, vol, bounds=med_box) for med_box in med_boxes]
for med_box in med_boxes:
print >>sys.stderr, map(lambda arr: map(int, arr), med_box)
for high_box in high_boxes:
print >>sys.stderr, map(lambda arr: map(int, arr), high_box)
schema = ['a_%d' % i for i in xrange(ndim)] + ['g', 'v']
def generate():
for gid in xrange(10):
for i in xrange(npts):
pt = rand_point(ndim)
# add group and value
pt.append(gid)
if gid >= 5 and any([in_box(pt, mb) for mb in med_boxes]):
if any([in_box(pt, hb) for hb in high_boxes]):
pt.append(outlier_gen())
else:
pt.append(mid_gen())
else:
pt.append(norm_gen())
yield pt
return med_boxes, high_boxes, schema, generate()
def gen_points(npts, ndim, kdim, vol, uh=10, sh=5, uo=90, so=5):
# completely reproducable
random.seed(0)
norm_gen, mid_gen, outlier_gen = make_gen(uh, sh), make_gen((uo-uh)/2+uh, so), make_gen(uo, so)
outlier_box = rand_box(ndim, kdim, vol)
super_box = rand_box(ndim, kdim, vol, bounds=outlier_box)
print >>sys.stderr, map(lambda arr: map(int, arr), outlier_box)
print >>sys.stderr, map(lambda arr: map(int, arr), super_box)
schema = ['a_%d' % i for i in xrange(ndim)] + ['g', 'v']
def generate():
for gid in xrange(10):
for i in xrange(npts):
pt = rand_point(ndim)
# add group and value
pt.append(gid)
if gid >= 5 and in_box(pt, outlier_box):
if in_box(pt, super_box):
pt.append(outlier_gen())
else:
pt.append(mid_gen())
else:
pt.append(norm_gen())
yield pt
return outlier_box, super_box, schema, generate()
def make_gen(u, s):
return lambda: random.gauss(u, s)
if __name__ == '__main__':
import sys
if len(sys.argv) < 5:
print "Generates SYNTH dataset for paper. (single k dim sub-cluster of outlier points) "
print "tuple format: [a_0,..., a_ndim, a_group, a_val]"
print "Usage:\tpython gensinglecluster.py npts ndim kdim volperc [uh, sh, uo, so]"
print "\tnpts: number of points per group (10 groups, 5 outlier, 5 normal)"
print "\tkdim: dimensions of outlier cluster"
print "\tvolperc: percentage volume of outlier cluster (default: 10%)"
print "\tuh/uo: mean of normal and outlier a_val value (defaults: 10, 90)"
print "\tsh/so: std of a_val values (defaults: 5, 5)"
exit()
npts, ndim, kdim = map(int, sys.argv[1:4])
volperc = float(sys.argv[4])
if len(sys.argv) > 5:
uh, sh, uo, so = map(float, sys.argv[5:])
else:
uh, sh, uo, so = 10, 5, 90, 5
# print schema
outlier_box, super_box, schema, pts = gen_points(npts, ndim, kdim, volperc, uh, sh, uo, so)
print "\t".join(schema)
for pt in pts:
print '\t'.join(['%.4f'] * len(pt)) % tuple(pt)
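# Example invocation, per the usage text above (10 groups of 1000 points in
# 3 dims, with a 2-dim outlier box covering 10% of the volume):
#
#   python gensinglecluster.py 1000 3 2 0.1 > synth.tsv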
|
sirrice/scorpion
|
scorpion/misc/gensinglecluster.py
|
Python
|
mit
| 4,523
|
[
"Gaussian"
] |
fcfd132e145c35c487bec64de5ec340f98e8a947b576b26cec3d9242f53035c4
|
"""
Script to plot the autocorrelation and fit for previously found optimal parameters
"""
from mjhmc.search.objective import obj_func_helper
from mjhmc.figures.ac_fig import load_params
from mjhmc.samplers.markov_jump_hmc import ControlHMC, MarkovJumpHMC
from mjhmc.misc.distributions import RoughWell, Gaussian, MultimodalGaussian
from mjhmc.misc.plotting import plot_fit
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
def plot_all_best(custom_params=None):
""" Creates a plot with the autocorrelation and fit for each distribution and sampler
:param custom_params: dictionary of custom params will be used on all distributions
and samplers. if None uses the current best params for each
:returns: None
:rtype: None
"""
distributions = [
RoughWell(nbatch=200)
# Gaussian(ndims=10, nbatch=200),
# MultimodalGaussian(ndims=5, separation=1)
]
samplers = [
ControlHMC,
MarkovJumpHMC
]
with PdfPages("validation.pdf") as pdf:
for distribution in distributions:
# [control, mjhmc, lahmc]
if custom_params is None:
params = load_params(distribution)
else:
params = [custom_params] * 3
active_params = params[:-1]
for sampler, hparams in zip(samplers, active_params):
print "Now running for {} on {}".format(sampler, distribution)
cos_coef, n_grad_evals, exp_coef, autocor, _ = obj_func_helper(
sampler, distribution.reset(), False, hparams)
fig = plot_fit(n_grad_evals,
autocor,
exp_coef,
cos_coef,
'validation',
hparams,
save=False
)
pdf.savefig(fig)
def plot_comparison(samplers, params, distribution):
""" Plot a comparison between samplers and params
:param samplers: list of samplers to test
:param params: respective list of parameters for each sampler
:param distribution: distribution to compare on
:returns: None
:rtype: None
"""
for sampler, hparams in zip(samplers, params):
_, n_grad_evals, _, autocor, _ = obj_func_helper(
sampler, distribution.reset(), False, hparams)
plt.plot(n_grad_evals, autocor,
label="B: {}, eps: {}, M: {}".format(hparams['beta'],
hparams['epsilon'],
hparams['num_leapfrog_steps']))
plt.legend()
plt.savefig('comparison.pdf')
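# Hedged usage sketch for plot_comparison: the hyperparameter keys follow the
# label format string above, but the exact keys each sampler requires are an
# assumption.
#
#   hparams = {'beta': 0.1, 'epsilon': 0.2, 'num_leapfrog_steps': 5}
#   plot_comparison([ControlHMC, MarkovJumpHMC], [hparams, hparams],
#                   RoughWell(nbatch=200))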
|
rueberger/MJHMC
|
mjhmc/search/validation.py
|
Python
|
gpl-2.0
| 2,780
|
[
"Gaussian"
] |
b61b2b2373b181bbf351dfb14309725cdef6694a1b027464d97a06281817e9fa
|
"""Main entry point for dvc CLI."""
import logging
from dvc._debug import debugtools
from dvc.cli import parse_args
from dvc.config import ConfigError
from dvc.exceptions import DvcException, DvcParserError, NotDvcRepoError
from dvc.logger import FOOTER, disable_other_loggers
# Workaround for CPython bug. See [1] and [2] for more info.
# [1] https://github.com/aws/aws-cli/blob/1.16.277/awscli/clidriver.py#L55
# [2] https://bugs.python.org/issue29288
"".encode("idna")
logger = logging.getLogger("dvc")
def main(argv=None): # noqa: C901
"""Run dvc CLI command.
Args:
argv: optional list of arguments to parse. sys.argv is used by default.
Returns:
int: command's return code.
"""
args = None
disable_other_loggers()
outerLogLevel = logger.level
try:
args = parse_args(argv)
level = None
if args.quiet:
level = logging.CRITICAL
elif args.verbose == 1:
level = logging.DEBUG
elif args.verbose > 1:
level = logging.TRACE
if level is not None:
logger.setLevel(level)
logger.trace(args)
if not args.quiet:
from dvc.ui import ui
ui.enable()
with debugtools(args):
cmd = args.func(args)
ret = cmd.do_run()
except ConfigError:
logger.exception("configuration error")
ret = 251
except KeyboardInterrupt:
logger.exception("interrupted by the user")
ret = 252
except NotDvcRepoError:
logger.exception("")
ret = 253
except DvcParserError:
ret = 254
except DvcException:
ret = 255
logger.exception("")
except Exception as exc: # noqa, pylint: disable=broad-except
# pylint: disable=no-member
import errno
if isinstance(exc, OSError) and exc.errno == errno.EMFILE:
from dvc.utils import error_link
logger.exception(
"too many open files, please visit "
"{} to see how to handle this "
"problem".format(error_link("many-files")),
extra={"tb_only": True},
)
else:
from dvc.info import get_dvc_info
logger.exception("unexpected error")
dvc_info = get_dvc_info()
logger.debug("Version info for developers:\n%s", dvc_info)
logger.info(FOOTER)
ret = 255
try:
from dvc import analytics
if analytics.is_enabled():
analytics.collect_and_send_report(args, ret)
return ret
finally:
logger.setLevel(outerLogLevel)
from dvc.fs.pool import close_pools
        # Closing pools by hand to prevent weird messages when closing SSH
# connections. See https://github.com/iterative/dvc/issues/3248 for
# more info.
close_pools()
from dvc.external_repo import clean_repos
# Remove cached repos in the end of the call, these are anonymous
# so won't be reused by any other subsequent run anyway.
clean_repos()
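# Illustrative entry point (not part of the original module): console scripts
# typically delegate to main() and propagate its return code, e.g.
#
#   import sys
#   from dvc.main import main
#
#   if __name__ == "__main__":
#       sys.exit(main())
#
# main() can also be driven programmatically with an argv list,
# e.g. main(["status", "-q"]).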
|
efiop/dvc
|
dvc/main.py
|
Python
|
apache-2.0
| 3,133
|
[
"VisIt"
] |
baee61d0767b4050893d75fe59128cfb9cabd714ab41bb3c0f541bf4cbf6cda7
|
# coarse graining in MDAnalysis
# Copyright (c) 2015, 2019 Richard J Gowers
# Released under the GNU Lesser General Public License, version 2 or later.
from numpy.testing import assert_allclose
import MDAnalysis as mda
from cguniverse import CGUniverse
import pytest
@pytest.fixture
def water_at_universe():
return mda.Universe('testdata/water.gro')
@pytest.fixture
def water_cg_universe():
mapping = {'SOL': [[0, 1, 2]]}
return CGUniverse('testdata/water.gro', mapping=mapping)
def test_creation(water_cg_universe):
assert len(water_cg_universe.atoms) == 3
def test_pos1(water_at_universe, water_cg_universe):
"""First "atom" position should be COM of first residue"""
for res, atom in zip(water_at_universe.residues, water_cg_universe.atoms):
assert_allclose(res.atoms.center_of_mass(), atom.position)
@pytest.fixture
def oct_atu():
return mda.Universe('testdata/octanol.gro')
@pytest.fixture
def oct_cgu():
return CGUniverse('testdata/octanol.gro',
mapping={'OcOH':[list(range(11)), list(range(11, 27))]})
def test_residue_com(oct_atu, oct_cgu):
# check that the COM of the first residue is unchanged
assert_allclose(oct_atu.residues[0].atoms.center_of_mass(),
oct_cgu.residues[0].atoms.center_of_mass())
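# Running these tests is a plain pytest invocation (assuming the testdata/
# .gro files ship with the repository):
#
#   pytest test_cguniverse.py
#
# As the fixtures above illustrate, a mapping maps a residue name to a list
# of beads, each bead being the list of atom indices (within the residue)
# grouped into it, e.g. two beads of 11 and 16 atoms for a 27-atom octanol.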
|
richardjgowers/MDAnalysis-coarsegraining
|
test_cguniverse.py
|
Python
|
gpl-2.0
| 1,314
|
[
"MDAnalysis"
] |
3a54c06b45119fc84f3233e8a6d16c5aa9ea5abd4437db346a41eed725d1c7d3
|
# -*- coding: utf-8 -*-
#
# helpers.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""PyNEST Microcircuit: Helper Functions
-------------------------------------------
Helper functions for network construction, simulation and evaluation of the
microcircuit.
"""
import os
import numpy as np
# select a non-interactive backend before pyplot is imported,
# otherwise use('Agg') has no effect
if 'DISPLAY' not in os.environ:
    import matplotlib
    matplotlib.use('Agg')
from matplotlib.patches import Polygon
import matplotlib.pyplot as plt
def num_synapses_from_conn_probs(conn_probs, popsize1, popsize2):
"""Computes the total number of synapses between two populations from
connection probabilities.
Here it is irrelevant which population is source and which target.
Parameters
----------
conn_probs
Matrix of connection probabilities.
popsize1
        Size of first population.
popsize2
Size of second population.
Returns
-------
num_synapses
Matrix of synapse numbers.
"""
prod = np.outer(popsize1, popsize2)
num_synapses = np.log(1. - conn_probs) / np.log((prod - 1.) / prod)
return num_synapses
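# Worked example (illustrative numbers, not from the original file): for two
# populations of 100 neurons each, prod = 10000 possible (source, target)
# pairs. A connection probability of 0.1 then gives
#
#   num_synapses = ln(1 - 0.1) / ln(9999 / 10000) ~= 1054,
#
# i.e. the formula inverts conn_prob = 1 - ((prod - 1) / prod)**K, the
# probability that a given pair is connected after drawing K synapses
# uniformly with replacement.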
def postsynaptic_potential_to_current(C_m, tau_m, tau_syn):
r""" Computes a factor to convert postsynaptic potentials to currents.
The time course of the postsynaptic potential ``v`` is computed as
    :math:`v(t)=(i*h)(t)`
with the exponential postsynaptic current
:math:`i(t)=J\mathrm{e}^{-t/\tau_\mathrm{syn}}\Theta (t)`,
the voltage impulse response
:math:`h(t)=\frac{1}{\tau_\mathrm{m}}\mathrm{e}^{-t/\tau_\mathrm{m}}\Theta (t)`,
and
:math:`\Theta(t)=1` if :math:`t\geq 0` and zero otherwise.
The ``PSP`` is considered as the maximum of ``v``, i.e., it is
computed by setting the derivative of ``v(t)`` to zero.
The expression for the time point at which ``v`` reaches its maximum
can be found in Eq. 5 of [1]_.
The amplitude of the postsynaptic current ``J`` corresponds to the
synaptic weight ``PSC``.
References
----------
.. [1] Hanuschkin A, Kunkel S, Helias M, Morrison A and Diesmann M (2010)
A general and efficient method for incorporating precise spike times
in globally time-driven simulations.
Front. Neuroinform. 4:113.
DOI: `10.3389/fninf.2010.00113 <https://doi.org/10.3389/fninf.2010.00113>`__.
Parameters
----------
C_m
Membrane capacitance (in pF).
tau_m
Membrane time constant (in ms).
tau_syn
Synaptic time constant (in ms).
Returns
-------
PSC_over_PSP
Conversion factor to be multiplied to a `PSP` (in mV) to obtain a `PSC`
(in pA).
"""
sub = 1. / (tau_syn - tau_m)
pre = tau_m * tau_syn / C_m * sub
frac = (tau_m / tau_syn) ** sub
PSC_over_PSP = 1. / (pre * (frac**tau_m - frac**tau_syn))
return PSC_over_PSP
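# Illustrative check (parameter values are hypothetical, chosen in the range
# typical for this kind of model): with C_m = 250 pF, tau_m = 10 ms and
# tau_syn = 0.5 ms,
#
#   postsynaptic_potential_to_current(250., 10., 0.5)  # ~= 585.3 pA/mV
#
# so a PSP of 0.15 mV corresponds to a PSC of about 0.15 * 585.3 ~= 87.8 pA.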
def dc_input_compensating_poisson(bg_rate, K_ext, tau_syn, PSC_ext):
""" Computes DC input if no Poisson input is provided to the microcircuit.
Parameters
----------
bg_rate
Rate of external Poisson generators (in spikes/s).
K_ext
External indegrees.
tau_syn
Synaptic time constant (in ms).
PSC_ext
Weight of external connections (in pA).
Returns
-------
DC
        DC input (in pA) which compensates for the missing Poisson input.
"""
DC = bg_rate * K_ext * PSC_ext * tau_syn * 0.001
return DC
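# Worked example (illustrative numbers): with bg_rate = 8 spikes/s,
# K_ext = 1600 external connections, tau_syn = 0.5 ms and PSC_ext = 87.8 pA,
#
#   DC = 8 * 1600 * 87.8 * 0.5 * 0.001 ~= 562 pA,
#
# the mean current the Poisson input would otherwise deliver (the factor
# 0.001 converts tau_syn from ms to s).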
def adjust_weights_and_input_to_synapse_scaling(
full_num_neurons,
full_num_synapses,
K_scaling,
mean_PSC_matrix,
PSC_ext,
tau_syn,
full_mean_rates,
DC_amp,
poisson_input,
bg_rate,
K_ext):
""" Adjusts weights and external input to scaling of indegrees.
The recurrent and external weights are adjusted to the scaling
of the indegrees. Extra DC input is added to compensate for the
scaling in order to preserve the mean and variance of the input.
Parameters
----------
full_num_neurons
Total numbers of neurons.
full_num_synapses
Total numbers of synapses.
K_scaling
Scaling factor for indegrees.
mean_PSC_matrix
Weight matrix (in pA).
PSC_ext
External weight (in pA).
tau_syn
Synaptic time constant (in ms).
full_mean_rates
Firing rates of the full network (in spikes/s).
DC_amp
DC input current (in pA).
poisson_input
True if Poisson input is used.
bg_rate
Firing rate of Poisson generators (in spikes/s).
K_ext
External indegrees.
Returns
-------
PSC_matrix_new
Adjusted weight matrix (in pA).
PSC_ext_new
Adjusted external weight (in pA).
DC_amp_new
Adjusted DC input (in pA).
"""
PSC_matrix_new = mean_PSC_matrix / np.sqrt(K_scaling)
PSC_ext_new = PSC_ext / np.sqrt(K_scaling)
# recurrent input of full network
indegree_matrix = \
full_num_synapses / full_num_neurons[:, np.newaxis]
input_rec = np.sum(mean_PSC_matrix * indegree_matrix * full_mean_rates,
axis=1)
DC_amp_new = DC_amp \
+ 0.001 * tau_syn * (1. - np.sqrt(K_scaling)) * input_rec
if poisson_input:
input_ext = PSC_ext * K_ext * bg_rate
DC_amp_new += 0.001 * tau_syn * (1. - np.sqrt(K_scaling)) * input_ext
return PSC_matrix_new, PSC_ext_new, DC_amp_new
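# Rationale sketch: scaling the indegrees K by K_scaling multiplies the mean
# input by K_scaling and the input variance by K_scaling * w**2 per weight w.
# Dividing the weights by sqrt(K_scaling) restores the variance, since
# K_scaling * K * (w / sqrt(K_scaling))**2 = K * w**2, while the mean drops
# by the factor sqrt(K_scaling); the DC terms above add back exactly the
# missing 0.001 * tau_syn * (1 - sqrt(K_scaling)) * (rate-weighted input)
# for both the recurrent and the Poisson sources. E.g. K_scaling = 0.1
# scales the indegrees down 10x and the weights up by 1/sqrt(0.1) ~= 3.16x.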
def plot_raster(path, name, begin, end, N_scaling):
""" Creates a spike raster plot of the network activity.
Parameters
-----------
path
Path where the spike times are stored.
name
Name of the spike recorder.
begin
Time point (in ms) to start plotting spikes (included).
end
Time point (in ms) to stop plotting spikes (included).
N_scaling
Scaling factor for number of neurons.
Returns
-------
None
"""
fs = 18 # fontsize
ylabels = ['L2/3', 'L4', 'L5', 'L6']
color_list = np.tile(['#595289', '#af143c'], 4)
sd_names, node_ids, data = __load_spike_times(path, name, begin, end)
last_node_id = node_ids[-1, -1]
mod_node_ids = np.abs(node_ids - last_node_id) + 1
label_pos = [(mod_node_ids[i, 0] + mod_node_ids[i + 1, 1]) / 2.
for i in np.arange(0, 8, 2)]
stp = 1
if N_scaling > 0.1:
stp = int(10. * N_scaling)
print(' Only spikes of neurons in steps of {} are shown.'.format(stp))
plt.figure(figsize=(8, 6))
for i, n in enumerate(sd_names):
times = data[i]['time_ms']
neurons = np.abs(data[i]['sender'] - last_node_id) + 1
plt.plot(times[::stp], neurons[::stp], '.', color=color_list[i])
plt.xlabel('time [ms]', fontsize=fs)
plt.xticks(fontsize=fs)
plt.yticks(label_pos, ylabels, fontsize=fs)
plt.savefig(os.path.join(path, 'raster_plot.png'), dpi=300)
def firing_rates(path, name, begin, end):
""" Computes mean and standard deviation of firing rates per population.
The firing rate of each neuron in each population is computed and stored
in a .dat file in the directory of the spike recorders. The mean firing
rate and its standard deviation are printed out for each population.
Parameters
-----------
path
Path where the spike times are stored.
name
Name of the spike recorder.
begin
Time point (in ms) to start calculating the firing rates (included).
end
Time point (in ms) to stop calculating the firing rates (included).
Returns
-------
None
"""
sd_names, node_ids, data = __load_spike_times(path, name, begin, end)
all_mean_rates = []
all_std_rates = []
for i, n in enumerate(sd_names):
senders = data[i]['sender']
# 1 more bin than node ids per population
bins = np.arange(node_ids[i, 0], node_ids[i, 1] + 2)
spike_count_per_neuron, _ = np.histogram(senders, bins=bins)
rate_per_neuron = spike_count_per_neuron * 1000. / (end - begin)
np.savetxt(os.path.join(path, ('rate' + str(i) + '.dat')),
rate_per_neuron)
# zeros are included
all_mean_rates.append(np.mean(rate_per_neuron))
all_std_rates.append(np.std(rate_per_neuron))
print('Mean rates: {} spikes/s'.format(np.around(all_mean_rates, decimals=3)))
print('Standard deviation of rates: {} spikes/s'.format(
np.around(all_std_rates, decimals=3)))
def boxplot(path, populations):
""" Creates a boxblot of the firing rates of all populations.
To create the boxplot, the firing rates of each neuron in each population
need to be computed with the function ``firing_rate()``.
Parameters
-----------
path
Path where the firing rates are stored.
populations
Names of neuronal populations.
Returns
-------
None
"""
fs = 18
pop_names = [string.replace('23', '2/3') for string in populations]
label_pos = list(range(len(populations), 0, -1))
color_list = ['#af143c', '#595289']
medianprops = dict(linestyle='-', linewidth=2.5, color='black')
meanprops = dict(linestyle='--', linewidth=2.5, color='lightgray')
rates_per_neuron_rev = []
for i in np.arange(len(populations))[::-1]:
rates_per_neuron_rev.append(
np.loadtxt(os.path.join(path, ('rate' + str(i) + '.dat'))))
plt.figure(figsize=(8, 6))
bp = plt.boxplot(rates_per_neuron_rev, 0, 'rs', 0, medianprops=medianprops,
meanprops=meanprops, meanline=True, showmeans=True)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
# boxcolors
for i in np.arange(len(populations)):
boxX = []
boxY = []
box = bp['boxes'][i]
for j in list(range(5)):
boxX.append(box.get_xdata()[j])
boxY.append(box.get_ydata()[j])
boxCoords = list(zip(boxX, boxY))
k = i % 2
boxPolygon = Polygon(boxCoords, facecolor=color_list[k])
plt.gca().add_patch(boxPolygon)
plt.xlabel('firing rate [spikes/s]', fontsize=fs)
plt.yticks(label_pos, pop_names, fontsize=fs)
plt.xticks(fontsize=fs)
plt.savefig(os.path.join(path, 'box_plot.png'), dpi=300)
def __gather_metadata(path, name):
""" Reads names and ids of spike recorders and first and last ids of
neurons in each population.
If the simulation was run on several threads or MPI-processes, one name per
spike recorder per MPI-process/thread is extracted.
Parameters
------------
path
Path where the spike recorder files are stored.
name
Name of the spike recorder, typically ``spike_recorder``.
Returns
-------
sd_files
Names of all files written by spike recorders.
sd_names
Names of all spike recorders.
node_ids
Lowest and highest id of nodes in each population.
"""
# load filenames
sd_files = []
sd_names = []
for fn in sorted(os.listdir(path)):
if fn.startswith(name):
sd_files.append(fn)
# spike recorder name and its ID
fnsplit = '-'.join(fn.split('-')[:-1])
if fnsplit not in sd_names:
sd_names.append(fnsplit)
# load node IDs
node_idfile = open(path + 'population_nodeids.dat', 'r')
node_ids = []
for node_id in node_idfile:
node_ids.append(node_id.split())
node_ids = np.array(node_ids, dtype='i4')
return sd_files, sd_names, node_ids
def __load_spike_times(path, name, begin, end):
""" Loads spike times of each spike recorder.
Parameters
----------
path
Path where the files with the spike times are stored.
name
Name of the spike recorder.
begin
Time point (in ms) to start loading spike times (included).
end
Time point (in ms) to stop loading spike times (included).
    Returns
    -------
    sd_names
        Names of all spike recorders.
    node_ids
        Lowest and highest id of nodes in each population.
    data
        Dictionary containing spike times in the interval from ``begin``
        to ``end``.
"""
sd_files, sd_names, node_ids = __gather_metadata(path, name)
data = {}
dtype = {'names': ('sender', 'time_ms'), # as in header
'formats': ('i4', 'f8')}
for i, name in enumerate(sd_names):
data_i_raw = np.array([[]], dtype=dtype)
for j, f in enumerate(sd_files):
if name in f:
# skip header while loading
ld = np.loadtxt(os.path.join(path, f), skiprows=3, dtype=dtype)
data_i_raw = np.append(data_i_raw, ld)
data_i_raw = np.sort(data_i_raw, order='time_ms')
# begin and end are included if they exist
low = np.searchsorted(data_i_raw['time_ms'], v=begin, side='left')
high = np.searchsorted(data_i_raw['time_ms'], v=end, side='right')
data[i] = data_i_raw[low:high]
return sd_names, node_ids, data
|
sdiazpier/nest-simulator
|
pynest/examples/Potjans_2014/helpers.py
|
Python
|
gpl-2.0
| 13,631
|
[
"NEURON"
] |
abef0bb526fbf651ccaa7a99cd9b52f783aabd8d599a83b626a532344ea0a152
|
#!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2011, Regents of the University of California
# All rights reserved.
"""
lttree.py
lttree.py is an extension of the generic ttree.py program.
This version can understand and manipulate ttree-style templates which
are specialized for storing molecule-specific data for use in LAMMPS.
The main difference between lttree.py and ttree.py is:
Unlike ttree.py, lttree.py understands rigid-body movement commands like
"rot()" and "move()", which allow it to reorient and move each copy
of a molecule to a new location. (ttree.py just ignores these commands.
Consequently, LAMMPS input file fragments created with ttree.py have
invalid, overlapping atomic coordinates and must be modified or augmented
later, by loading atomic coordinates from a PDB file or an XYZ file.)
lttree.py understands the "Data Atoms" section of a LAMMPS
data file (in addition to the various "atom_styles" which affect it).
Additional LAMMPS-specific features may be added in the future.
"""
import sys
from ttree import *
from lttree_styles import *
from ttree_matrix_stack import *
try:
unicode
except NameError:
# Python 3
basestring = unicode = str
class LttreeSettings(BasicUISettings):
def __init__(self,
user_bindings_x=None,
user_bindings=None,
order_method='by_command'):
BasicUISettings.__init__(self,
user_bindings_x,
user_bindings,
order_method)
# The following new member data indicate which columns store
# LAMMPS-specific information.
        # The next 6 members keep track of the different columns
# of the "Data Atoms" section of a LAMMPS data file:
self.column_names = [] #<--A list of column names (optional)
self.ii_coords=[] #<--A list of triplets of column indexes storing coordinate data
self.ii_vects=[] #<--A list of triplets of column indexes storing directional data
# (such as dipole or ellipsoid orientations)
self.i_atomid=None #<--An integer indicating which column has the atomid
self.i_atomtype=None #<--An integer indicating which column has the atomtype
self.i_molid=None #<--An integer indicating which column has the molid, if applicable
self.infile=None # Name of the outermost file. This is the file
# which was read at the moment parsing begins.
def LttreeParseArgs(argv, settings):
BasicUIParseArgs(argv, settings)
# Loop over the remaining arguments not processed yet.
# These arguments are specific to the lttree.py program
# and are not understood by ttree.py:
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if ((argv[i].lower() == '-atomstyle') or
(argv[i].lower() == '-atom-style') or
(argv[i].lower() == '-atom_style')):
if i+1 >= len(argv):
raise InputError('Error('+g_program_name+'): The '+argv[i]+' flag should be followed by a LAMMPS\n'
' atom_style name (or single quoted string containing a space-separated\n'
' list of column names such as: atom-ID atom-type q x y z molecule-ID.)\n')
settings.column_names = AtomStyle2ColNames(argv[i+1])
sys.stderr.write('\n \"'+data_atoms+'\" column format:\n')
sys.stderr.write(' '+(' '.join(settings.column_names))+'\n\n')
settings.ii_coords = ColNames2Coords(settings.column_names)
settings.ii_vects = ColNames2Vects(settings.column_names)
settings.i_atomid, settings.i_atomtype, settings.i_molid = ColNames2AidAtypeMolid(settings.column_names)
del(argv[i:i+2])
elif (argv[i].lower() == '-icoord'):
if i+1 >= len(argv):
raise InputError('Error: '+argv[i]+' flag should be followed by list of integers\n'
' corresponding to column numbers for coordinates in\n'
' the \"'+data_atoms+'\" section of a LAMMPS data file.\n')
ilist = argv[i+1].split()
if (len(ilist) % 3) != 0:
                raise InputError('Error: '+argv[i]+' flag should be followed by list of integers.\n'
                                 '       This is usually a list of 3 integers, but it can contain more.\n'
                                 '       The number of coordinate columns must be divisible by 3,\n'
                                 '       (even if the simulation is in 2 dimensions)\n')
            settings.ii_coords = []
            for j in range(0, len(ilist) // 3):
                cols = [int(ilist[3*j])+1,
                        int(ilist[3*j+1])+1,
                        int(ilist[3*j+2])+1]
                settings.ii_coords.append(cols)
del(argv[i:i+2])
elif (argv[i].lower() == '-ivect'):
if i+1 >= len(argv):
raise InputError('Error: '+argv[i]+' flag should be followed by list of integers\n'
' corresponding to column numbers for direction vectors in\n'
' the \"'+data_atoms+'\" section of a LAMMPS data file.\n')
ilist = argv[i+1].split()
if (len(ilist) % 3) != 0:
                raise InputError('Error: '+argv[i]+' flag should be followed by list of integers.\n'
                                 '       This is usually a list of 3 integers, but it can contain more.\n'
                                 '       The number of coordinate columns must be divisible by 3,\n'
                                 '       (even if the simulation is in 2 dimensions)\n')
            settings.ii_vects = []
            for j in range(0, len(ilist) // 3):
                cols = [int(ilist[3*j])+1,
                        int(ilist[3*j+1])+1,
                        int(ilist[3*j+2])+1]
                settings.ii_vects.append(cols)
del(argv[i:i+2])
elif ((argv[i].lower() == '-iatomid') or
(argv[i].lower() == '-iid') or
(argv[i].lower() == '-iatom-id')):
if ((i+1 >= len(argv)) or (not str.isdigit(argv[i+1]))):
raise InputError('Error: '+argv[i]+' flag should be followed by an integer\n'
' (>=1) indicating which column in the \"'+data_atoms+'\" section of a\n'
' LAMMPS data file contains the atom id number (typically 1).\n'
' (This argument is unnecessary if you use the -atomstyle argument.)\n')
            settings.i_atomid = int(argv[i+1])-1
del(argv[i:i+2])
elif ((argv[i].lower() == '-iatomtype') or
(argv[i].lower() == '-itype') or
(argv[i].lower() == '-iatom-type')):
if ((i+1 >= len(argv)) or (not str.isdigit(argv[i+1]))):
raise InputError('Error: '+argv[i]+' flag should be followed by an integer\n'
' (>=1) indicating which column in the \"'+data_atoms+'\" section of a\n'
' LAMMPS data file contains the atom type.\n'
' (This argument is unnecessary if you use the -atomstyle argument.)\n')
            settings.i_atomtype = int(argv[i+1])-1
del(argv[i:i+2])
elif ((argv[i].lower() == '-imolid') or
(argv[i].lower() == '-imol') or
(argv[i].lower() == '-imol-id') or
(argv[i].lower() == '-imoleculeid') or
(argv[i].lower() == '-imolecule-id')):
if ((i+1 >= len(argv)) or (not str.isdigit(argv[i+1]))):
raise InputError('Error: '+argv[i]+' flag should be followed by an integer\n'
' (>=1) indicating which column in the \"'+data_atoms+'\" section of a\n'
' LAMMPS data file contains the molecule id number.\n'
' (This argument is unnecessary if you use the -atomstyle argument.)\n')
            settings.i_molid = int(argv[i+1])-1
del(argv[i:i+2])
elif ((argv[i][0] == '-') and (__name__ == "__main__")):
#elif (__name__ == "__main__"):
raise InputError('Error('+g_program_name+'):\n'
                             'Unrecognized command line argument \"'+argv[i]+'\"\n')
else:
i += 1
if __name__ == "__main__":
# Instantiate the lexer we will be using.
    # (The lexer's __init__() function requires an opened file.
# Assuming __name__ == "__main__", then the name of that file should
# be the last remaining (unprocessed) argument in the argument list.
    # Otherwise, the name of that file will be determined later by the
# python script which imports this module, so we let them handle it.)
if len(argv) == 1:
raise InputError('Error: This program requires at least one argument\n'
' the name of a file containing ttree template commands\n')
elif len(argv) == 2:
try:
settings.lex = TemplateLexer(open(argv[1], 'r'), argv[1]) # Parse text from file
except IOError:
sys.stderr.write('Error: unable to open file\n'
' \"'+argv[1]+'\"\n'
' for reading.\n')
sys.exit(1)
del(argv[1:2])
else:
# if there are more than 2 remaining arguments,
problem_args = ['\"'+arg+'\"' for arg in argv[1:]]
raise InputError('Syntax Error('+g_program_name+'):\n\n'
' Problem with argument list.\n'
' The remaining arguments are:\n\n'
' '+(' '.join(problem_args))+'\n\n'
' (The actual problem may be earlier in the argument list.\n'
' If these arguments are source files, then keep in mind\n'
' that this program can not parse multiple source files.)\n'
' Check the syntax of the entire argument list.\n')
if len(settings.ii_coords) == 0:
sys.stderr.write('########################################################\n'
'## WARNING: atom_style unspecified ##\n'
'## --> \"'+data_atoms+'\" column data has an unknown format ##\n'
'## Assuming atom_style = \"full\" ##\n'
# '########################################################\n'
# '## To specify the \"'+data_atoms+'\" column format you can: ##\n'
# '## 1) Use the -atomstyle \"STYLE\" argument ##\n'
# '## where \"STYLE\" is a string indicating a LAMMPS ##\n'
# '## atom_style, including hybrid styles.(Standard ##\n'
# '## atom styles defined in 2011 are supported.) ##\n'
# '## 2) Use the -atomstyle \"COL_LIST\" argument ##\n'
# '## where \"COL_LIST" is a quoted list of strings ##\n'
# '## indicating the name of each column. ##\n'
# '## Names \"x\",\"y\",\"z\" are interpreted as ##\n'
# '## atomic coordinates. \"mux\",\"muy\",\"muz\" ##\n'
# '## are interpreted as direction vectors. ##\n'
# '## 3) Use the -icoord \"cx cy cz...\" argument ##\n'
# '## where \"cx cy cz\" is a list of integers ##\n'
# '## indicating the column numbers for the x,y,z ##\n'
# '## coordinates of each atom. ##\n'
# '## 4) Use the -ivect \"cmux cmuy cmuz...\" argument ##\n'
# '## where \"cmux cmuy cmuz...\" is a list of ##\n'
# '## integers indicating the column numbers for ##\n'
# '## the vector that determines the direction of a ##\n'
# '## dipole or ellipsoid (ie. a rotateable vector).##\n'
# '## (More than one triplet can be specified. The ##\n'
# '## number of entries must be divisible by 3.) ##\n'
'########################################################\n')
# The default atom_style is "full"
settings.column_names = AtomStyle2ColNames('full')
settings.ii_coords = ColNames2Coords(settings.column_names)
settings.ii_vects = ColNames2Vects(settings.column_names)
settings.i_atomid, settings.i_atomtype, settings.i_molid = ColNames2AidAtypeMolid(settings.column_names)
def TransformAtomText(text, matrix):
""" Apply transformations to the coordinates and other vector degrees
of freedom stored in the \"Data Atoms\" section of a LAMMPS data file.
This is the \"text\" argument.
The \"matrix\" stores the aggregate sum of combined transformations
to be applied.
"""
#sys.stderr.write('matrix_stack.M = \n'+ MatToStr(matrix) + '\n')
lines = text.split('\n')
for i in range(0, len(lines)):
line_orig = lines[i]
ic = line_orig.find('#')
if ic != -1:
line = line_orig[:ic]
comment = ' '+line_orig[ic:].rstrip('\n')
else:
line = line_orig.rstrip('\n')
comment = ''
columns = line.split()
if len(columns) > 0:
if len(columns) == len(settings.column_names)+3:
raise InputError('Error: lttree.py does not yet support integer unit-cell counters \n'
' within the \"'+data_atoms+'\" section of a LAMMPS data file.\n'
' Instead please add the appropriate offsets (these offsets\n'
' should be multiples of the cell size) to the atom coordinates\n'
' in the data file, and eliminate the extra columns. Then try again.\n'
' (If you get this message often, email me and I\'ll fix this limitation.)')
if len(columns) < len(settings.column_names):
raise InputError('Error: The number of columns in your data file does not\n'
' match the LAMMPS atom_style you selected.\n'
' Use the -atomstyle <style> command line argument.\n')
x0 = [0.0, 0.0, 0.0]
x = [0.0, 0.0, 0.0]
# Atomic coordinates transform using "affine" transformations
# (translations plus rotations [or other linear transformations])
for cxcycz in settings.ii_coords:
for d in range(0,3):
x0[d] = float(columns[cxcycz[d]])
AffineTransform(x, matrix, x0) # x = matrix * x0 + b
for d in range(0,3): #("b" is part of "matrix")
columns[cxcycz[d]] = str(x[d])
# Dipole moments and other direction-vectors
            # are not affected by translational movement
for cxcycz in settings.ii_vects:
for d in range(0,3):
x0[d] = float(columns[cxcycz[d]])
LinearTransform(x, matrix, x0) # x = matrix * x0
for d in range(0,3):
columns[cxcycz[d]] = str(x[d])
lines[i] = ' '.join(columns) + comment
return '\n'.join(lines)
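# Illustrative effect (hypothetical "Data Atoms" line, atom_style "full",
# i.e. columns atom-ID molecule-ID atom-type q x y z): a pure translation
# by (10, 0, 0) rewrites
#
#   1 1 1 0.0 1.0 2.0 3.0  # water
# as
#   1 1 1 0.0 11.0 2.0 3.0  # water
#
# (trailing comments are preserved), while direction-vector columns such as
# mux muy muz pass through LinearTransform() and are rotated but never
# translated.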
def CalcCM(text_Atoms,
text_Masses=None,
settings=None):
    types2masses = None
    atomid2mass = None
# Loop through the "Masses" section: what is the mass of each atom type?
if text_Masses != None:
types2masses = {}
lines = text_Masses.split('\n')
for i in range(0, len(lines)):
line = lines[i]
columns = line.split()
if len(columns) == 2:
atomtype = columns[0]
m = float(columns[1])
types2masses[atomtype] = m
lines = text_Atoms.split('\n')
# Pass 1 through the "Data Atoms" section: Determine each atom's mass
    if text_Masses != None:
        assert(settings != None)
        atomid2mass = {}
        for i in range(0, len(lines)):
            line = lines[i]
            columns = line.split()
            if len(columns) == 0:
                continue
            atomid = columns[settings.i_atomid]
            atomtype = columns[settings.i_atomtype]
            if atomtype not in types2masses:
                raise InputError('Error(lttree): You have neglected to define the mass of atom type: \"'+atomtype+'\"\n'
                                 'Did you specify the mass of every atom type using write(\"Masses\"){}?')
            atomid2mass[atomid] = types2masses[atomtype]
    # Pass 2 through the "Data Atoms" section: Find the center of mass.
    tot_m = 0.0
    tot_x = [0.0, 0.0, 0.0]
    for i in range(0, len(lines)):
line = lines[i]
columns = line.split()
if len(columns) > 0:
if len(columns) == len(settings.column_names)+3:
raise InputError('Error: lttree.py does not yet support integer unit-cell counters (ix, iy, iz)\n'
' within the \"'+data_atoms+'\" section of a LAMMPS data file.\n'
' Instead please add the appropriate offsets (these offsets\n'
' should be multiples of the cell size) to the atom coordinates\n'
' in the data file, and eliminate the extra columns. Then try again.\n'
' (If you get this message often, email me and I\'ll fix this limitation.)')
if len(columns) != len(settings.column_names):
raise InputError('Error: The number of columns in your data file does not\n'
' match the LAMMPS atom_style you selected.\n'
' Use the -atomstyle <style> command line argument.\n')
            x = [0.0, 0.0, 0.0]
            if atomid2mass != None:
                atomid = columns[settings.i_atomid]
                m = atomid2mass[atomid]
            else:
                m = 1.0
            tot_m += m
for cxcycz in settings.ii_coords:
for d in range(0,3):
x[d] = float(columns[cxcycz[d]])
tot_x[d] += x[d]
            # Note: dipole moments and other direction vectors don't affect
# the center of mass. So I commented out the loop below.
#for cxcycz in settings.ii_vects:
# for d in range(0,3):
# v[d] = float(columns[cxcycz[d]])
lines[i] = ' '.join(columns)
xcm = [0.0, 0.0, 0.0]
for d in range(0,3):
xcm[d] = tot_x[d] / tot_m
return xcm
def _ExecCommands(command_list,
index,
global_files_content,
settings,
matrix_stack,
current_scope_id=None,
substitute_vars=True):
"""
_ExecCommands():
The argument "commands" is a nested list of lists of
"Command" data structures (defined in ttree.py).
Carry out the write() and write_once() commands (which
write out the contents of the templates contain inside them).
Instead of writing the files, save their contents in a string.
The argument "global_files_content" should be of type defaultdict(list)
It is an associative array whose key is a string (a filename)
and whose value is a lists of strings (of rendered templates).
"""
files_content = defaultdict(list)
postprocessing_commands = []
while index < len(command_list):
command = command_list[index]
index += 1
# For debugging only
if ((not isinstance(command, StackableCommand)) and
(not isinstance(command, ScopeCommand)) and
(not isinstance(command, WriteFileCommand))):
sys.stderr.write(str(command)+'\n')
if isinstance(command, PopCommand):
assert(current_scope_id != None)
if command.context_node == None:
command.context_node = current_scope_id
if isinstance(command, PopRightCommand):
matrix_stack.PopRight(which_stack = command.context_node)
elif isinstance(command, PopLeftCommand):
matrix_stack.PopLeft(which_stack = command.context_node)
else:
assert(False)
elif isinstance(command, PushCommand):
assert(current_scope_id != None)
if command.context_node == None:
command.context_node = current_scope_id
# Some commands are post-processing commands, and must be
# carried out AFTER all the text has been rendered. For example
# the "movecm(0,0,0)" waits until all of the coordinates have
# been rendered, calculates the center-of-mass, and then applies
# a translation moving the center of mass to the origin (0,0,0).
# We need to figure out which of these commands need to be
# postponed, and which commands can be carried out now.
# ("now"=pushing transformation matrices onto the matrix stack).
# UNFORTUNATELY POSTPONING SOME COMMANDS MAKES THE CODE UGLY
transform_list = command.contents.split('.')
transform_blocks = []
i_post_process = -1
# Example: Suppose:
#command.contents = '.rot(30,0,0,1).movecm(0,0,0).rot(45,1,0,0).scalecm(2.0).move(-2,1,0)'
# then
#transform_list = ['rot(30,0,0,1)', 'movecm(0,0,0)', 'rot(45,1,0,0)', 'scalecm(2.0)', 'move(-2,1,0)']
# Note: the first command 'rot(30,0,0,1)' is carried out now.
# The remaining commands are carried out during post-processing,
            # (when processing the "ScopeEnd" command).
#
# We break up the commands into "blocks" separated by center-
# of-mass transformations ('movecm', 'rotcm', or 'scalecm')
#
# transform_blocks = ['.rot(30,0,0,1)',
# '.movecm(0,0,0).rot(45,1,0,0)',
# '.scalecm(2.0).move(-2,1,0)']
i = 0
while i < len(transform_list):
transform_block = ''
while i < len(transform_list):
transform = transform_list[i]
i += 1
if transform != '':
transform_block += '.' + transform
transform = transform.split('(')[0]
if ((transform == 'movecm') or
(transform == 'rotcm') or
(transform == 'scalecm')):
break
transform_blocks.append(transform_block)
if len(postprocessing_commands) == 0:
# The first block (before movecm, rotcm, or scalecm)
# can be executed now by modifying the matrix stack.
if isinstance(command, PushRightCommand):
matrix_stack.PushCommandsRight(transform_blocks[0].strip('.'),
command.srcloc,
which_stack=command.context_node)
elif isinstance(command, PushLeftCommand):
matrix_stack.PushCommandsLeft(transform_blocks[0].strip('.'),
command.srcloc,
which_stack=command.context_node)
# Everything else must be saved for later.
postprocessing_blocks = transform_blocks[1:]
else:
# If we already encountered a "movecm" "rotcm" or "scalecm"
# then all of the command blocks must be handled during
# postprocessing.
postprocessing_blocks = transform_blocks
for transform_block in postprocessing_blocks:
                assert(isinstance(transform_block, basestring))
if isinstance(command, PushRightCommand):
postprocessing_commands.append(PushRightCommand(transform_block,
command.srcloc,
command.context_node))
elif isinstance(command, PushLeftCommand):
postprocessing_commands.append(PushLeftCommand(transform_block,
command.srcloc,
command.context_node))
elif isinstance(command, WriteFileCommand):
            # --- Throw away lines containing references to deleted variables: ---
# First: To edit the content of a template,
# you need to make a deep local copy of it
tmpl_list = []
for entry in command.tmpl_list:
if isinstance(entry, TextBlock):
tmpl_list.append(TextBlock(entry.text,
entry.srcloc)) #, entry.srcloc_end))
else:
tmpl_list.append(entry)
# Now throw away lines with deleted variables
DeleteLinesWithBadVars(tmpl_list)
# --- Now render the text ---
text = Render(tmpl_list,
substitute_vars)
# ---- Coordinates of the atoms, must be rotated
# and translated after rendering.
# In addition, other vectors (dipoles, ellipsoid orientations)
# must be processed.
# This requires us to re-parse the contents of this text
# (after it has been rendered), and apply these transformations
# before passing them on to the caller.
if command.filename == data_atoms:
text = TransformAtomText(text, matrix_stack.M)
files_content[command.filename].append(text)
elif isinstance(command, ScopeBegin):
if isinstance(command.node, InstanceObj):
if ((command.node.children != None) and
(len(command.node.children) > 0)):
matrix_stack.PushStack(command.node)
# "command_list" is a long list of commands.
# ScopeBegin and ScopeEnd are (usually) used to demarcate/enclose
# the commands which are issued for a single class or
# class instance. _ExecCommands() carries out the commands for
# a single class/instance. If we reach a ScopeBegin(),
# then recursively process the commands belonging to the child.
index = _ExecCommands(command_list,
index,
files_content,
settings,
matrix_stack,
command.node,
substitute_vars)
elif isinstance(command, ScopeEnd):
if data_atoms in files_content:
for ppcommand in postprocessing_commands:
if data_masses in files_content:
xcm = CalcCM(files_content[data_atoms],
files_content[data_masses],
settings)
else:
xcm = CalcCM(files_content[data_atoms])
if isinstance(ppcommand, PushRightCommand):
matrix_stack.PushCommandsRight(ppcommand.contents,
ppcommand.srcloc,
xcm,
which_stack=command.context_node)
elif isinstance(ppcommand, PushLeftCommand):
matrix_stack.PushCommandsLeft(ppcommand.contents,
ppcommand.srcloc,
xcm,
which_stack=command.context_node)
files_content[data_atoms] = \
TransformAtomText(files_content[data_atoms],
matrix_stack.M)
for ppcommand in postprocessing_commands:
matrix_stack.Pop(which_stack = command.context_node)
#(same as PopRight())
if isinstance(command.node, InstanceObj):
if ((command.node.children != None) and
(len(command.node.children) > 0)):
matrix_stack.PopStack()
# "ScopeEnd" means we're done with this class/instance.
break
else:
assert(False)
# no other command types allowed at this point
# After processing the commands in this list,
# merge the templates with the callers template list
for filename, tmpl_list in files_content.items():
global_files_content[filename] += \
files_content[filename]
return index
def ExecCommands(commands,
files_content,
settings,
substitute_vars=True):
matrix_stack = MultiAffineStack()
index = _ExecCommands(commands,
0,
files_content,
settings,
matrix_stack,
None,
substitute_vars)
assert(index == len(commands))
def WriteFiles(files_content, suffix='', write_to_stdout=True):
for filename, str_list in files_content.items():
if filename != None:
out_file = None
if filename == '':
if write_to_stdout:
out_file = sys.stdout
else:
out_file = open(filename+suffix, 'a')
if out_file != None:
out_file.write(''.join(str_list))
if filename != '':
out_file.close()
if __name__ == "__main__":
"""
    This is a "main module" wrapper for invoking lttree.py
    as a stand-alone program. This program:
    1) reads a ttree file,
    2) constructs a tree of class definitions (g_objectdefs),
    3) constructs a tree of instantiated class objects (g_objects),
    4) automatically assigns values to the variables,
    5) and carries out the "write" commands to write the templates to file(s).
"""
g_program_name = __file__.split('/')[-1] # ='lttree.py'
g_date_str = '2014-12-19'
g_version_str = '0.75'
####### Main Code Below: #######
sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+' ')
sys.stderr.write('\n(python version '+str(sys.version)+')\n')
if sys.version < '2.6':
raise InputError('Error: Alas, you must upgrade to a newer version of python.')
try:
#settings = BasicUISettings()
#BasicUIParseArgs(sys.argv, settings)
settings = LttreeSettings()
LttreeParseArgs(sys.argv, settings)
        # Data structures to store the class definitions and instances
g_objectdefs = StaticObj('', None) # The root of the static tree
# has name '' (equivalent to '/')
g_objects = InstanceObj('', None) # The root of the instance tree
# has name '' (equivalent to '/')
# A list of commands to carry out
g_static_commands = []
g_instance_commands = []
BasicUI(settings,
g_objectdefs,
g_objects,
g_static_commands,
g_instance_commands)
        # Interpret the commands. (These are typically write() or
        # write_once() commands, rendering templates into text.)
# This step also handles coordinate transformations and delete commands.
# Coordinate transformations can be applied to the rendered text
# as a post-processing step.
sys.stderr.write(' done\nbuilding templates...')
files_content = defaultdict(list)
ExecCommands(g_static_commands,
files_content,
settings,
False)
ExecCommands(g_instance_commands,
files_content,
settings,
False)
# Finally: write the rendered text to actual files.
# Erase the files that will be written to:
sys.stderr.write(' done\nwriting templates...')
EraseTemplateFiles(g_static_commands)
EraseTemplateFiles(g_instance_commands)
# Write the files as templates
# (with the original variable names present)
WriteFiles(files_content, suffix=".template", write_to_stdout=False)
# Write the files with the variables substituted by values
sys.stderr.write(' done\nbuilding and rendering templates...')
files_content = defaultdict(list)
ExecCommands(g_static_commands, files_content, settings, True)
ExecCommands(g_instance_commands, files_content, settings, True)
sys.stderr.write(' done\nwriting rendered templates...\n')
WriteFiles(files_content)
sys.stderr.write(' done\n')
        # Now write the variable bindings/assignments table.
sys.stderr.write('writing \"ttree_assignments.txt\" file...')
open('ttree_assignments.txt', 'w').close() # <-- erase previous version.
WriteVarBindingsFile(g_objectdefs)
WriteVarBindingsFile(g_objects)
sys.stderr.write(' done\n')
except (ValueError, InputError) as err:
sys.stderr.write('\n\n'+str(err)+'\n')
sys.exit(-1)
|
ganzenmg/lammps_current
|
tools/moltemplate/src/lttree.py
|
Python
|
gpl-2.0
| 35,180
|
[
"LAMMPS"
] |
c870f6feeef543b7d5e893a9f46674176bd0800aaccdee1ac6e81f077f691fe1
|
# $Id$
##
## This file is part of pyFormex 0.8.9 (Fri Nov 9 10:49:51 CET 2012)
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: http://savannah.nongnu.org/projects/pyformex/
## Copyright 2004-2012 (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""A collection of miscellaneous utility functions.
"""
from __future__ import print_function
import pyformex as pf
import os,re,sys,tempfile,time
from config import formatDict
from odict import ODict
from distutils.version import LooseVersion as SaneVersion
# Python modules we know how to use
# Do not include pyformex or python here: they are predefined
# and could be erased by the detection
# The value is a sequence of:
# - module name
# - module name to load the version
# - a sequence of attributes to get the version
# If empty, the attribute is supposed to be '__version__'
# If module name is an empty string, it is supposed to be equal to our alias
known_modules = {
'calpy' : (),
'dicom' : (),
'docutils' : (),
'gdcm' : ('','','GDCM_VERSION'),
'gl2ps' : ('','','GL2PS_VERSION'),
'gnuplot' : ('Gnuplot',),
'matplotlib': (),
'numpy' : (),
'pyopengl' : ('OpenGL',),
'pyqt4' : ('PyQt4.QtCore','PyQt4','QtCore','QT_VERSION_STR'),
'pyqt4gl' : ('PyQt4.QtOpenGL','PyQt4','QtCore','QT_VERSION_STR'),
'pyside' : ('PySide',),
'vtk' : ('','','VTK_VERSION'),
}
known_externals = {
    # NOTE: the abaqus command may hang for a long time while checking the license server
# 'abaqus': ('abaqus info=sys|head -n2|tail -n1', 'Abaqus (\S+)'),
'admesh': ('admesh --version', 'ADMesh - version (\S+)'),
'calculix': ('ccx -v','.*version (\S+)'),
'calix': ('calix --version','CALIX-(\S+)'),
'calpy': ('calpy --version','Calpy (\S+)'),
'dxfparser': ('pyformex-dxfparser --version','dxfparser (\S+)'),
'ffmpeg': ('ffmpeg -version','FFmpeg version (\S+)'),
'gts': ('gtsset -h','Usage(:) '),
'imagemagick': ('import -version','Version: ImageMagick (\S+)'),
'postabq': ('pyformex-postabq -V','postabq (\S+).*'),
'python': ('python --version','Python (\\S+)'),
'recordmydesktop': ('recordmydesktop --version','recordMyDesktop v(\S+)'),
'tetgen': ('tetgen -h |fgrep Version','Version (\S+)'),
'units': ('units --version','GNU Units version (\S+)'),
'vmtk': ('vmtk --help','Usage: vmtk(\S+).*'),
}
# Some regular expressions
digits = re.compile(r'(\d+)')
# versions of detected modules
the_version = ODict({
'pyformex':pf.__version__,
'python':sys.version.split()[0],
})
# versions of detected external commands
the_external = ODict({})
def checkVersion(name,version,external=False):
"""Checks a version of a program/module.
name is either a module or an external program whose availability has
been registered.
Default is to treat name as a module. Add external=True for a program.
    Return value is -1, 0 or 1, depending on whether the version found is
    <, == or > the requested version.
This should normally understand version numbers in the format 2.10.1
Returns -2 if no version found.
"""
if external:
ver = hasExternal(name)
else:
ver = hasModule(name)
if not ver:
return -2
if SaneVersion(ver) > SaneVersion(version):
return 1
elif SaneVersion(ver) == SaneVersion(version):
return 0
else:
return -1
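# Usage sketch (hypothetical version numbers):
#
#   if checkVersion('numpy', '1.6') >= 0:
#       pass  # numpy is available and at least version 1.6
#   if checkVersion('tetgen', '1.4', external=True) == -2:
#       pass  # the tetgen program was not found at all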
def hasModule(name,check=False):
"""Test if we have the named module available.
    Returns a non-empty (version) string if the module is available,
or an empty string if it is not.
By default, the module is only checked on the first call.
The result is remembered in the the_version dict.
The optional argument check==True forces a new detection.
"""
if name in the_version and not check:
return the_version[name]
else:
return checkModule(name)
def requireModule(name):
"""Ensure that the named Python module is available.
If the module is not available, an error is raised.
"""
if not hasModule(name):
if name in known_modules:
# Get the correct name, if different from our alias
try:
name = known_modules[name][0]
except:
pass
attr = 'required'
else:
attr = 'unknown'
errmsg = "Could not load %s module '%s'" % (attr,name)
pf.error(errmsg)
sys.exit()
def checkAllModules():
"""Check the existence of all known modules.
This also sorts the modules alphabetically
"""
[ checkModule(n,quiet=True) for n in known_modules ]
return
def checkModule(name,ver=(),fatal=False,quiet=False):
"""Check if the named Python module is available, and record its version.
ver is a tuple of:
- modname: name of the module to test import
- vername: name of the module holding the version string
- more fields are consecutive attributes leading to the version string
The obtained version string is returned, empty if the module could not
be loaded.
The (name,version) pair is also inserted into the the_version dict.
If fatal=True, pyFormex will abort if the module can not be loaded.
"""
if len(ver) == 0 and name in known_modules:
ver = known_modules[name]
modname = name
if len(ver) > 0 and len(ver[0]) > 0:
modname = ver[0]
try:
if not quiet:
pf.debug(modname,pf.DEBUG.DETECT)
m = __import__(modname)
if not quiet:
pf.debug(m,pf.DEBUG.DETECT)
if len(ver) > 1 and len(ver[1]) > 0:
modname = ver[1]
m = __import__(modname)
if not quiet:
pf.debug(m,pf.DEBUG.DETECT)
ver = ver[2:]
if len(ver) == 0:
ver = ('__version__',)
for a in ver:
m = getattr(m,a)
if not quiet:
pf.debug(m,pf.DEBUG.DETECT)
except:
        # failure: non-existent or unregistered modules
if fatal:
raise
m = ''
#print("Module %s: Version %s" % (name,m))
# make sure version is a string (e.g. gl2ps uses a float!)
m = str(m)
_congratulations(name,m,'module',fatal,quiet=quiet)
#if version:
the_version[name] = m
return m
def hasExternal(name,force=False):
"""Test if we have the external command 'name' available.
    Returns a non-empty string if the command is available,
or an empty string if it is not.
The external command is only checked on the first call.
The result is remembered in the the_external dict.
"""
if name in the_external and not force:
return the_external[name]
else:
return checkExternal(name)
def checkExternal(name=None,command=None,answer=None,quiet=False):
"""Check if the named external command is available on the system.
name is the generic command name,
command is the command as it will be executed to check its operation,
answer is a regular expression to match positive answers from the command.
answer should contain at least one group. In case of a match, the
contents of the match will be stored in the the_external dict
with name as the key. If the result does not match the specified answer,
an empty value is inserted.
Usually, command will contain an option to display the version, and
the answer re contains a group to select the version string from
the result.
As a convenience, we provide a list of predeclared external commands,
that can be checked by their name alone.
If no name is given, all commands in that list are checked, and no
value is returned.
"""
if name is None:
[ checkExternal(n,quiet=True) for n in known_externals.keys() ]
return
if command is None or answer is None:
cmd,ans = known_externals.get(name,(name,'(.+)\n'))
if command is None:
command = cmd
if answer is None:
answer = ans
pf.debug("Check %s\n%s" % (name,command),pf.DEBUG.DETECT)
out = system(command)[1]
pf.debug("Output:\n%s" % (out),pf.DEBUG.DETECT)
m = re.match(answer,out)
if m:
version = m.group(1)
else:
version = ''
_congratulations(name,version,'program',quiet=quiet)
#if version:
the_external[name] = version
return version
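# Registration sketch (hypothetical command name): a program that is not in
# known_externals can be checked by passing the command and answer pattern
# explicitly; the first regex group becomes the stored version string:
#
#   checkExternal('mytool', 'mytool --version', 'mytool (\S+)')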
def _congratulations(name,version,typ='module',fatal=False,quiet=False,severity=2):
"""Report a detected module/program."""
if version:
if not quiet:
pf.debug("Congratulations! You have %s (%s)" % (name,version),pf.DEBUG.DETECT)
else:
if not quiet or fatal:
pf.debug("ALAS! I could not find %s '%s' on your system" % (typ,name),pf.DEBUG.DETECT)
if fatal:
pf.error("Sorry, I'm getting out of here....")
sys.exit()
def Libraries():
from lib import accelerated
acc = [ m.__name__ for m in accelerated ]
return ', '.join(acc)
def reportDetected():
notfound = '** Not Found **'
s = "%s\n" % pf.fullVersion()
s += "\nInstall type: %s\n" % pf.installtype
s += "\npyFormex C libraries: %s\n" % Libraries()
s += "\nPython version: %s\n" % sys.version
s += "\nOperating system: %s\n" % sys.platform
s += "\nDetected Python Modules:\n"
the_version.sort(sorted(the_version.keys()))
for k in the_version:
v = the_version[k]
if not v:
v = notfound
s += "%s (%s)\n" % ( k,v)
s += "\nDetected External Programs:\n"
the_external.sort(sorted(the_external.keys()))
for k in the_external:
v = the_external[k]
if not v:
v = notfound
s += "%s (%s)\n" % ( k,v)
return s
def procInfo(title):
print(title)
print('module name: %s' % __name__)
print('parent process: %s' % os.getppid())
print('process id: %s' % os.getpid())
def prefixFiles(prefix,files):
"""Prepend a prefix to a list of filenames."""
return [ os.path.join(prefix,f) for f in files ]
def matchMany(regexps,target):
"""Return multiple regular expression matches of the same target string."""
return [re.match(r,target) for r in regexps]
def matchCount(regexps,target):
"""Return the number of matches of target to regexps."""
return len(filter(None,matchMany(regexps,target)))
def matchAny(regexps,target):
"""Check whether target matches any of the regular expressions."""
return matchCount(regexps,target) > 0
def matchNone(regexps,target):
"""Check whether targes matches none of the regular expressions."""
return matchCount(regexps,target) == 0
def matchAll(regexps,target):
"""Check whether targets matches all of the regular expressions."""
return matchCount(regexps,target) == len(regexps)
def listTree(path,listdirs=True,topdown=True,sorted=False,excludedirs=[],excludefiles=[],includedirs=[],includefiles=[],symlinks=True):
"""List all files in path.
If ``listdirs==False``, directories are not listed.
By default the tree is listed top down and entries in the same directory
are unsorted.
    `excludedirs` and `excludefiles` are lists of regular expressions with
dirnames, resp. filenames to exclude from the result.
`includedirs` and `includefiles` can be given to include only the
directories, resp. files matching any of those patterns.
Note that 'excludedirs' and 'includedirs' force top down handling.
If `symlinks` is set False, symbolic links are removed from the list.
"""
filelist = []
if excludedirs or includedirs:
topdown = True
for root, dirs, files in os.walk(path, topdown=topdown):
if sorted:
dirs.sort()
files.sort()
if excludedirs:
remove = [ d for d in dirs if matchAny(excludedirs,d) ]
for d in remove:
dirs.remove(d)
if includedirs:
remove = [ d for d in dirs if not matchAny(includedirs,d) ]
for d in remove:
dirs.remove(d)
if listdirs and topdown:
filelist.append(root)
if excludefiles:
files = [ f for f in files if matchNone(excludefiles,f) ]
if includefiles:
files = [ f for f in files if matchAny(includefiles,f) ]
filelist.extend(prefixFiles(root,files))
if listdirs and not topdown:
filelist.append(root)
if not symlinks:
filelist = [ f for f in filelist if not os.path.islink(f) ]
return filelist
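# Usage sketch (hypothetical path and patterns): list all .py files below
# 'src', skipping any '.svn' directories:
#
#   pyfiles = listTree('src', listdirs=False, sorted=True,
#                      excludedirs=['\.svn'], includefiles=['.*\.py$'])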
def removeFile(filename):
"""Remove a file, ignoring error when it does not exist."""
if os.path.exists(filename):
os.remove(filename)
def removeTree(path,top=True):
"""Remove all files below path. If top==True, also path is removed."""
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
if top:
os.rmdir(path)
def sourceFiles(relative=False,symlinks=True,extended=False):
"""Return a list of the pyFormex source .py files.
    - `symlinks`: if False, files that are symbolic links are removed from the
      list. The default is to retain them.
- `extended`: if True, the .py files in all the paths in the configured
appdirs and scriptdirs are also added.
"""
path = pf.cfg['pyformexdir']
if relative:
path = os.path.relpath(path)
files = listTree(path,listdirs=False,sorted=True,includedirs=['gui','plugins','apps','examples','lib','opengl'],includefiles=['.*\.py$'],symlinks=symlinks)
if extended:
searchdirs = [ i[1] for i in pf.cfg['appdirs'] + pf.cfg['scriptdirs'] ]
for path in set(searchdirs):
if os.path.exists(path):
files += listTree(path,listdirs=False,sorted=True,includefiles=['.*\.py$'],symlinks=symlinks)
return files
def grepSource(pattern,options='',relative=True,quiet=False):
"""Finds pattern in the pyFormex source .py files.
Uses the `grep` program to find all occurrences of some specified
pattern text in the pyFormex source .py files (including the examples).
Extra options can be passed to the grep command. See `man grep` for
more info.
Returns the output of the grep command.
"""
opts = options.split(' ')
if '-a' in opts:
opts.remove('-a')
options = ' '.join(opts)
extended = True
else:
extended = False
files = sourceFiles(relative=relative,extended=extended,symlinks=False)
cmd = "grep %s '%s' %s" % (options,pattern,' '.join(files))
sta,out = runCommand(cmd,verbose=not quiet)
return out
###################### locale ###################
def setSaneLocale(localestring=''):
"""Set a sane local configuration for LC_NUMERIC.
`localestring` is the locale string to be set, e.g. 'en_US.UTF-8'
This will change the ``LC_ALL`` setting to the specified string,
    and set ``LC_NUMERIC`` to 'C'.
    Changing the LC_NUMERIC setting is a very bad idea! It causes floating
    point values to be read or written with a comma instead of the decimal
    point, which of course makes input and output files completely
    incompatible. You will often not be able to process these files any
    further, and will create a lot of trouble for yourself and other people
    if you use an LC_NUMERIC setting different from the standard.
Because we do not want to help you shoot yourself in the foot, this
function always sets ``LC_NUMERIC`` back to a sane value and we
call this function when pyFormex is starting up.
"""
import locale
locale.setlocale(locale.LC_ALL,localestring)
locale.setlocale(locale.LC_NUMERIC, 'C')
locale.setlocale(locale.LC_COLLATE, 'C')
##########################################################################
## Text conversion tools ##
############################
def strNorm(s):
"""Normalize a string.
    Text normalization removes all '&' characters and converts the text to lower case.
"""
return str(s).replace('&','').lower()
###################### ReST conversion ###################
#try:
# be quiet, because the import is done early
if checkModule('docutils',quiet=True):
from docutils.core import publish_string
def rst2html(text,writer='html'):
return publish_string(text,writer_name=writer)
#except ImportError:
else:
def rst2html(text,writer='html'):
return """.. note:
This is a reStructuredText message, but it is currently displayed
as plain text, because it could not be converted to html.
If you install python-docutils, you will see this text (and other
pyFormex messages) in a much nicer layout!
""" + text
def forceReST(text,underline=False):
"""Convert a text string to have it recognized as reStructuredText.
Returns the text with two lines prepended: a line with '..'
and a blank line. The text display functions will then recognize the
string as being reStructuredText. Since the '..' starts a comment in
reStructuredText, it will not be displayed.
Furthermore, if `underline` is set True, the first line of the text
will be underlined to make it appear as a header.
"""
if underline:
text = underlineHeader(text)
return "..\n\n" + text
def underlineHeader(s,char='-'):
"""Underline the first line of a text.
Adds a new line of text below the first line of s. The new line
has the same length as the first, but all characters are equal to
the specified char.
"""
i = s.find('\n')
if i < 0:
i = len(s)
return s[:i] + '\n' + char*i + s[i:]
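# Example: underlineHeader('Results\nsome text') returns
#
#   Results
#   -------
#   some text
#
# and forceReST(s, underline=True) additionally prepends the '..' comment
# line so the text display functions treat s as reStructuredText.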
###################### file conversion ###################
def dos2unix(infile,outfile=None):
if outfile is None:
cmd = "sed -i 's|\\r||' %s" % infile
else:
cmd = "sed -i 's|\\r||' %s > %s" % (infile,outfile)
return runCommand(cmd)
def unix2dos(infile,outfile=None):
if outfile is None:
cmd = "sed -i 's|$|\\r|' %s" % infile
else:
cmd = "sed -i 's|$|\\r|' %s > %s" % (infile,outfile)
return runCommand(cmd)
def gzip(filename,gzipped=None,remove=True,level=5):
"""Compress a file in gzip format.
Parameters:
- `filename`: input file name
- `gzipped`: output file name. If not specified, it will be set to
the input file name + '.gz'. An existing output file will be
overwritten.
- `remove`: if True (default), the input file is removed after
successful compression
- `level`: an integer from 1..9: gzip compression level. Higher values
result in smaller files, but require longer compression times. The
default of 5 gives already a fairly good compression ratio.
Returns the name of the compressed file.
"""
import gzip
if gzipped is None:
gzipped = filename+'.gz'
fil = open(filename,'rb')
gz = gzip.open(gzipped,'wb',compresslevel=level)
gz.write(fil.read())
fil.close()
gz.close()
if remove:
removeFile(filename)
return gzipped
def gunzip(filename,unzipped=None,remove=True):
"""Uncompress a file in gzip format.
Parameters:
- `filename`: compressed input file name (usually ending in '.gz')
- `unzipped`: output file name. If not specified and `filename` ends with
'.gz', it will be set to the `filename` with the '.gz' removed.
If an empty string is specified or it is not specified and the filename
does not end in '.gz', the name of a temporary file is generated. Since
you will normally want to read something from the decompressed file, this
temporary file is not deleted after closing. It is up to the user to
delete it (using the returned file name) when done with it.
- `remove`: if True (default), the input file is removed after
successful decompression. You probably want to set this to False when
decompressing to a temporary file.
Returns the name of the decompressed file.
"""
import gzip
gz = gzip.open(filename,'rb')
if unzipped is None and filename.endswith('.gz'):
unzipped = filename[:-3]
if unzipped:
fil = open(unzipped,'wb')
else:
fil = tempFile(prefix='gunzip-',delete=False)
unzipped = fil.name
fil.write(gz.read())
gz.close()
fil.close()
if remove:
removeFile(filename)
return unzipped
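# Editor's sketch of a gzip()/gunzip() round trip. The file name 'data.txt'
# is hypothetical; removal of the input files relies on removeFile(), which
# gzip() and gunzip() above already use.
def _demo_gzip_roundtrip():
    open('data.txt', 'w').write('some payload\n')
    gz = gzip('data.txt')    # -> 'data.txt.gz', removes 'data.txt'
    fn = gunzip(gz)          # -> 'data.txt', removes 'data.txt.gz'
    print(open(fn).read())   # 'some payload'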
##########################################################################
## File names and formats ##
############################
def all_image_extensions():
    """Return a list with all known image extensions.
    Currently returns an empty list: the list of known image
    extensions still needs to be filled in.
    """
    imgfmt = []
    return imgfmt
file_description = {
'all': 'All files (*)',
'ccx': 'CalculiX files (*.dat *.inp)',
'dxf': 'AutoCAD .dxf files (*.dxf)',
'dxfall': 'AutoCAD .dxf or converted(*.dxf *.dxftext)',
'dxftext': 'Converted AutoCAD files (*.dxftext)',
'flavia' : 'flavia results (*.flavia.msh *.flavia.res)',
'gts': 'GTS files (*.gts)',
'html': 'Web pages (*.html)',
'icon': 'Icons (*.xpm)',
'img': 'Images (*.png *.jpg *.eps *.gif *.bmp)',
'inp': 'Abaqus or CalculiX input files (*.inp)',
'neu': 'Gambit Neutral files (*.neu)',
'off': 'OFF files (*.off)',
'pgf': 'pyFormex geometry files (*.pgf)',
'png': 'PNG images (*.png)',
'postproc': 'Postproc scripts (*_post.py *.post)',
'pyformex': 'pyFormex scripts (*.py *.pye)',
'pyf': 'pyFormex projects (*.pyf)',
'smesh': 'Tetgen surface mesh files (*.smesh)',
'stl': 'STL files (*.stl)',
'stlb': 'Binary STL files (*.stl)', # Use only for output
'surface': 'Surface model (*.off *.gts *.stl *.off.gz *.gts.gz *.stl.gz *.neu *.smesh)',
'tetgen': 'Tetgen file (*.poly *.smesh *.ele *.face *.edge *.node *.neigh)',
}
def fileDescription(ftype):
"""Return a description of the specified file type.
The descriptions of known types are listed in the dict file_description.
If the type is unknown, the returned string has the form
``TYPE files (*.type)``
"""
if type(ftype) is list:
return map(fileDescription,ftype)
ftype = ftype.lower()
return file_description.get(ftype,"%s files (*.%s)" % (ftype.upper(),ftype))
def fileType(ftype):
"""Normalize a filetype string.
The string is converted to lower case and a leading dot is removed.
This makes it fit for use with a filename extension.
Example:
>>> fileType('pdf')
'pdf'
>>> fileType('.pdf')
'pdf'
>>> fileType('PDF')
'pdf'
>>> fileType('.PDF')
'pdf'
"""
ftype = ftype.lower()
if len(ftype) > 0 and ftype[0] == '.':
ftype = ftype[1:]
return ftype
def fileTypeFromExt(fname):
"""Derive the file type from the file name.
The derived file type is the file extension part in lower case and
without the leading dot.
Example:
>>> fileTypeFromExt('pyformex.pdf')
'pdf'
>>> fileTypeFromExt('.pyformexrc')
''
>>> fileTypeFromExt('pyformex')
''
>>> fileTypeFromExt('pyformex.pgf')
'pgf'
>>> fileTypeFromExt('pyformex.pgf.gz')
'pgf.gz'
>>> fileTypeFromExt('pyformex.gz')
'gz'
"""
name,ext = os.path.splitext(fname)
ext = fileType(ext)
if ext == 'gz':
ext1 = fileTypeFromExt(name)
if ext1:
ext = '.'.join([ext1,ext])
return ext
def fileSize(fn):
"""Return the size in bytes of the file fn"""
return os.path.getsize(fn)
def findIcon(name):
"""Return the file name for an icon with given name.
If no icon file is found, returns the question mark icon.
"""
fname = os.path.join(pf.cfg['icondir'],name) + pf.cfg['gui/icontype']
if os.path.exists(fname):
return fname
return os.path.join(pf.cfg['icondir'],'question') + pf.cfg['gui/icontype']
def projectName(fn):
"""Derive a project name from a file name.
The project name is the basename of the file without the extension.
"""
return os.path.splitext(os.path.basename(fn))[0]
def splitme(s):
return s[::2],s[1::2]
def mergeme(s1,s2):
return ''.join([a+b for a,b in zip(s1,s2)])
def mtime(fn):
"""Return the (UNIX) time of last change of file fn."""
return os.stat(fn).st_mtime
def timeEval(s,glob=None):
"""Return the time needed for evaluating a string.
s is a string containing a valid Python expression.
The string is evaluated using Python's eval() and the difference
in seconds between the current time before and after the evaluation
is printed. The result of the evaluation is returned.
This is a simple method to measure the time spent in some operation.
It should not be used for microlevel instructions though, because of
the overhead of the time calls. Use Python's timeit module to measure
microlevel execution time.
"""
start = time.time()
res = eval(s,glob)
stop = time.time()
pf.message("Timed evaluation: %s seconds" % (stop-start))
return res
def countLines(fn):
"""Return the number of lines in a text file."""
sta,out = runCommand("wc %s" % fn)
if sta == 0:
return int(out.split()[0])
else:
return 0
##########################################################################
## Running external commands ##
###############################
# DEPRECATED, KEPT FOR EMERGENCIES
# currently activated by --commands option
def system1(cmd):
"""Execute an external command."""
import commands
return commands.getstatusoutput(cmd)
## def timedWait(proc, timeout, waitToKill = 1.):
## """It is an implementation of the wait() but with a timeout check.
## - `timeout`: if a project is not completed before the timeout (float, seconds) it will be terminated.
## - `waitToKill` is the delay between proc.terminate() and proc.kill().
## """
## import timer
## t = timer.Timer(0)
## while proc.poll() is None and t.seconds(rounded=False) < timeout:#check if proc is completed or timed out
## pass
## if proc.poll() is not None:
## sta = proc.poll() # returncode
## out = proc.communicate()[0] # get the stdout
## elif t.seconds(rounded=False) > timeout:
## proc.terminate()
## try:
## out = proc.stdout.read()
## except:#the stdout was not yet generated
## out = ''
## try:
## time.sleep(waitToKill)
## #proc.kill()
## killProcesses([proc.pid+1],signal=15)#VMTK use pid+1, is it general?
## except:
## pass
## sta = -20#time out code (to be decided)
## else:
## raise ValueError,"This really should not happen!"
## return sta, out
_TIMEOUT_EXITCODE = -1015
_TIMEOUT_KILLCODE = -1009
def system(cmd,timeout=None,gracetime=2.0,shell=True):
"""Execute an external command.
Parameters:
- `cmd`: a string with the command to be executed
- `timeout`: float. If specified and > 0.0, the command will time out
and be killed after the specified number of seconds.
- `gracetime`: float. The time to wait after the terminate signal was
sent in case of a timeout, before a forced kill is done.
- `shell`: if True (default) the command is run in a new shell
Returns:
- `sta`: exit code of the command. In case of a timeout this will be
`utils._TIMEOUT_EXITCODE`, or `utils._TIMEOUT_KILLCODE` if the command
had to be forcedly killed. Otherwise, the exitcode of the command
itself is returned.
- `out`: stdout produced by the command
- `err`: stderr produced by the command
"""
from subprocess import PIPE,Popen
from threading import Timer
def terminate(p):
"""Terminate a subprocess when it times out"""
if p.poll() is None:
print("Subprocess terminated due to timeout (%ss)" % timeout)
p.terminate()
p.returncode = _TIMEOUT_EXITCODE
time.sleep(0.1)
if p.poll() is None:
# Give the process gracetime seconds to terminate, then kill it
time.sleep(gracetime)
if p.poll() is None:
print("Subprocess killed")
p.kill()
p.returncode = _TIMEOUT_KILLCODE
P = Popen(cmd,shell=shell,stdout=PIPE,stderr=PIPE)
if timeout > 0.0:
# Start a timer
t = Timer(timeout,terminate,[P])
t.start()
else:
t = None
# start the process and wait for it to finish
out,err = P.communicate() # get the stdout and stderr
sta = P.returncode
if t:
# cancel the timer if one was started
t.cancel()
return sta,out,err
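# Editor's sketch: using system() with a timeout. 'sleep 5' is just an
# illustrative shell command; after 1 second the process is terminated
# and one of the special timeout exit codes is returned.
def _demo_system_timeout():
    sta, out, err = system('sleep 5', timeout=1.0)
    if sta in (_TIMEOUT_EXITCODE, _TIMEOUT_KILLCODE):
        print("command timed out (exit code %s)" % sta)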
def runCommand(cmd,timeout=None,verbose=True):
"""Run an external command in a user friendly way.
This uses the :func:`system` function to run an external command,
adding some extra user notifications of what is happening.
If no error occurs, the (sta,out) values obtained from the :func:`system`
function are returned. The value sta will be zero, unless a timeout
condition has occurred, in which case sta will be `utils._TIMEOUT_EXITCODE`
or `utils._TIMEOUT_KILLCODE`.
If the :func:`system` call returns with an error that is not a timeout,
a RuntimeError is raised.
Parameters:
- `cmd`: a string with the command to be executed
- `timeout`: float. If specified and > 0.0, the command will time out
and be killed after the specified number of seconds.
- `verbose`: boolean. If True (default), a message including the command
is printed before it is run and in case of a nonzero exit, the full
stdout, exit status and stderr are printed (in that order).
If no error occurs in the execution of the command by the :func:`system`
function, returns a tuple
- `sta`: 0, or a negative value in case of a timeout condition
- `out`: stdout produced by the command, with the last newline removed
Example:
  cmd = 'sleep 2'
  sta,out = runCommand(cmd, timeout=5.)
  print(sta, out)
"""
if verbose:
print("Running command: %s" % cmd)
pf.debug("Command: %s" % cmd,pf.DEBUG.INFO)
sta,out,err = system(cmd,timeout)
if sta != 0:
if timeout > 0.0 and sta in [ _TIMEOUT_EXITCODE, _TIMEOUT_KILLCODE ]:
pass
else:
if verbose:
print(out)
print("Command exited with an error (exitcode %s)" % sta)
print(err)
raise RuntimeError, "Error while executing command:\n %s" % cmd
return sta,out.rstrip('\n')
def spawn(cmd):
"""Spawn a child process."""
cmd = cmd.split()
pid = os.spawnvp(os.P_NOWAIT,cmd[0],cmd)
pf.debug("Spawned child process %s for command '%s'" % (pid,cmd),pf.DEBUG.INFO)
return pid
def killProcesses(pids,signal=15):
"""Send the specified signal to the processes in list
- `pids`: a list of process ids.
- `signal`: the signal to send to the processes. The default (15) will
try to terminate the process. See 'man kill' for more values.
"""
for pid in pids:
try:
os.kill(pid,signal)
except:
pf.debug("Error in killing of process '%s'" % pid,pf.DEBUG.INFO)
def changeExt(fn,ext):
"""Change the extension of a file name.
The extension is the minimal trailing part of the filename starting
with a '.'. If the filename has no '.', the extension will be appended.
If the given extension does not start with a dot, one is prepended.
Example:
>>> changeExt('image.png','.jpg')
'image.jpg'
>>> changeExt('image','.jpg')
'image.jpg'
>>> changeExt('image','jpg')
'image.jpg'
"""
if not ext.startswith('.'):
ext = ".%s" % ext
return os.path.splitext(fn)[0] + ext
def tildeExpand(fn):
"""Perform tilde expansion on a filename.
Bash, the most used command shell in Linux, expands a '~' in arguments
to the user's home directory.
This function can be used to do the same for strings that did not receive
the bash tilde expansion, such as strings in the configuration file.
"""
return fn.replace('~',os.environ['HOME'])
def userName():
"""Find the name of the user."""
try:
return os.environ['LOGNAME']
except:
return 'NOBODY'
def is_script(appname):
"""Checks whether an application name is rather a script name"""
return appname.endswith('.py') or appname.endswith('.pye')
def is_app(appname):
return not is_script(appname)
is_pyFormex = is_script
## def is_pyFormex(filename):
## """Checks whether a file is a pyFormex script.
## A file is considered to be a pyFormex script if its name ends in '.py'
## and the first line of the file contains the substring 'pyformex'.
## Typically, a pyFormex script starts with a line::
## # *** pyformex ***
## """
## filename = str(filename) # force it into a string
## if filename.endswith(".pye"):
## return True
## ok = filename.endswith(".py")
## if ok:
## try:
## f = open(filename,'r')
## ok = f.readline().find('pyformex') >= 0
## f.close()
## except IOError:
## ok = False
## return ok
def getDocString(scriptfile):
"""Return the docstring from a script file.
This actually returns the first multiline string (delimited by
triple double quote characters) from the file.
It relies on the script file being structured properly and
indeed including a docstring at the beginning of the file.
"""
fil = open(scriptfile,'r')
s = fil.read()
i = s.find('"""')
if i >= 0:
j = s.find('"""',i+1)
if j >= i+3:
return s[i+3:j]
return ''
tempFile = tempfile.NamedTemporaryFile
tempDir = tempfile.mkdtemp
def numsplit(s):
"""Split a string in numerical and non-numerical parts.
Returns a series of substrings of s. Counting from one, the odd items
do not contain any digits, while the even items only contain digits.
Joined together, the substrings restore the original.
The number of items is always odd: if the string starts or ends with a
digit, the first or last item is an empty string.
Example:
>>> print(numsplit("aa11.22bb"))
['aa', '11', '.', '22', 'bb']
>>> print(numsplit("11.22bb"))
['', '11', '.', '22', 'bb']
>>> print(numsplit("aa11.22"))
['aa', '11', '.', '22', '']
"""
return digits.split(s)
def hsorted(l):
"""Sort a list of strings in human order.
When humans sort a list of strings, they tend to interpret the
numerical fields as numbers and sort these parts numerically,
instead of using the purely lexicographic ordering a computer applies.
Returns the list of strings sorted in human order.
Example:
>>> hsorted(['a1b','a11b','a1.1b','a2b','a1'])
['a1', 'a1.1b', 'a1b', 'a2b', 'a11b']
"""
def human(s):
s = digits.split(s)+['0']
return zip(s[0::2], map(int, s[1::2]))
return sorted(l,key=human)
def splitDigits(s,pos=-1):
"""Split a string at a sequence of digits.
The input string is split in three parts, where the second part is
a contiguous series of digits. The second argument specifies at which
numerical substring the splitting is done. By default (pos=-1) this is
the last one.
Returns a tuple of three strings, any of which can be empty. The
second string, if non-empty, is a series of digits. The first and last
items are the parts of the string before and after that series.
If the string does not contain any digits, or if the specified splitting
position exceeds the number of numerical substrings, the second and
third items are empty strings.
Example:
>>> splitDigits('abc123')
('abc', '123', '')
>>> splitDigits('123')
('', '123', '')
>>> splitDigits('abc')
('abc', '', '')
>>> splitDigits('abc123def456fghi')
('abc123def', '456', 'fghi')
>>> splitDigits('abc123def456fghi',0)
('abc', '123', 'def456fghi')
>>> splitDigits('123-456')
('123-', '456', '')
>>> splitDigits('123-456',2)
('123-456', '', '')
>>> splitDigits('')
('', '', '')
"""
g = numsplit(s)
n = len(g)
i = 2*pos
if i >= -n and i+1 < n:
if i >= 0:
i += 1
#print(g,i,n)
return ''.join(g[:i]),g[i],''.join(g[i+1:])
else:
return s,'',''
# BV: We could turn this into a factory
class NameSequence(object):
"""A class for autogenerating sequences of names.
The name is a string including a numeric part, which is incremented
at each call of the 'next()' method.
The constructor takes name template and a possible extension as arguments.
If the name starts with a non-numeric part, it is taken as a constant
part.
If the name ends with a numeric part, the next generated names will
be obtained by incrementing this part. If not, a string '-000' will
be appended and names will be generated by incrementing this part.
If an extension is given, it will be appended as is to the names.
This makes it possible to put the numeric part anywhere inside the
names.
Example:
>>> N = NameSequence('abc.98')
>>> [ N.next() for i in range(3) ]
['abc.98', 'abc.99', 'abc.100']
>>> N = NameSequence('abc-8x.png')
>>> [ N.next() for i in range(3) ]
['abc-8x.png', 'abc-9x.png', 'abc-10x.png']
>>> NameSequence('abc','.png').next()
'abc-000.png'
>>> N = NameSequence('/home/user/abc23','5.png')
>>> [ N.next() for i in range(2) ]
['/home/user/abc235.png', '/home/user/abc245.png']
"""
def __init__(self,name,ext=''):
"""Create a new NameSequence from name,ext."""
prefix,number,suffix = splitDigits(name)
if len(number) > 0:
self.nr = int(number)
format = "%%0%dd" % len(number)
else:
self.nr = 0
format = "-%03d"
self.name = prefix+format+suffix+ext
def next(self):
"""Return the next name in the sequence"""
fn = self.name % self.nr
self.nr += 1
return fn
def peek(self):
"""Return the next name in the sequence without incrementing."""
return self.name % self.nr
def glob(self):
"""Return a UNIX glob pattern for the generated names.
A NameSequence is often used as a generator for file names.
The glob() method returns a pattern that can be used in a
UNIX-like shell command to select all the generated file names.
"""
i = self.name.find('%')
j = self.name.find('d',i)
return self.name[:i]+'*'+self.name[j+1:]
def files(self,sort=hsorted):
"""Return a (sorted) list of files matching the name pattern.
A function may be specified to sort/filter the list of file names.
The function should take a list of filenames as input. The output
of the function is returned. The default sort function will sort
the filenames in a human order.
"""
import glob
files = glob.glob(self.glob())
if callable(sort):
files = sort(files)
return files
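# Editor's sketch: NameSequence as a file name generator, plus the glob
# pattern matching the generated names (file names are hypothetical).
def _demo_name_sequence():
    N = NameSequence('frame-000.png')
    print([N.next() for i in range(2)])  # ['frame-000.png', 'frame-001.png']
    print(N.glob())                      # 'frame-*.png'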
def prefixDict(d,prefix=''):
"""Prefix all the keys of a dict with the given prefix.
- `d`: a dict where all the keys are strings.
- `prefix`: a string
The return value is a dict with all the items of d, but where the
keys have been prefixed with the given string.
"""
return dict([ (prefix+k,v) for k,v in d.items() ])
def subDict(d,prefix='',strip=True):
"""Return a dict with the items whose key starts with prefix.
- `d`: a dict where all the keys are strings.
- `prefix`: a string
- `strip`: if True (default), the prefix is stripped from the keys.
The return value is a dict with all the items from d whose key starts
with prefix. The keys in the returned dict will have the prefix
stripped off, unless strip=False is specified.
"""
if strip:
return dict([ (k.replace(prefix,'',1),v) for k,v in d.items() if k.startswith(prefix)])
else:
return dict([ (k,v) for k,v in d.items() if k.startswith(prefix)])
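# Editor's sketch: prefixDict()/subDict() form a round trip. The 'gui/'
# prefix is just an example, echoing the config section style used above.
def _demo_prefix_subdict():
    d = {'size': 10, 'color': 'red'}
    pd = prefixDict(d, 'gui/')
    print(pd)                   # {'gui/size': 10, 'gui/color': 'red'}
    print(subDict(pd, 'gui/'))  # {'size': 10, 'color': 'red'}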
def selectDict(d,keys):
"""Return a dict with the items whose key is in keys.
- `d`: a dict where all the keys are strings.
- `keys`: a set of key values, can be a list or another dict.
The return value is a dict with all the items from d whose key
is in keys.
See :func:`removeDict` for the complementary operation.
Example:
>>> d = dict([(c,c*c) for c in range(6)])
>>> selectDict(d,[4,0,1])
{0: 0, 1: 1, 4: 16}
"""
return dict([ (k,d[k]) for k in set(d)&set(keys) ])
def removeDict(d,keys):
"""Remove a set of keys from a dict.
- `d`: a dict
- `keys`: a set of key values
The return value is a dict with all the items from `d` whose key
is not in `keys`.
This is the complementary operation of selectDict.
Example:
>>> d = dict([(c,c*c) for c in range(6)])
>>> removeDict(d,[4,0])
{1: 1, 2: 4, 3: 9, 5: 25}
"""
return dict([ (k,d[k]) for k in set(d)-set(keys) ])
def refreshDict(d,src):
"""Refresh a dict with values from another dict.
The values in the dict d are updated with those in src.
Unlike the dict.update method, this will only update existing keys
but not add new keys.
"""
d.update(selectDict(src,d))
def inverseDict(d):
return dict([(v,k) for k,v in d.items()])
def sortedKeys(d):
"""Returns the sorted keys of a dict.
It is required that the keys of the dict be sortable, e.g. all strings
or integers.
"""
k = d.keys()
k.sort()
return k
def stuur(x,xval,yval,exp=2.5):
"""Returns a (non)linear response on the input x.
xval and yval should be lists of 3 values:
``[xmin,x0,xmax], [ymin,y0,ymax]``.
Together with the exponent exp, they define the response curve
as function of x. With an exponent > 1, the variation will be
slow in the neighbourhood of (x0,y0).
For values x < xmin or x > xmax, the limit value ymin or ymax
is returned.
"""
xmin,x0,xmax = xval
ymin,y0,ymax = yval
if x < xmin:
return ymin
elif x < x0:
xr = float(x-x0) / (xmin-x0)
return y0 + (ymin-y0) * xr**exp
elif x < xmax:
xr = float(x-x0) / (xmax-x0)
return y0 + (ymax-y0) * xr**exp
else:
return ymax
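# Editor's sketch: sampling the stuur() response. With exp > 1 the curve
# is flat around (x0,y0) and clipped to ymin/ymax outside [xmin,xmax].
def _demo_stuur():
    for x in (-2.0, 0.0, 0.5, 1.0, 2.0):
        print("%s -> %s" % (x, stuur(x, [-1.0, 0.0, 1.0], [0.0, 0.5, 1.0])))
    # -2.0 -> 0.0, 0.0 -> 0.5, 0.5 -> ~0.588, 1.0 -> 1.0, 2.0 -> 1.0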
def listFontFiles():
"""List all fonts known to the system.
Returns a list of path names to all the font files found on the system.
"""
cmd = "fc-list : file | sed 's|.*file=||;s|:||'"
sta,out = runCommand(cmd)
if sta:
warning("fc-list could not find your font files.\nMaybe you do not have fontconfig installed?")
else:
return [ f.strip() for f in out.split('\n') ]
###########################################################################
def interrogate(item):
"""Print useful information about item."""
import odict
info = odict.ODict()
if hasattr(item, '__name__'):
info["NAME: "] = item.__name__
if hasattr(item, '__class__'):
info["CLASS: "] = item.__class__.__name__
info["ID: "] = id(item)
info["TYPE: "] = type(item)
info["VALUE: "] = repr(item)
info["CALLABLE:"] = callable(item)
if hasattr(item, '__doc__'):
doc = getattr(item, '__doc__')
doc = doc.strip() # Remove leading/trailing whitespace.
firstline = doc.split('\n')[0]
info["DOC: "] = firstline
for i in info.items():
print("%s %s"% i)
def memory_report(keys=None):
"""Return info about memory usage"""
sta,out,err = system('cat /proc/meminfo')
res = {}
for line in out.split('\n'):
try:
k,v = line.split(':')
k = k.strip()
v = v.replace('kB','').strip()
res[k] = int(v)
except:
break
res['MemUsed'] = res['MemTotal'] - res['MemFree'] - res['Buffers'] - res['Cached']
if keys:
res = selectDict(res,keys)
return res
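# Editor's sketch: memory_report() with a key filter. Linux only, since it
# parses /proc/meminfo; the keys below are standard fields of that file.
def _demo_memory_report():
    print(memory_report(keys=['MemTotal', 'MemUsed']))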
_warn_category = { 'U': UserWarning, 'D':DeprecationWarning }
def saveWarningFilter(message,module='',category=UserWarning):
cat = inverseDict(_warn_category).get(category,'U')
oldfilters = pf.prefcfg['warnings/filters']
newfilters = oldfilters + [(str(message),'',cat)]
pf.prefcfg.update({'filters':newfilters},name='warnings')
pf.debug("Future warning filters: %s" % pf.prefcfg['warnings/filters'],pf.DEBUG.WARNING)
def filterWarning(message,module='',cat='U',action='ignore'):
import warnings
pf.debug("Filter Warning '%s' from module '%s' cat '%s'" % (message,module,cat),pf.DEBUG.WARNING)
category = _warn_category.get(cat,Warning)
warnings.filterwarnings(action,message,category,module)
def warn(message,level=UserWarning,stacklevel=3):
import warnings
warnings.warn(message,level,stacklevel)
# BEWARE: Do not use yet: DeprecationWarnings are not shown by default
def deprec(message,stacklevel=3):
warn(message,level=DeprecationWarning,stacklevel=stacklevel)
def deprecation(message):
def decorator(func):
def wrapper(*_args,**_kargs):
warn(message)
# For some reason these messages are not auto-appended to
# the filters for the currently running program
filterWarning(str(message))
return func(*_args,**_kargs)
return wrapper
return decorator
def deprecated(replacement):
def decorator(func):
def wrapper(*_args,**_kargs):
"""This function is deprecated."""
print("! Function '%s' is deprecated: use '%s.%s' instead" % (func.func_name,replacement.__module__,replacement.func_name))
return replacement(*_args,**_kargs)
return wrapper
decorator.__doc__ = replacement.__doc__
return decorator
def functionWasRenamed(replacement,text=None):
def decorator(func):
def wrapper(*_args,**_kargs):
print("! Function '%s' is deprecated: use '%s' instead" % (func.func_name,replacement.func_name))
return replacement(*_args,**_kargs)
return wrapper
decorator.__doc__ = replacement.__doc__
return decorator
def functionBecameMethod(replacement):
def decorator(func):
def wrapper(object,*args,**kargs):
print("! Function %s is deprecated: use method %s instead" % (func.func_name,replacement))
repfunc = getattr(object,replacement)
return repfunc(*args,**kargs)
return wrapper
return decorator
### End
|
dladd/pyFormex
|
pyformex/utils.py
|
Python
|
gpl-3.0
| 47,793
|
[
"VTK"
] |
64405fa32360722dacde0a7c807f20909307a0bf17369c30cc412d551b33362f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Define miscellaneous utilities.
"""
from __future__ import absolute_import
from debtcollector import removals
from collections import Iterable, OrderedDict
from itertools import tee
import os
import sys
import subprocess
import six
from six.moves import range, zip # pylint: disable=redefined-builtin
__all__ = (
'default_opener',
'flatten',
'get_fobj',
'is_string_like',
'quasilexico_key',
'ordered_partitions',
'OrderedDict',
'partitions',
'extended_partition',
'partition_set',
'powerset',
'product_maker',
'require_keys',
'str_product',
'digits',
'pairwise',
)
def default_opener(filename): # pragma: no cover
"""Opens `filename` using system's default program.
Parameters
----------
filename : str
The path of the file to be opened.
"""
cmds = {'darwin': ['open'],
'linux2': ['xdg-open'], # Python 2.x
'linux': ['xdg-open'], # Python 3.x
'win32': ['cmd.exe', '/c', 'start', '']}
cmd = cmds[sys.platform] + [filename]
subprocess.call(cmd)
def flatten(l):
"""Flatten an irregular list of lists.
Parameters
----------
l : iterable
The object to be flattened.
Yields
-------
el : object
The non-iterable items in `l`.
"""
for el in l:
if isinstance(el, Iterable) and not (isinstance(el, six.string_types) and len(el) == 1):
for sub in flatten(el):
yield sub
else:
yield el
def get_fobj(fname, mode='w+'): # pragma: no cover
"""Obtain a proper file object.
Parameters
----------
fname : string, file object, file descriptor
If a string or file descriptor, then we create a file object. If *fname*
is a file object, then we do nothing and ignore the specified *mode*
parameter.
mode : str
The mode of the file to be opened.
Returns
-------
fobj : file object
The file object.
close : bool
If *fname* was a string or file descriptor, then *close* will be *True*
to signify that the file object should be closed. Otherwise, *close*
will be *False* signifying that the user has opened the file object and
that we should not close it.
"""
if is_string_like(fname):
fobj = open(fname, mode)
close = True
elif hasattr(fname, 'write'):
# fname is a file-like object, perhaps a StringIO (for example)
fobj = fname
close = False
else:
# assume it is a file descriptor
fobj = os.fdopen(fname, mode)
close = True
return fobj, close
def is_string_like(obj):
"""Returns *True* if *obj* is string-like, and *False* otherwise."""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def quasilexico_key(x):
"""Returns a key suitable for a quasi-lexicographic sort [1]_.
Objects are sorted by length first, then lexicographically.
Examples
--------
>>> L = ['a', 'aa', 'b']
>>> sorted(L, key=quasilexico_key)
['a', 'b', 'aa']
References
----------
.. [1] Calude, Cristian (1994). Information and randomness. An algorithmic
perspective. EATCS Monographs on Theoretical Computer Science.
Springer-Verlag. p. 1.
"""
return (len(x), x)
def partition_set(elements, relation=None, innerset=False, reflexive=False,
transitive=False):
"""Returns the equivlence classes from `elements`.
Given `relation`, we test each element in `elements` against the other
elements and form the equivalence classes induced by `relation`. By
default, we assume the relation is symmetric. Optionally, the relation
can be assumed to be reflexive and transitive as well. All three
properties are required for `relation` to be an equivalence relation.
However, there are times when a relation is not reflexive or transitive.
For example, floating point comparisons do not have these properties. In
this instance, it might be desirable to force reflexivity and transitivity
on the elements and then, work with the resulting partition.
Parameters
----------
elements : iterable
The elements to be partitioned.
relation : function, None
A function accepting two elements, which returns `True` iff the two
elements are related. The relation need not be an equivalence relation,
but if `reflexive` and `transitive` are not set to `False`, then the
resulting partition will not be unique. If `None`, then == is used.
innerset : bool
If `True`, then the equivalence classes will be returned as frozensets.
This means that duplicate elements (according to __eq__ not `relation`)
will appear only once in the equivalence class. If `False`, then the
equivalence classes will be returned as lists. This means that
duplicate elements will appear multiple times in an equivalence class.
reflexive : bool
If `True`, then `relation` is assumed to be reflexive. If `False`, then
reflexivity will be enforced manually. Effectively, a new relation
is considered: relation(a,b) AND relation(b,a).
transitive : bool
If `True`, then `relation` is assumed to be transitive. If `False`, then
transitivity will be enforced manually. Effectively, a new relation is
considered: relation(a,b) for all b in the class.
Returns
-------
eqclasses : list
The collection of equivalence classes.
lookup : list
A list relating where lookup[i] contains the index of the eqclass
that elements[i] was mapped to in `eqclasses`.
"""
if relation is None:
from operator import eq
relation = eq
lookup = []
if reflexive and transitive:
eqclasses = []
for _, element in enumerate(elements):
for eqclass_idx, (representative, eqclass) in enumerate(eqclasses):
if relation(representative, element):
eqclass.append(element)
lookup.append(eqclass_idx)
# Each element can belong to *one* equivalence class
break
else:
lookup.append(len(eqclasses))
eqclasses.append((element, [element]))
eqclasses = [c for _, c in eqclasses]
else:
def belongs(element, eqclass):
for representative in eqclass:
if not relation(representative, element):
return False
if not reflexive:
if not relation(element, representative):
return False
if transitive:
return True
else:
# Test against all members
# python optimizes away this line, so it never actually
# gets executed during tests...
continue # pragma: no cover
# Then it equals all members symmetrically.
return True
eqclasses = []
for _, element in enumerate(elements):
for eqclass_idx, eqclass in enumerate(eqclasses):
if belongs(element, eqclass):
eqclass.append(element)
lookup.append(eqclass_idx)
# Each element can belong to one equivalence class
break
else:
lookup.append(len(eqclasses))
eqclasses.append([element])
if innerset:
eqclasses = [frozenset(c) for c in eqclasses]
else:
eqclasses = [tuple(c) for c in eqclasses]
return eqclasses, lookup
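# Editor's sketch (assumed usage): partitioning floats by approximate
# equality. The relation is symmetric but not transitive, so we let
# partition_set enforce transitivity (transitive=False).
def _demo_partition_set():
    close = lambda a, b: abs(a - b) < 0.15
    eqclasses, lookup = partition_set([0.0, 0.1, 0.12, 1.0], relation=close,
                                      reflexive=True, transitive=False)
    print(eqclasses)  # [(0.0, 0.1, 0.12), (1.0,)]
    print(lookup)     # [0, 0, 0, 1]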
def powerset(iterable):
"""
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
"""
from itertools import chain, combinations
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def product_maker(func):
"""
Returns a customized product function.
itertools.product yields tuples. Sometimes, one desires some other type
of object---for example, strings. This function transforms the output
of itertools.product, returning a customized product function.
Parameters
----------
func : callable
Any function which accepts an iterable as the single input argument.
The iterates of itertools.product are transformed by this callable.
Returns
-------
_product : callable
A customized itertools.product function.
"""
from itertools import product
def _product(*args, **kwargs):
for prod in product(*args, **kwargs):
yield func(prod)
return _product
str_product = product_maker(''.join)
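# Editor's sketch: str_product, built via product_maker(''.join), yields
# joined strings where itertools.product would yield tuples.
def _demo_str_product():
    print(list(str_product('ab', '01')))  # ['a0', 'a1', 'b0', 'b1']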
def require_keys(keys, dikt):
"""Verifies that keys appear in the specified dictionary.
Parameters
----------
keys : list of str
List of required keys.
dikt : dict
The dictionary that is checked for keys.
Returns
-------
None
Raises
------
Exception
Raised when a required key is not present.
"""
dikt_keys = set(dikt)
for key in keys:
if key not in dikt_keys:
msg = "'%s' is required." % (key,)
raise Exception(msg)
def partitions1(set_):
"""
Generates partitions of elements in `set_`.
For set_ = range(12), this finishes in 52.37 seconds.
Yields tuple of sets.
"""
# Thomas Dybdahl Ahle (https://github.com/thomasahle)
# Source:
# http://compprog.wordpress.com/2007/10/15/generating-the-partitions-of-a-set
if not set_:
yield ()
return
for i in range(2**len(set_) // 2): # 2**() is even, so using // is safe.
parts = [set(), set()]
for item in set_:
parts[i&1].add(item)
i >>= 1
for b in partitions1(parts[1]):
yield (parts[0],) + b
def partitions2(n):
"""
Generates all partitions of {1,...,n}.
For n=12, this finishes in 4.48 seconds.
"""
# Original source: George Hutchinson [CACM 6 (1963), 613--614]
#
# This implementation is:
# Algorithm H (Restricted growth strings in lexicographic order)
# from pages 416--417 of Knuth's The Art of Computer Programming, Vol 4A:
# Combinatorial Problems, Part 1. 1st Edition (2011).
# ISBN-13: 978-0-201-03804-0
# ISBN-10: 0-201-03804-8
#
# To maintain notation with Knuth, we ignore the first element of
# each array so that a[j] == a_j, b[j] == b_j for j = 1,...,n.
# H1 [Initialize.]
# Per above, make lists larger by one element to give 1-based indexing.
if n == 0:
yield [[]]
elif n == 1:
yield [[0]]
else:
a = [0] * (n+1)
b = [1] * (n)
m = 1
while True:
# H2 [Visit.]
yield a[1:]
if a[n] == m:
# H4 [Find $j$.]
j = n - 1
while a[j] == b[j]:
j -= 1
# H5 [Increase $a_j$.]
if j == 1:
break
else:
a[j] += 1
# H6 [Zero out $a_{j+1} \ldots a_n$]
m = b[j]
if a[j] == b[j]: # Iverson bracket
m += 1
j += 1
while j < n:
a[j] = 0
b[j] = m
j += 1
a[n] = 0
else:
# H3
a[n] += 1
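# Editor's sketch: a consistency check. Both generators should agree with
# the Bell numbers B_1..B_4 = 1, 2, 5, 15.
def _demo_bell_numbers():
    for n in range(1, 5):
        n1 = sum(1 for _ in partitions1(set(range(n))))
        n2 = sum(1 for _ in partitions2(n))
        print("%d %d %d" % (n, n1, n2))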
def partitions(seq, tuples=False):
"""
Generates all partitions of `seq`.
Parameters
----------
seq : iterable
Any iterable. Used to generate the partitions.
tuples : bool
If `True`, yields tuple of tuples. Otherwise, yields frozenset of
frozensets.
Yields
------
partition : frozenset or tuple
A frozenset of frozensets, or a sorted tuple of sorted tuples.
"""
# Handle iterators.
seq = list(seq)
if tuples:
for partition in partitions1(seq):
# Convert the partition into a list of sorted tuples.
partition = map(tuple, map(sorted, partition))
# Convert the partition into a sorted tuple of sorted tuples.
# Sort by smallest parts first, then lexicographically.
partition = tuple(sorted(partition, key=quasilexico_key))
yield partition
else:
for partition in partitions1(seq):
partition = frozenset(map(frozenset, partition))
yield partition
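# Editor's sketch: the two output forms of partitions() on a 2-element set.
def _demo_partitions():
    print(sorted(partitions('ab', tuples=True)))
    # [(('a',), ('b',)), (('a', 'b'),)]
    print(len(list(partitions('ab'))))  # 2 frozensets of frozensets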
def extended_partition(outcomes, indices, part, ctr):
"""
Expand a partition over a subset of outcome indices to the entire outcome.
Parameters
----------
outcomes : list
List of outcomes.
indices : [int]
A list of indices corresponding to which indices of each outcome an
element of `part` corresponds to.
part : frozenset[frozenset]
A partition on some subset of indices of the outcomes.
ctr : func
The constructor to build sub-outcomes from.
Returns
-------
partition : frozenset[frozenset]
The expanded partition.
"""
return frozenset([frozenset([o for o in outcomes if ctr(o[i] for i in indices) in p]) for p in part])
def ordered_partitions(seq, tuples=False):
"""
Generates ordered partitions of elements in `seq`.
Parameters
----------
seq : iterable
Any iterable. Used to generate the partitions.
tuples : bool
If `True`, yields tuple of tuples. Otherwise, yields tuple of
frozensets.
Yields
------
partition : tuple
A tuple of frozensets, or a tuple of sorted tuples.
"""
from itertools import permutations
# Handle iterators.
seq = list(seq)
if tuples:
for partition in partitions1(seq):
# Convert the partition into a list of sorted tuples.
partition = list(map(tuple, map(sorted, partition)))
# Generate all permutations.
for perm in permutations(partition):
yield perm
else:
for partition in partitions1(seq):
partition = list(map(frozenset, partition))
for perm in permutations(partition):
yield perm
def digits(n, base, alphabet=None, pad=0, big_endian=True):
"""
Returns `n` as a sequence of indexes into an alphabet.
Parameters
----------
n : int
The number to convert into a sequence of indexes.
base : int
The desired base of the sequence representation. The base must be
greater than or equal to 2.
alphabet : iterable
If specified, then the indexes are converted into symbols using
the specified alphabet.
pad : int
    If nonzero, the sequence is padded with zeros so that its length
    is at least `pad`.
big_endian : bool
If `True`, then the resultant sequence has the least significant
digits at the end. This is standard for Western culture.
Returns
-------
sequence : list
The digits representation of `n`.
Examples
--------
>>> digits(6, base=2, pad=4)
[0, 1, 1, 0]
>>> ''.join(digits(6, base=2, pad=4, alphabet='xo'))
'xoox'
"""
# http://stackoverflow.com/a/2088440
if base < 2 or int(base) != base:
    raise ValueError('`base` must be an integer greater than or equal to 2')
if alphabet is not None:
if len(alphabet) != base:
raise ValueError('Length of `alphabet` must equal `base`.')
sequence = []
while True:
sequence.append(n % base)
if n < base:
break
n //= base
if pad:
zeros = [0] * (pad - len(sequence))
sequence.extend(zeros)
if big_endian:
sequence.reverse()
if alphabet is not None:
sequence = [alphabet[i] for i in sequence]
return sequence
@removals.remove(message="Use boltons.iterutils.pairwise instead.",
version="1.0.1")
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
|
Autoplectic/dit
|
dit/utils/misc.py
|
Python
|
bsd-3-clause
| 16,425
|
[
"VisIt"
] |
c2009d045db21f80a4b664d3bf4615d71ffcabe2629fd6d5fd11f1106af8ebc9
|
# This module implements trajectories and trajectory generators.
#
# Written by Konrad Hinsen
# last revision: 2002-5-10
#
import Collection, Units, Universe, Utility, ParticleProperties, Visualization
from Scientific.Geometry import Vector
import Numeric, copy, os, string, sys, types
# Report error if the netCDF module is not available.
try:
from Scientific.IO import NetCDF
except ImportError:
NetCDF = None
def checkNetCDF():
if NetCDF is None:
raise Utility.MMTKError, \
"Trajectories are not available because the netCDF module is missing."
#
# Trajectory class
#
class Trajectory:
"""Trajectory file
Constructor: Trajectory(|object|, |filename|, |mode|="r",
|comment|=None, |double_precision|=0, |cycle|=0,
|block_size|=1)
Arguments:
|object| -- the object whose data is stored in the trajectory file.
This can be 'None' when opening a file for reading;
in that case, a universe object is constructed from the
description stored in the trajectory file. This universe
object can be accessed via the attribute 'universe'
of the trajectory object.
|filename| -- the name of the trajectory file
|mode| -- one of "r" (read-only), "w" (create new file for writing),
or "a" (append to existing file or create if the file does
not exist)
|comment| -- an optional comment that is stored in the file; not
allowed with mode="r"
|double_precision| -- if non-zero, data in the file is stored using
double precision; default is single precision.
Note that all I/O via trajectory objects is
double precision; conversion from and to single
precision file variables is handled automatically.
|cycle| -- if non-zero, a trajectory is created for a fixed number
of steps equal to the value of |cycle|, and these steps
are used cyclically. This is meant for restart trajectories.
|block_size| -- an optimization parameter that influences the file
structure and the I/O performance for very large
files. A block size of 1 is optimal for sequential
access to configurations etc., whereas a block size
equal to the number of steps is optimal for reading
coordinates or scalar variables along the time axis.
The default value is 1. Note that older MMTK releases
always used a block size of 1 and cannot handle
trajectories with different block sizes.
The data in a trajectory file can be accessed by step or by
variable. If 't' is a Trajectory object, then:
- 'len(t)' is the number of steps
- 't[i]' is the data for step i, in the form of a dictionary that
maps variable names to data
- 't[i:j]' and 't[i:j:n]' return a SubTrajectory object that refers
to a subset of the total number of steps (no data is copied)
- 't.variable' returns the value of the named variable at all
time steps. If the variable is a simple scalar, it is read
completely and returned as an array. If the variable contains
data for each atom, a TrajectoryVariable object is returned
from which data at specific steps can be obtained by further
indexing operations.
The routines that generate trajectories decide what variables
are used and what they contain. The most frequently used variable
is "configuration", which stores the positions of all atoms.
Other common variables are "time", "velocities", "temperature",
"pressure", and various energy terms whose name end with "_energy".
"""
def __init__(self, object, filename, mode = 'r', comment = None,
double_precision = 0, cycle = 0, block_size = 1):
checkNetCDF()
filename = os.path.expanduser(filename)
self.filename = filename
if object is None and mode == 'r':
file = NetCDF.NetCDFFile(filename, 'r')
description = file.variables['description'][:].tostring()
try:
self.block_size = file.dimensions['minor_step_number']
except KeyError:
self.block_size = 1
conf = None
cell = None
if self.block_size == 1:
try:
conf_var = file.variables['configuration']
conf = conf_var[0, :, :]
except KeyError: pass
try:
cell = file.variables['box_size'][0, :]
except KeyError: pass
else:
try:
conf_var = file.variables['configuration']
conf = conf_var[0, :, :, 0]
except KeyError: pass
try:
cell = file.variables['box_size'][0, :, 0]
except KeyError: pass
file.close()
import Skeleton
local = {}
skeleton = eval(description, vars(Skeleton), local)
universe = skeleton.make({}, conf)
universe.setCellParameters(cell)
object = universe
initialize = 1
else:
universe = object.universe()
if universe is None:
raise ValueError, "objects not in the same universe"
description = None
initialize = 0
universe.configuration()
if object is universe:
index_map = None
inverse_map = None
else:
if mode == 'r':
raise ValueError, "can't read trajectory for a non-universe"
index_map = Numeric.array(map(lambda a:a.index, object.atomList()))
inverse_map = universe.numberOfPoints()*[None]
for i in range(len(index_map)):
inverse_map[index_map[i]] = i
toplevel = {}
for o in Collection.Collection(object):
toplevel[o.topLevelChemicalObject()] = 1
object = Collection.Collection(toplevel.keys())
if description is None:
description = universe.description(object, inverse_map)
import MMTK_trajectory
self.trajectory = MMTK_trajectory.Trajectory(universe, description,
index_map, filename,
mode + 's',
double_precision, cycle,
block_size)
self.universe = universe
self.index_map = index_map
try:
self.block_size = \
self.trajectory.file.dimensions['minor_step_number']
except KeyError:
self.block_size = 1
if comment is not None:
if mode == 'r':
raise IOError, 'cannot add comment in read-only mode'
self.trajectory.file.comment = comment
if initialize and conf is not None:
self.universe.setFromTrajectory(self)
self.particle_trajectory_reader = ParticleTrajectoryReader(self)
def close(self):
"""Close the trajectory file. Must be called after writing to
ensure that all buffered data is written to the file. No data
access is possible after closing a file."""
self.trajectory.close()
def __len__(self):
return self.trajectory.nsteps
def __getitem__(self, item):
if type(item) != type(0):
return SubTrajectory(self, Numeric.arange(len(self)))[item]
if item < 0:
item = item + len(self)
if item >= len(self):
raise IndexError
data = {}
for name, var in self.trajectory.file.variables.items():
if 'step_number' not in var.dimensions:
continue
if 'atom_number' in var.dimensions:
if 'xyz' in var.dimensions:
array = ParticleProperties.ParticleVector(self.universe,
self.trajectory.readParticleVector(name, item))
else:
array = ParticleProperties.ParticleScalar(self.universe,
self.trajectory.readParticleScalar(name, item))
else:
bs = self.block_size
if bs == 1:
array = var[item]
else:
if len(var.shape) == 2:
array = var[item/bs, item%bs]
else:
array = var[item/bs, ..., item%bs]
data[name] = 0.+array
if data.has_key('configuration'):
box = data.get('box_size', None)
if box is not None:
box = box.astype(Numeric.Float)
conf = data['configuration']
data['configuration'] = \
ParticleProperties.Configuration(conf.universe, conf.array, box)
return data
def __getslice__(self, first, last):
return self[(slice(first, last),)]
def __getattr__(self, name):
try:
var = self.trajectory.file.variables[name]
except KeyError:
raise AttributeError, "no variable named " + name
if 'atom_number' in var.dimensions:
return TrajectoryVariable(self.universe, self, name)
else:
return Numeric.ravel(Numeric.array(var))[:len(self)]
def defaultStep(self):
try:
step = self.trajectory.file.last_step[0]
except AttributeError:
step = 0
return step
def readParticleTrajectory(self, atom, first=0, last=None, skip=1,
variable = "configuration"):
"""Read the values of the specified |variable| for the specified
|atom| at all time steps from |first| to |last| with an
increment of |skip|. The result is a ParticleTrajectory object.
If the variable is "configuration", the resulting trajectory
is made continuous by eliminating all jumps caused by periodic
boundary conditions. The pseudo-variable "box_coordinates"
can be read to obtain the values of the variable "configuration"
scaled to box coordinates. For non-periodic universes there is
no difference between box coordinates and real coordinates."""
return ParticleTrajectory(self, atom, first, last, skip, variable)
def readRigidBodyTrajectory(self, object, first=0, last=None, skip=1,
reference = None):
"""Read the positions for the specified |object| at all time steps
from |first| to |last| with an increment of |skip| and extract
the rigid-body motion (center-of-mass position plus orientation as
a quaternion) by an optimal-transformation fit. The result is a
RigidBodyTrajectory object."""
return RigidBodyTrajectory(self, object, first, last, skip, reference)
def variables(self):
"""Returns a list of the names of all variables that are stored
in the trajectory."""
vars = copy.copy(self.trajectory.file.variables.keys())
vars.remove('step')
try:
vars.remove('description')
except ValueError: pass
return vars
def view(self, first=0, last=None, step=1, object = None):
"""Show an animation of |object| using the positions in the
trajectory at all time steps from |first| to |last| with an
increment of |step|. |object| defaults to the entire universe."""
Visualization.viewTrajectory(self, first, last, step, object)
def _boxTransformation(self, pt_in, pt_out, to_box=0):
from MMTK_trajectory import boxTransformation
try:
box_size = self.trajectory.recently_read_box_size
except AttributeError:
return
boxTransformation(self.universe._spec,
pt_in, pt_out, box_size, to_box)
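#
# Editor's sketch (assumed usage; 'run.nc' is a hypothetical file name):
# reading an existing trajectory. With object=None the universe is rebuilt
# from the description stored in the file, as explained in the class
# docstring above.
def _demo_read_trajectory():
    t = Trajectory(None, 'run.nc')  # open for reading
    print(len(t))                   # number of steps
    data = t[0]                     # dict: variable name -> data for step 0
    conf = t.configuration          # TrajectoryVariable, reads lazily
    print(conf[0])                  # Configuration at step 0
    t.close()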
class SubTrajectory:
"""Reference to a subset of a trajectory
A SubTrajectory object is created by slicing a Trajectory object
or another SubTrajectory object. It provides all the operations
defined on Trajectory objects.
"""
def __init__(self, trajectory, indices):
self.trajectory = trajectory
self.indices = indices
self.universe = trajectory.universe
def __len__(self):
return len(self.indices)
def __getitem__(self, item):
if type(item) == type(0):
return self.trajectory[self.indices[item]]
else:
return SubTrajectory(self.trajectory, self.indices[item])
def __getslice__(self, first, last):
return self[(slice(first, last),)]
def __getattr__(self, name):
return SubVariable(getattr(self.trajectory, name), self.indices)
def readParticleTrajectory(self, atom, first=0, last=None, skip=1,
variable = "configuration"):
if last is None:
last = len(self.indices)
indices = self.indices[first:last:skip]
first = indices[0]
last = indices[-1]+1
if len(self.indices) > 1:
skip = self.indices[1]-self.indices[0]
else:
skip = 1
return self.trajectory.readParticleTrajectory(atom, first, last,
skip, variable)
def readRigidBodyTrajectory(self, object, first=0, last=None, skip=1,
reference = None):
if last is None:
last = len(self.indices)
indices = self.indices[first:last:skip]
first = indices[0]
last = indices[-1]+1
if len(self.indices) > 1:
skip = self.indices[1]-self.indices[0]
else:
skip = 1
return RigidBodyTrajectory(self.trajectory, object,
first, last, skip, reference)
def variables(self):
return self.trajectory.variables()
def view(self, first=0, last=None, step=1, subset = None):
Visualization.viewTrajectory(self, first, last, step, subset)
def close(self):
del self.trajectory
def _boxTransformation(self, pt_in, pt_out, to_box=0):
Trajectory._boxTransformation(self.trajectory, pt_in, pt_out, to_box)
#
# Trajectory variables
#
class TrajectoryVariable:
"""Variable in a trajectory
A TrajectoryVariable object is created by extracting a variable from
a Trajectory object if that variable contains data for each atom and
is thus potentially large. No data is read from the trajectory file
when a TrajectoryVariable object is created; the read operation
takes place when the TrajectoryVariable is indexed with a specific
step number.
If 't' is a TrajectoryVariable object, then:
- 'len(t)' is the number of steps
- 't[i]' is the data for step i, in the form of a ParticleScalar,
a ParticleVector, or a Configuration object, depending on the
variable
- 't[i:j]' and 't[i:j:n]' return a SubVariable object that refers
to a subset of the total number of steps
"""
def __init__(self, universe, trajectory, name):
self.universe = universe
self.trajectory = trajectory
self.name = name
self.var = self.trajectory.trajectory.file.variables[self.name]
if self.name == 'configuration':
try:
self.box_size = \
self.trajectory.trajectory.file.variables['box_size']
except KeyError:
self.box_size = None
def __len__(self):
return len(self.trajectory)
def __getitem__(self, item):
if type(item) != type(0):
return SubVariable(self, Numeric.arange(len(self)))[item]
if item < 0:
item = item + len(self.trajectory)
if item >= len(self.trajectory):
raise IndexError
if self.name == 'configuration':
if self.box_size is None:
box = None
elif len(self.box_size.shape) == 3:
bs = self.trajectory.block_size
box = self.box_size[item/bs, :, item%bs].astype(Numeric.Float)
else:
box = self.box_size[item].astype(Numeric.Float)
array = ParticleProperties.Configuration(self.universe,
self.trajectory.trajectory.readParticleVector(self.name, item),
box)
elif 'xyz' in self.var.dimensions:
array = ParticleProperties.ParticleVector(self.universe,
self.trajectory.trajectory.readParticleVector(self.name, item))
else:
array = ParticleProperties.ParticleScalar(self.universe,
self.trajectory.trajectory.readParticleScalar(self.name, item))
return array
def __getslice__(self, first, last):
return self[(slice(first, last),)]
def average(self):
sum = self[0]
for value in self[1:]:
sum = sum + value
return sum/len(self)
class SubVariable(TrajectoryVariable):
"""Reference to a subset of a TrajectoryVariable
A Glossary:Subclass of Class:MMTK.Trajectory.TrajectoryVariable.
A SubVariable object is created by slicing a TrajectoryVariable
object or another SubVariable object. It provides all the operations
defined on TrajectoryVariable objects.
"""
def __init__(self, variable, indices):
self.variable = variable
self.indices = indices
def __len__(self):
return len(self.indices)
def __getitem__(self, item):
if type(item) == type(0):
return self.variable[self.indices[item]]
else:
return SubVariable(self.variable, self.indices[item])
def __getslice__(self, first, last):
return self[(slice(first, last),)]
#
# Trajectory consisting of multiple files
#
class TrajectorySet:
"""Trajectory file set
A trajectory set makes it possible to treat a sequence of trajectory files
like a single trajectory for reading data. It behaves like an
object of the class Class:MMTK.Trajectory.Trajectory. The
trajectory files must all contain data for the same system.
The variables stored in the individual files need not be the
same, but only variables common to all files can be accessed.
Constructor: TrajectorySet(|object|, |filename_list|)
Arguments:
|object| -- the object whose data is stored in the trajectory files.
This can be (and usually is) 'None';
in that case, a universe object is constructed from the
description stored in the first trajectory file. This universe
object can be accessed via the attribute 'universe'
of the trajectory set object.
|filename_list| -- a list of trajectory file names or
(filename, first_step, last_step, increment)
tuples.
Note: depending on how the sequence of trajectories was constructed,
the first configuration of each trajectory might be the same as the
last one in the preceding trajectory. To avoid counting it twice,
specify (filename, 1, None, 1) for all but the first trajectory in
the set.
"""
def __init__(self, object, filenames):
first = Trajectory(object, filenames[0])
self.universe = first.universe
self.trajectories = [first]
self.nsteps = [0, len(first)]
self.cell_parameters = []
for file in filenames[1:]:
if type(file) == type(()):
t = Trajectory(self.universe, file[0])[file[1]:file[2]:file[3]]
else:
t = Trajectory(self.universe, file)
self.trajectories.append(t)
self.nsteps.append(self.nsteps[-1]+len(t))
self.cell_parameters.append(t[0]['box_size'])
vars = {}
for t in self.trajectories:
for v in t.variables():
vars[v] = vars.get(v, 0) + 1
self.vars = []
for v, count in vars.items():
if count == len(self.trajectories):
self.vars.append(v)
def close(self):
for t in self.trajectories:
t.close()
def __len__(self):
return self.nsteps[-1]
def __getitem__(self, item):
if type(item) != type(0):
return SubTrajectory(self, Numeric.arange(len(self)))[item]
if item >= len(self):
raise IndexError
tindex = Numeric.add.reduce(Numeric.greater_equal(item, self.nsteps))-1
return self.trajectories[tindex][item-self.nsteps[tindex]]
def __getslice__(self, first, last):
return self[(slice(first, last),)]
def __getattr__(self, name):
if name not in self.vars:
raise AttributeError, "no variable named " + name
var = self.trajectories[0].trajectory.file.variables[name]
if 'atom_number' in var.dimensions:
return TrajectorySetVariable(self.universe, self, name)
else:
data = []
for t in self.trajectories:
var = t.trajectory.file.variables[name]
data.append(Numeric.ravel(Numeric.array(var))[:len(t)])
return Numeric.concatenate(data)
def readParticleTrajectory(self, atom, first=0, last=None, skip=1,
variable = "configuration"):
total = None
self.steps_read = []
for i in range(len(self.trajectories)):
if self.nsteps[i+1] <= first:
self.steps_read.append(0)
continue
if last is not None and self.nsteps[i] >= last:
break
n = max(0, (self.nsteps[i]-first+skip-1)/skip)
start = first+skip*n-self.nsteps[i]
n = (self.nsteps[i+1]-first+skip-1)/skip
stop = first+skip*n
if last is not None:
stop = min(stop, last)
stop = stop-self.nsteps[i]
if start >= 0 and start < self.nsteps[i+1]-self.nsteps[i]:
t = self.trajectories[i]
pt = t.readParticleTrajectory(atom, start, stop, skip,
variable)
self.steps_read.append((stop-start)/skip)
if total is None:
total = pt
else:
if variable == "configuration" \
and self.cell_parameters[0] is not None:
jump = pt.array[0]-total.array[-1]
mask = Numeric.less(jump,
-0.5*self.cell_parameters[i-1])- \
Numeric.greater(jump,
0.5*self.cell_parameters[i-1])
t._boxTransformation(pt.array, pt.array, 1)
Numeric.add(pt.array, mask[Numeric.NewAxis, :],
pt.array)
t._boxTransformation(pt.array, pt.array, 0)
elif variable == "box_coordinates" \
and self.cell_parameters[0] is not None:
jump = pt.array[0]-total.array[-1]
mask = Numeric.less(jump, -0.5)- \
Numeric.greater(jump, 0.5)
Numeric.add(pt.array, mask[Numeric.NewAxis, :],
pt.array)
total.array = Numeric.concatenate((total.array, pt.array))
else:
self.steps_read.append(0)
return total
def readRigidBodyTrajectory(self, object, first=0, last=None, skip=1,
reference = None):
return RigidBodyTrajectory(self, object, first, last, skip, reference)
def _boxTransformation(self, pt_in, pt_out, to_box=0):
n = 0
for i in range(len(self.steps_read)):
t = self.trajectories[i]
steps = self.steps_read[i]
if steps > 0:
t._boxTransformation(pt_in[n:n+steps], pt_out[n:n+steps],
to_box)
n = n + steps
## def readRigidBodyTrajectory(self, object, first=0, last=None, skip=1,
## reference = None):
## total = None
## for i in range(len(self.trajectories)):
## if self.nsteps[i+1] <= first:
## continue
## if last is not None and self.nsteps[i] >= last:
## break
## n = max(0, (self.nsteps[i]-first+skip-1)/skip)
## start = first+skip*n-self.nsteps[i]
## n = (self.nsteps[i+1]-first+skip-1)/skip
## stop = first+skip*n
## if last is not None:
## stop = min(stop, last)
## stop = stop-self.nsteps[i]
## if start >= 0 and start < self.nsteps[i+1]-self.nsteps[i]:
## t = self.trajectories[i]
## rbt = t.readRigidBodyTrajectory(object, start, stop, skip,
## reference)
## if total is None:
## total = rbt
## else:
## if self.cell_parameters[0] is not None:
## jump = rbt.cms[0]-total.cms[-1]
## mask = Numeric.less(jump,
## -0.5*self.cell_parameters[i-1])- \
## Numeric.greater(jump,
## 0.5*self.cell_parameters[i-1])
## t._boxTransformation(rbt.cms, rbt.cms, 1)
## Numeric.add(rbt.cms, mask[Numeric.NewAxis, :],
## rbt.cms)
## t._boxTransformation(rbt.cms, rbt.cms, 0)
## total.cms = Numeric.concatenate((total.cms, rbt.cms))
## total.quaternions = Numeric.concatenate((total.quaternions,
## rbt.quaternions))
## total.fit = Numeric.concatenate((total.fit, rbt.fit))
## return total
def variables(self):
return self.vars
def view(self, first=0, last=None, step=1, object = None):
Visualization.viewTrajectory(self, first, last, step, object)
class TrajectorySetVariable(TrajectoryVariable):
"""Variable in a trajectory set
A TrajectorySetVariable object is created by extracting a variable from
a TrajectorySet object if that variable contains data for each atom and
is thus potentially large. It behaves exactly like a TrajectoryVariable
object.
"""
def __init__(self, universe, trajectory_set, name):
self.universe = universe
self.trajectory_set = trajectory_set
self.name = name
def __len__(self):
return len(self.trajectory_set)
def __getitem__(self, item):
if type(item) != type(0):
return SubVariable(self, Numeric.arange(len(self)))[item]
if item >= len(self.trajectory_set):
raise IndexError
tindex = Numeric.add.reduce(Numeric.greater_equal(item,
self.trajectory_set.nsteps))-1
step = item-self.trajectory_set.nsteps[tindex]
t = self.trajectory_set.trajectories[tindex]
return getattr(t, self.name)[step]
#
# Cache for atom trajectories
#
class ParticleTrajectoryReader:
def __init__(self, trajectory):
self.trajectory = trajectory
self.natoms = self.trajectory.universe.numberOfAtoms()
self._trajectory = trajectory.trajectory
self.cache = {}
self.cache_lifetime = 2
def __call__(self, atom, variable, first, last, skip, correct, box):
if type(atom) != type(0):
index = atom.index
else:
index = atom
key = (index, variable, first, last, skip, correct, box)
data, count = self.cache.get(key, (None, 0))
if data is not None:
self.cache[key] = (data, self.cache_lifetime)
return data
delete = []
for k, value in self.cache.items():
data, count = value
count = count - 1
if count == 0:
delete.append(k)
else:
self.cache[k] = (data, count)
for k in delete:
del self.cache[k]
cache_size = min(10, max(1, 100000/max(1, len(self.trajectory))))
natoms = min(cache_size, self.natoms-index)
data = self._trajectory.readParticleTrajectories(index, natoms,
variable,
first, last, skip,
correct, box)
for i in range(natoms):
key = (index+i, variable, first, last, skip, correct, box)
self.cache[key] = (data[i], self.cache_lifetime)
return data[0]
#
# Single-atom trajectory
#
class ParticleTrajectory:
"""Trajectory data for a single particle
A ParticleTrajectory object is created by calling the method
'readParticleTrajectory' on a Trajectory object.
If 'pt' is a ParticleTrajectory object, then
- 'len(pt)' is the number of steps stored in it
- 'pt[i]' is the value at step 'i' (a vector)
"""
def __init__(self, trajectory, atom, first=0, last=None, skip=1,
variable = "configuration"):
if last is None:
last = len(trajectory)
if variable == "box_coordinates":
variable = "configuration"
box = 1
else:
box = 0
reader = trajectory.particle_trajectory_reader
self.array = reader(atom, variable, first, last, skip,
variable == "configuration", box)
def __len__(self):
return self.array.shape[0]
def __getitem__(self, index):
return Vector(self.array[index])
def translateBy(self, vector):
"""Adds |vector| to the values at all steps. This does *not*
change the data in the trajectory file."""
Numeric.add(self.array, vector.array[Numeric.NewAxis, :], self.array)
#
# Rigid-body trajectory
#
class RigidBodyTrajectory:
"""Rigid-body trajectory data
A RigidBodyTrajectory object is created by calling the method
'readRigidBodyTrajectory' on a Trajectory object.
If 'rbt' is a RigidBodyTrajectory object, then
- 'len(rbt)' is the number of steps stored in it
- 'rbt[i]' is the value at step 'i' (a vector for the center of mass
and a quaternion for the orientation)
"""
def __init__(self, trajectory, object, first=0, last=None, skip=1,
reference = None):
self.trajectory = trajectory
universe = trajectory.universe
if last is None: last = len(trajectory)
first_conf = trajectory.configuration[first]
offset = universe.contiguousObjectOffset([object], first_conf, 1)
if reference is None:
reference = first_conf
reference = universe.contiguousObjectConfiguration([object],
reference)
steps = (last-first+skip-1)/skip
mass = object.mass()
ref_cms = object.centerOfMass(reference)
atoms = object.atomList()
possq = Numeric.zeros((steps,), Numeric.Float)
cross = Numeric.zeros((steps, 3, 3), Numeric.Float)
rcms = Numeric.zeros((steps, 3), Numeric.Float)
# cms of the CONTIGUOUS object made of CONTINUOUS atom trajectories
for a in atoms:
r = trajectory.readParticleTrajectory(a, first, last, skip,
"box_coordinates").array
w = a._mass/mass
Numeric.add(rcms, w*r, rcms)
if offset is not None:
Numeric.add(rcms, w*offset[a].array, rcms)
# relative coords of the CONTIGUOUS reference
r_ref = Numeric.zeros((len(atoms), 3), Numeric.Float)
for a in range(len(atoms)):
r_ref[a] = atoms[a].position(reference).array - ref_cms.array
# main loop: storing data needed to fill M matrix
for a in range(len(atoms)):
r = trajectory.readParticleTrajectory(atoms[a],
first, last, skip,
"box_coordinates").array
r = r - rcms # (a-b)**2 != a**2 - b**2
if offset is not None:
Numeric.add(r, offset[atoms[a]].array,r)
trajectory._boxTransformation(r, r)
w = atoms[a]._mass/mass
Numeric.add(possq, w*Numeric.add.reduce(r*r, -1), possq)
Numeric.add(possq, w*Numeric.add.reduce(r_ref[a]*r_ref[a],-1),
possq)
Numeric.add(cross, w*r[:,:,Numeric.NewAxis]*r_ref[Numeric.NewAxis,
a,:],cross)
self.trajectory._boxTransformation(rcms, rcms)
# filling matrix M (formula no 40)
k = Numeric.zeros((steps, 4, 4), Numeric.Float)
k[:, 0, 0] = -cross[:, 0, 0]-cross[:, 1, 1]-cross[:, 2, 2]
k[:, 0, 1] = cross[:, 1, 2]-cross[:, 2, 1]
k[:, 0, 2] = cross[:, 2, 0]-cross[:, 0, 2]
k[:, 0, 3] = cross[:, 0, 1]-cross[:, 1, 0]
k[:, 1, 1] = -cross[:, 0, 0]+cross[:, 1, 1]+cross[:, 2, 2]
k[:, 1, 2] = -cross[:, 0, 1]-cross[:, 1, 0]
k[:, 1, 3] = -cross[:, 0, 2]-cross[:, 2, 0]
k[:, 2, 2] = cross[:, 0, 0]-cross[:, 1, 1]+cross[:, 2, 2]
k[:, 2, 3] = -cross[:, 1, 2]-cross[:, 2, 1]
k[:, 3, 3] = cross[:, 0, 0]+cross[:, 1, 1]-cross[:, 2, 2]
del cross
for i in range(1, 4):
for j in range(i):
k[:, i, j] = k[:, j, i]
Numeric.multiply(k, 2., k)
for i in range(4):
Numeric.add(k[:,i,i], possq, k[:,i,i])
del possq
quaternions = Numeric.zeros((steps, 4), Numeric.Float)
fit = Numeric.zeros((steps,), Numeric.Float)
import LinearAlgebra
for i in range(steps):
e, v = LinearAlgebra.eigenvectors(k[i])
j = Numeric.argmin(e)
if e[j] < 0.:
fit[i] = 0.
else:
fit[i] = Numeric.sqrt(e[j])
if v[j,0] < 0.: quaternions[i] = -v[j] # eliminate jumps
else: quaternions[i] = v[j]
self.fit = fit
self.cms = rcms
self.quaternions = quaternions
def __len__(self):
return self.cms.shape[0]
def __getitem__(self, index):
from Scientific.Geometry.Quaternion import Quaternion
return Vector(self.cms[index]), Quaternion(self.quaternions[index])
#
# Type check for trajectory objects
#
def isTrajectory(object):
"Returns 1 if |object| is a trajectory."
import MMTK_trajectory
return type(object) == MMTK_trajectory.trajectory_type or \
(type(object) == types.InstanceType and
object.__class__ == Trajectory)
#
# Base class for all objects that generate trajectories
#
class TrajectoryGenerator:
def __init__(self, universe, options):
checkNetCDF()
self.universe = universe
self.options = options
def setCallOptions(self, options):
self.call_options = options
def getActions(self):
try:
self.actions = self.getOption('actions')
except ValueError:
self.actions = []
try:
steps = self.getOption('steps')
except ValueError:
steps = None
return map(lambda a, t=self, s=steps: a.getSpecificationList(t, s),
self.actions)
def cleanupActions(self):
for a in self.actions:
a.cleanup()
def getOption(self, option):
try:
value = self.call_options[option]
except KeyError:
try:
value = self.options[option]
except KeyError:
try:
value = self.default_options[option]
except KeyError:
raise ValueError, 'undefined option: ' + option
return value
def optionString(self, options):
s = ''
for o in options:
s = s + o + '=' + `self.getOption(o)` + ', '
return s[:-2]
#
# Trajectory action base class
#
class TrajectoryAction:
def __init__(self, first, last, skip):
self.first = first
self.last = last
self.skip = skip
spec_type = 'function'
def _getSpecificationList(self, trajectory_generator, steps):
first = self.first
last = self.last
if first < 0:
first = first + steps
if last is None:
import MMTK_trajectory
last = MMTK_trajectory.maxint
elif last < 0:
last = last + steps+1
return (self.spec_type, first, last, self.skip)
def getSpecificationList(self, trajectory_generator, steps):
return self._getSpecificationList(trajectory_generator, steps) \
+ (self.Cfunction, self.parameters)
def cleanup(self):
pass
class TrajectoryOutput(TrajectoryAction):
"""Trajectory output action
A TrajectoryOutput object is used in the action list of any
trajectory-generating operation. It writes any of the available
data to a trajectory file. It is possible to use several
TrajectoryOutput objects at the same time in order to produce
multiple trajectories from a single run.
Constructor: TrajectoryOutput(|trajectory|, |data|=None,
|first|=0, |last|=None, |skip|=1)
Arguments:
|trajectory| -- a trajectory object or a string, which is interpreted
as the name of a file that is opened as a trajectory
in append mode
|data| -- a list of data categories. All variables provided by the
trajectory generator that fall in any of the listed categories
are written to the trajectory file. See the descriptions of
the trajectory generators for a list of variables and
categories. By default (|data| = 'None') the categories
"configuration", "energy", "thermodynamic", and "time"
are written.
|first| -- the number of the first step at which the action is executed
|last| -- the number of the last step at which the action is executed.
A value of 'None' indicates that the action should be
executed indefinitely.
|skip| -- the number of steps to skip between two applications of the
action
"""
def __init__(self, destination, categories = None,
first=0, last=None, skip=1):
TrajectoryAction.__init__(self, first, last, skip)
self.destination = destination
self.categories = categories
self.must_be_closed = None
spec_type = 'trajectory'
def getSpecificationList(self, trajectory_generator, steps):
if type(self.destination) == type(''):
destination = self._setupDestination(self.destination,
trajectory_generator.universe)
else:
destination = self.destination
if self.categories is None:
categories = self._defaultCategories(trajectory_generator)
else:
if self.categories == 'all' or self.categories == ['all']:
categories = trajectory_generator.available_data
else:
categories = self.categories
for item in categories:
if item not in trajectory_generator.available_data:
raise ValueError, \
'Data item %s is not available.' % item
return self._getSpecificationList(trajectory_generator, steps) \
+ (destination, categories)
def _setupDestination(self, destination, universe):
self.must_be_closed = Trajectory(universe, destination, 'a')
return self.must_be_closed
def cleanup(self):
if self.must_be_closed is not None:
self.must_be_closed.close()
def _defaultCategories(self, trajectory_generator):
available = trajectory_generator.available_data
return tuple(filter(lambda x, a=available: x in a, self.default_data))
default_data = ['configuration', 'energy', 'thermodynamic', 'time']
class RestartTrajectoryOutput(TrajectoryOutput):
"""Restart trajectory output action
A RestartTrajectoryOutput object is used in the action list of any
trajectory-generating operation. It writes those variables to a
trajectory that the trajectory generator declares as necessary
for restarting.
Constructor: RestartTrajectoryOutput(|trajectory|, |skip|=100, |length|=3)
Arguments:
|trajectory| -- a trajectory object or a string, which is interpreted
as the name of a file that is opened as a trajectory
in append mode with a cycle length of |length| and
double-precision variables
|skip| -- the number of steps between two write operations to the
restart trajectory
|length| -- the number of steps stored in the restart trajectory;
used only if |trajectory| is a string
"""
def __init__(self, trajectory, skip=100, length=3):
TrajectoryAction.__init__(self, 0, None, skip)
self.destination = trajectory
self.categories = None
self.length = length
def _setupDestination(self, destination, universe):
self.must_be_closed = Trajectory(universe, destination, 'a',
'Restart trajectory', 1, self.length)
return self.must_be_closed
def _defaultCategories(self, trajectory_generator):
if trajectory_generator.restart_data is None:
raise ValueError, "Trajectory generator does not permit restart"
return trajectory_generator.restart_data
class LogOutput(TrajectoryOutput):
"""Protocol file output action
A LogOutput object is used in the action list of any
trajectory-generating operation. It writes any of the available
data to a text file.
Constructor: LogOutput(|file|, |data|, |first|=0, |last|=None, |skip|=1)
Arguments:
|file| -- a file object or a string, which is interpreted as the name
of a file that is opened in write mode
|data| -- a list of data categories. All variables provided by the
trajectory generator that fall in any of the listed categories
are written to the trajectory file. See the descriptions of
the trajectory generators for a list of variables and
categories. By default (|data| = 'None') the categories
"energy" and "time" are written.
|first| -- the number of the first step at which the action is executed
|last| -- the number of the last step at which the action is executed.
A value of 'None' indicates that the action should be
executed indefinitely.
|skip| -- the number of steps to skip between two applications of the
action
"""
def _setupDestination(self, destination, universe):
self.must_be_closed = open(destination, 'w')
return self.must_be_closed
spec_type = 'print'
default_data = ['energy', 'time']
class StandardLogOutput(LogOutput):
"""Standard protocol output action
A StandardLogOutput object is used in the action list of any
trajectory-generating operation. It is a specialization of
LogOutput to the most common case and writes data in the categories
"time" and "energy" to the standard output stream.
Constructor: StandardLogOutput(|skip|=50)
Arguments:
|skip| -- the number of steps to skip between two applications of the
action
"""
def __init__(self, skip=50):
LogOutput.__init__(self, sys.stdout, None, 0, None, skip)
#
# Snapshot generator
#
class SnapshotGenerator(TrajectoryGenerator):
"""Trajectory generator for single steps
A SnapshotGenerator is used for manual assembly of trajectory
files. At each call it writes one step to the trajectory,
using the current state of the universe (configuration, velocities, etc.)
and data provided explicitly with the call.
Constructor: SnapshotGenerator(|universe|, |**options|)
Arguments:
|universe| -- the universe on which the integrator acts
|options| -- keyword options:
* data: a dictionary that supplies values for variables
that are not part of the universe state (e.g. potential energy)
* actions: a list of actions to be executed periodically (default is
none)
Each call to the SnapshotGenerator object produces one step.
All the keyword options listed above can be specified either when
creating the generator or when calling it.
"""
def __init__(self, universe, **options):
TrajectoryGenerator.__init__(self, universe, options)
self.available_data = []
try:
e, g = self.universe.energyAndGradients()
except: pass
else:
self.available_data.append('energy')
self.available_data.append('gradients')
try:
self.universe.configuration()
self.available_data.append('configuration')
except: pass
if self.universe.cellVolume() is not None:
self.available_data.append('thermodynamic')
if self.universe.velocities() is not None:
self.available_data.append('velocities')
self.available_data.append('energy')
self.available_data.append('thermodynamic')
default_options = {'steps': 0, 'actions': []}
def __call__(self, **options):
self.setCallOptions(options)
from MMTK_trajectory import snapshot
data = copy.copy(options.get('data', {}))
energy_terms = 0
for name in data.keys():
if name == 'time' and 'time' not in self.available_data:
self.available_data.append('time')
if name[-7:] == '_energy':
energy_terms = energy_terms + 1
if 'energy' not in self.available_data:
self.available_data.append('energy')
if (name == 'temperature' or name == 'pressure') \
and 'thermodynamic' not in self.available_data:
self.available_data.append('thermodynamic')
if name == 'gradients' and 'gradients' not in self.available_data:
self.available_data.append('gradients')
actions = self.getActions()
for action in actions:
categories = action[-1]
for c in categories:
if c == 'energy' and not data.has_key('kinetic_energy'):
v = self.universe.velocities()
if v is not None:
m = self.universe.masses()
e = (v*v*m*0.5).sumOverParticles()
data['kinetic_energy'] = e
df = self.universe.degreesOfFreedom()
data['temperature'] = 2.*e/df/Units.k_B/Units.K
if c == 'configuration':
if data.has_key('configuration'):
data['configuration'] = data['configuration'].array
else:
data['configuration'] = \
self.universe.configuration().array
if c == 'velocities':
if data.has_key('velocities'):
data['velocities'] = data['velocities'].array
else:
data['velocities'] = self.universe.velocities().array
if c == 'gradients':
if data.has_key('gradients'):
data['gradients'] = data['gradients'].array
p = self.universe.cellParameters()
if p is not None:
data['box_size'] = p
volume = self.universe.cellVolume()
if volume is not None:
data['volume'] = volume
try:
m = self.universe.masses()
data['masses'] = m.array
except: pass
snapshot(self.universe, data, actions, energy_terms)
#
# Trajectory reader (not yet functional...)
#
class TrajectoryReader(TrajectoryGenerator):
def __init__(self, trajectory, options):
TrajectoryGenerator.__init__(self, trajectory.universe, options)
self.input = trajectory
self.available_data = trajectory.variables()
default_options = {'trajectory': None, 'log': None, 'options': []}
def __call__(self, **options):
self.setCallOptions(options)
from MMTK_trajectory import readTrajectory
readTrajectory(self.universe, self.input.trajectory,
[self.getOption('trajectory'),
self.getOption('log')] +
self.getOption('options'))
#
# Print information about trajectory file
#
def trajectoryInfo(filename):
"""Return a string with summarial information about the trajectory
file identified by |filename|."""
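    # Sketch (hypothetical file name):
    #
    #   print trajectoryInfo('md.nc')
    #
    # prints the comment, the numbers of atoms and steps, and the history.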
from Scientific.IO import NetCDF
file = NetCDF.NetCDFFile(filename, 'r')
nsteps = file.variables['step'].shape[0]
if 'minor_step_number' in file.dimensions.keys():
nsteps = nsteps*file.variables['step'].shape[1]
s = 'Information about trajectory file ' + filename + ':\n'
try:
s = s + file.comment + '\n'
except AttributeError:
pass
s = s + `file.dimensions['atom_number']` + ' atoms\n'
s = s + `nsteps` + ' steps\n'
s = s + file.history
file.close()
return s
|
fxia22/ASM_xf
|
PythonD/site_python/MMTK/Trajectory.py
|
Python
|
gpl-2.0
| 49,365
|
[
"NetCDF"
] |
ff075317aeb1e0d7fd940fabed3a109b60ef95fcc01ba23e8129737e8d3c8211
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe which runs the SKQP APK using Docker and an Android Emulator
DEPS = [
'checkout',
'infra',
'recipe_engine/file',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/step',
'run',
'vars',
]
# This image is public, and thus doesn't require log-in to read.
DOCKER_IMAGE = ('butomo1989/docker-android-x86-8.1@sha256:'
'ad75c888e373d9ea7a2821fd8f64b53c9a22b5827e6fa516b396739a20b9bb88')
INNER_TEST_SCRIPT = '/SRC/skia/infra/skqp/run_skqp.sh'
def RunSteps(api):
api.vars.setup()
checkout_root = api.path['start_dir']
# This is where the APK should be, that is, where Swarming puts the inputs.
apk_location = api.vars.build_dir
container_name = 'android_em'
  # Make sure the emulator starts up, with some resilience against
  # occasional flakes.
api.python.inline(
name='Start Emulator',
program='''
import os
import subprocess
import sys
container_name = sys.argv[1]
checkout_root = sys.argv[2]
apk_location = sys.argv[3]
DOCKER_IMAGE = sys.argv[4]
MAX_TRIES = 5
start_cmd = ['docker', 'run', '--privileged', '--rm', '-d', # detached/daemon
'--name', container_name,
'--env', 'DEVICE=Samsung Galaxy S6',
'--volume', '%s:/SRC' % checkout_root,
'--volume', '%s:/OUT' % apk_location,
DOCKER_IMAGE]
wait_cmd = ['docker', 'exec', container_name,
'timeout', '45', 'adb', 'wait-for-device']
for t in range(MAX_TRIES):
print 'Starting Emulator try %d' % t
try:
# Start emulator
print subprocess.check_output(start_cmd)
# Wait a short time using adb-wait-for-device
print subprocess.check_output(wait_cmd)
# if exit code 0, we are good so end loop
print 'Emulator started'
sys.exit(0)
except subprocess.CalledProcessError:
# else kill docker container
print 'Killing and trying again'
print subprocess.check_output(['docker', 'kill', container_name])
print 'Could not start emulator'
sys.exit(1)
''',
args=[container_name, checkout_root, apk_location, DOCKER_IMAGE],
infra_step=True)
api.run(
api.step,
      'Test SKQP with Android Emulator in Docker',
cmd=['docker', 'exec', container_name,
INNER_TEST_SCRIPT])
api.run(
api.step,
'Stop Emulator',
cmd=['docker', 'kill', container_name],
infra_step=True)
def GenTests(api):
yield (
api.test('Test-Debian10-Clang-GCE-CPU-Emulator-x86-devrel'
'-All-Android_SKQP') +
api.properties(buildername=('Test-Debian10-Clang-GCE-CPU-Emulator'
'-x86-devrel-All-Android_SKQP'),
repository='https://skia.googlesource.com/skia.git',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]')
)
|
endlessm/chromium-browser
|
third_party/skia/infra/bots/recipes/test_skqp_emulator.py
|
Python
|
bsd-3-clause
| 3,021
|
[
"Galaxy"
] |
e87c3f031a3cf7aade28e069c239eadb6d19865116d278fe323d756f4abb54d7
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import numpy as np
from pymatgen.core import PeriodicSite
from pymatgen.io.vasp import Vasprun, Poscar, Outcar
from pymatgen.analysis.defects.core import Vacancy, Interstitial, DefectEntry
from pymatgen.analysis.defects.defect_compatibility import DefectCompatibility
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", 'test_files')
class DefectCompatibilityTest(PymatgenTest):
def setUp(self):
struc = PymatgenTest.get_structure("VO2")
struc.make_supercell(3)
self.vac = Vacancy(struc, struc.sites[0], charge=-3)
abc = self.vac.bulk_structure.lattice.abc
axisdata = [np.arange(0., lattval, 0.2) for lattval in abc]
bldata = [np.array([1. for u in np.arange(0., lattval, 0.2)]) for lattval in abc]
dldata = [
np.array([(-1 - np.cos(2 * np.pi * u / lattval)) for u in np.arange(0., lattval, 0.2)]) for lattval in abc
]
self.frey_params = {'axis_grid': axisdata, 'bulk_planar_averages': bldata,
'defect_planar_averages': dldata, 'dielectric': 15,
'initial_defect_structure': struc.copy(),
'defect_frac_sc_coords': struc.sites[0].frac_coords[:]}
kumagai_bulk_struc = Poscar.from_file(os.path.join(test_dir, 'defect', 'CONTCAR_bulk')).structure
bulk_out = Outcar(os.path.join(test_dir, 'defect', 'OUTCAR_bulk.gz'))
defect_out = Outcar(os.path.join(test_dir, 'defect', 'OUTCAR_vac_Ga_-3.gz'))
self.kumagai_vac = Vacancy(kumagai_bulk_struc, kumagai_bulk_struc.sites[0], charge=-3)
kumagai_defect_structure = self.kumagai_vac.generate_defect_structure()
self.kumagai_params = {'bulk_atomic_site_averages': bulk_out.electrostatic_potential,
'defect_atomic_site_averages': defect_out.electrostatic_potential,
'site_matching_indices': [[ind, ind - 1] for ind in range(len(kumagai_bulk_struc))],
'defect_frac_sc_coords': [0., 0., 0.],
'initial_defect_structure': kumagai_defect_structure,
'dielectric': 18.118 * np.identity(3),
                               'gamma': 0.153156  # not necessary to load gamma, but speeds up unit test
}
v = Vasprun(os.path.join(test_dir, 'vasprun.xml'))
eigenvalues = v.eigenvalues.copy()
kptweights = v.actual_kpoints_weights
potalign = -0.1
vbm = v.eigenvalue_band_properties[2]
cbm = v.eigenvalue_band_properties[1]
self.bandfill_params = {'eigenvalues': eigenvalues,
'kpoint_weights': kptweights,
'potalign': potalign,
'vbm': vbm, 'cbm': cbm}
self.band_edge_params = {'hybrid_cbm': 1., 'hybrid_vbm': -1., 'vbm': -0.5,
'cbm': 0.6, 'num_hole_vbm': 1., 'num_elec_cbm': 1.}
def test_process_entry(self):
# basic process with no corrections
dentry = DefectEntry(self.vac, 0., corrections={}, parameters={'vbm': 0., 'cbm': 0.}, entry_id=None)
dc = DefectCompatibility()
dentry = dc.process_entry(dentry)
self.assertIsNotNone(dentry)
# process with corrections from parameters used in other unit tests
params = self.frey_params.copy()
params.update(self.bandfill_params)
params.update({'hybrid_cbm': params['cbm'] + .2, 'hybrid_vbm': params['vbm'] - .4, })
dentry = DefectEntry(self.vac, 0., corrections={}, parameters=params, entry_id=None)
dc = DefectCompatibility()
dentry = dc.process_entry(dentry)
self.assertAlmostEqual(dentry.corrections['bandedgeshifting_correction'], 1.2)
self.assertAlmostEqual(dentry.corrections['bandfilling_correction'], 0.0)
self.assertAlmostEqual(dentry.corrections['charge_correction'], 5.44595036)
# test over delocalized free carriers which forces skipping charge correction
# modify the eigenvalue list to have free holes
hole_eigenvalues = {}
for spinkey, spinset in params['eigenvalues'].items():
hole_eigenvalues[spinkey] = []
for kptset in spinset:
hole_eigenvalues[spinkey].append([])
for eig in kptset:
if (eig[0] < params['vbm']) and (eig[0] > params['vbm'] - .8):
hole_eigenvalues[spinkey][-1].append([eig[0], 0.5])
else:
hole_eigenvalues[spinkey][-1].append(eig)
params.update({'eigenvalues': hole_eigenvalues})
dentry = DefectEntry(self.vac, 0., corrections={}, parameters=params, entry_id=None)
dc = DefectCompatibility(free_chg_cutoff=0.8)
dentry = dc.process_entry(dentry)
self.assertAlmostEqual(dentry.corrections['bandedgeshifting_correction'], 1.19999999)
self.assertAlmostEqual(dentry.corrections['bandfilling_correction'], -1.62202400)
self.assertAlmostEqual(dentry.corrections['charge_correction'], 0.)
# turn off band filling and band edge shifting
dc = DefectCompatibility(free_chg_cutoff=0.8, use_bandfilling=False, use_bandedgeshift=False)
dentry = dc.process_entry(dentry)
self.assertAlmostEqual(dentry.corrections['bandedgeshifting_correction'], 0.)
self.assertAlmostEqual(dentry.corrections['bandfilling_correction'], 0.)
self.assertAlmostEqual(dentry.corrections['charge_correction'], 0.)
def test_perform_all_corrections(self):
        # return entry even if insufficient values are provided
# for freysoldt, kumagai, bandfilling, or band edge shifting
de = DefectEntry(self.vac, 0., corrections={}, parameters={}, entry_id=None)
dc = DefectCompatibility()
dentry = dc.perform_all_corrections(de)
self.assertIsNotNone(dentry)
# all other correction applications are tested in unit tests below
def test_perform_freysoldt(self):
de = DefectEntry(self.vac, 0., corrections={}, parameters=self.frey_params, entry_id=None)
dc = DefectCompatibility()
dentry = dc.perform_freysoldt(de)
val = dentry.parameters['freysoldt_meta']
self.assertAlmostEqual(val['freysoldt_electrostatic'], 0.975893)
self.assertAlmostEqual(val['freysoldt_potential_alignment_correction'], 4.4700574)
self.assertAlmostEqual(val['freysoldt_potalign'], 1.4900191)
self.assertTrue('pot_corr_uncertainty_md' in val.keys())
self.assertTrue('pot_plot_data' in val.keys())
def test_perform_kumagai(self):
de = DefectEntry(self.kumagai_vac, 0., parameters=self.kumagai_params)
dc = DefectCompatibility()
dentry = dc.perform_kumagai(de)
val = dentry.parameters['kumagai_meta']
self.assertAlmostEqual(val['kumagai_electrostatic'], 0.88236299)
self.assertAlmostEqual(val['kumagai_potential_alignment_correction'], 2.09704862)
self.assertAlmostEqual(val['kumagai_potalign'], 0.69901620)
self.assertTrue('pot_corr_uncertainty_md' in val.keys())
self.assertTrue('pot_plot_data' in val.keys())
def test_run_bandfilling(self):
de = DefectEntry(self.vac, 0., corrections={}, parameters=self.bandfill_params, entry_id=None)
dc = DefectCompatibility()
dentry = dc.perform_bandfilling(de)
val = dentry.parameters['bandfilling_meta']
self.assertAlmostEqual(val['num_hole_vbm'], 0.)
self.assertAlmostEqual(val['num_elec_cbm'], 0.)
self.assertAlmostEqual(val['bandfilling_correction'], 0.)
def test_run_band_edge_shifting(self):
de = DefectEntry(self.vac, 0., corrections={}, parameters=self.band_edge_params, entry_id=None)
dc = DefectCompatibility()
dentry = dc.perform_band_edge_shifting(de)
val = dentry.parameters['bandshift_meta']
self.assertEqual(val['vbmshift'], -0.5)
self.assertEqual(val['cbmshift'], 0.4)
self.assertEqual(val['bandedgeshifting_correction'], 1.5)
def test_delocalization_analysis(self):
        # return entry even if insufficient values are provided
# for delocalization analysis with freysoldt, kumagai,
# bandfilling, or band edge shifting
de = DefectEntry(self.vac, 0., corrections={}, parameters={}, entry_id=None)
dc = DefectCompatibility()
dentry = dc.delocalization_analysis(de)
self.assertIsNotNone(dentry)
# all other correction applications are tested in unit tests below
def test_check_freysoldt_delocalized(self):
de = DefectEntry(self.vac, 0., corrections={}, parameters=self.frey_params, entry_id=None)
de.parameters.update({'is_compatible': True}) # needs to be initialized with this here for unittest
dc = DefectCompatibility(plnr_avg_var_tol=0.1, plnr_avg_minmax_tol=0.5)
dentry = dc.perform_freysoldt(de)
# check case which fits under compatibility constraints
dentry = dc.check_freysoldt_delocalized(dentry)
frey_delocal = dentry.parameters['delocalization_meta']['plnr_avg']
self.assertTrue(frey_delocal['is_compatible'])
ans_var = [0.00038993, 0.02119532, 0.02119532]
ans_window = [0.048331509, 0.36797169, 0.36797169]
for ax in range(3):
ax_metadata = frey_delocal['metadata'][ax]
self.assertTrue(ax_metadata['frey_variance_compatible'])
self.assertAlmostEqual(ax_metadata['frey_variance'], ans_var[ax])
self.assertTrue(ax_metadata['frey_minmax_compatible'])
self.assertAlmostEqual(ax_metadata['frey_minmax_window'], ans_window[ax])
self.assertTrue(dentry.parameters['is_compatible'])
# check planar delocalization on 2nd and 3rd axes
dc = DefectCompatibility(plnr_avg_var_tol=0.1, plnr_avg_minmax_tol=0.2)
dentry.parameters.update({'is_compatible': True})
dentry = dc.check_freysoldt_delocalized(dentry)
frey_delocal = dentry.parameters['delocalization_meta']['plnr_avg']
self.assertFalse(frey_delocal['is_compatible'])
ax_metadata = frey_delocal['metadata'][0]
self.assertTrue(ax_metadata['frey_variance_compatible'])
self.assertTrue(ax_metadata['frey_minmax_compatible'])
for ax in [1, 2]:
ax_metadata = frey_delocal['metadata'][ax]
self.assertTrue(ax_metadata['frey_variance_compatible'])
self.assertFalse(ax_metadata['frey_minmax_compatible'])
self.assertFalse(dentry.parameters['is_compatible'])
# check variance based delocalization on 2nd and 3rd axes
dc = DefectCompatibility(plnr_avg_var_tol=0.01, plnr_avg_minmax_tol=0.5)
dentry.parameters.update({'is_compatible': True})
dentry = dc.check_freysoldt_delocalized(dentry)
frey_delocal = dentry.parameters['delocalization_meta']['plnr_avg']
self.assertFalse(frey_delocal['is_compatible'])
ax_metadata = frey_delocal['metadata'][0]
self.assertTrue(ax_metadata['frey_variance_compatible'])
self.assertTrue(ax_metadata['frey_minmax_compatible'])
for ax in [1, 2]:
ax_metadata = frey_delocal['metadata'][ax]
self.assertFalse(ax_metadata['frey_variance_compatible'])
self.assertTrue(ax_metadata['frey_minmax_compatible'])
self.assertFalse(dentry.parameters['is_compatible'])
def test_check_kumagai_delocalized(self):
de = DefectEntry(self.kumagai_vac, 0., parameters=self.kumagai_params)
de.parameters.update({'is_compatible': True}) # needs to be initialized with this here for unittest
dc = DefectCompatibility(atomic_site_var_tol=13.3, atomic_site_minmax_tol=20.95)
dentry = dc.perform_kumagai(de)
# check case which fits under compatibility constraints
dentry = dc.check_kumagai_delocalized(dentry)
kumagai_delocal = dentry.parameters['delocalization_meta']['atomic_site']
self.assertTrue(kumagai_delocal['is_compatible'])
kumagai_md = kumagai_delocal['metadata']
true_variance = 13.262304401193997
true_minmax = 20.9435
self.assertTrue(kumagai_md['kumagai_variance_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_variance'], true_variance)
self.assertTrue(kumagai_md['kumagai_minmax_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_minmax_window'], true_minmax)
self.assertTrue(dentry.parameters['is_compatible'])
# break variable compatibility
dc = DefectCompatibility(atomic_site_var_tol=0.1, atomic_site_minmax_tol=20.95)
de.parameters.update({'is_compatible': True})
dentry = dc.perform_kumagai(de)
dentry = dc.check_kumagai_delocalized(dentry)
kumagai_delocal = dentry.parameters['delocalization_meta']['atomic_site']
self.assertFalse(kumagai_delocal['is_compatible'])
kumagai_md = kumagai_delocal['metadata']
self.assertFalse(kumagai_md['kumagai_variance_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_variance'], true_variance)
self.assertTrue(kumagai_md['kumagai_minmax_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_minmax_window'], true_minmax)
self.assertFalse(dentry.parameters['is_compatible'])
# break maxmin compatibility
dc = DefectCompatibility(atomic_site_var_tol=13.3, atomic_site_minmax_tol=0.5)
de.parameters.update({'is_compatible': True})
dentry = dc.perform_kumagai(de)
dentry = dc.check_kumagai_delocalized(dentry)
kumagai_delocal = dentry.parameters['delocalization_meta']['atomic_site']
self.assertFalse(kumagai_delocal['is_compatible'])
kumagai_md = kumagai_delocal['metadata']
self.assertTrue(kumagai_md['kumagai_variance_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_variance'], true_variance)
self.assertFalse(kumagai_md['kumagai_minmax_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_minmax_window'], true_minmax)
self.assertFalse(dentry.parameters['is_compatible'])
def test_check_final_relaxed_structure_delocalized(self):
# test structure delocalization analysis
# first test no movement in atoms
initial_defect_structure = self.vac.generate_defect_structure()
final_defect_structure = initial_defect_structure.copy()
sampling_radius = 4.55
defect_frac_sc_coords = self.vac.site.frac_coords[:]
params = {'initial_defect_structure': initial_defect_structure,
'final_defect_structure': final_defect_structure,
'sampling_radius': sampling_radius,
'defect_frac_sc_coords': defect_frac_sc_coords,
'is_compatible': True}
dentry = DefectEntry(self.vac, 0., corrections={}, parameters=params, entry_id=None)
dc = DefectCompatibility(tot_relax_tol=0.1, perc_relax_tol=0.1, defect_tot_relax_tol=0.1)
dentry = dc.check_final_relaxed_structure_delocalized(dentry)
struc_delocal = dentry.parameters['delocalization_meta']['structure_relax']
self.assertTrue(dentry.parameters['is_compatible'])
self.assertTrue(struc_delocal['is_compatible'])
self.assertTrue(struc_delocal['metadata']['structure_tot_relax_compatible'])
self.assertEqual(struc_delocal['metadata']['tot_relax_outside_rad'], 0.)
self.assertTrue(struc_delocal['metadata']['structure_perc_relax_compatible'])
self.assertEqual(struc_delocal['metadata']['perc_relax_outside_rad'], 0.)
self.assertEqual(len(struc_delocal['metadata']['full_structure_relax_data']), len(initial_defect_structure))
self.assertIsNone(struc_delocal['metadata']['defect_index'])
defect_delocal = dentry.parameters['delocalization_meta']['defectsite_relax']
self.assertTrue(defect_delocal['is_compatible'])
self.assertIsNone(defect_delocal['metadata']['relax_amount'])
# next test for when structure has delocalized outside of radius from defect
pert_struct_fin_struct = initial_defect_structure.copy()
pert_struct_fin_struct.perturb(0.1)
dentry.parameters.update({'final_defect_structure': pert_struct_fin_struct})
dentry = dc.check_final_relaxed_structure_delocalized(dentry)
struc_delocal = dentry.parameters['delocalization_meta']['structure_relax']
self.assertFalse(dentry.parameters['is_compatible'])
self.assertFalse(struc_delocal['is_compatible'])
self.assertFalse(struc_delocal['metadata']['structure_tot_relax_compatible'])
self.assertAlmostEqual(struc_delocal['metadata']['tot_relax_outside_rad'], 12.5)
self.assertFalse(struc_delocal['metadata']['structure_perc_relax_compatible'])
self.assertAlmostEqual(struc_delocal['metadata']['perc_relax_outside_rad'], 77.63975155)
# now test for when an interstitial defect has migrated too much
inter_def_site = PeriodicSite('H', [7.58857304, 11.70848069, 12.97817518],
self.vac.bulk_structure.lattice, to_unit_cell=True,
coords_are_cartesian=True)
inter = Interstitial(self.vac.bulk_structure, inter_def_site, charge=0)
initial_defect_structure = inter.generate_defect_structure()
final_defect_structure = initial_defect_structure.copy()
poss_deflist = sorted(
final_defect_structure.get_sites_in_sphere(inter.site.coords,
2, include_index=True), key=lambda x: x[1])
def_index = poss_deflist[0][2]
final_defect_structure.translate_sites(indices=[def_index],
vector=[0., 0., 0.008]) # fractional coords translation
defect_frac_sc_coords = inter_def_site.frac_coords[:]
params = {'initial_defect_structure': initial_defect_structure,
'final_defect_structure': final_defect_structure,
'sampling_radius': sampling_radius,
'defect_frac_sc_coords': defect_frac_sc_coords,
'is_compatible': True}
dentry = DefectEntry(inter, 0., corrections={}, parameters=params, entry_id=None)
dentry = dc.check_final_relaxed_structure_delocalized(dentry)
defect_delocal = dentry.parameters['delocalization_meta']['defectsite_relax']
self.assertFalse(defect_delocal['is_compatible'])
self.assertAlmostEqual(defect_delocal['metadata']['relax_amount'], 0.10836054)
if __name__ == "__main__":
unittest.main()
|
gVallverdu/pymatgen
|
pymatgen/analysis/defects/tests/test_defect_compatibility.py
|
Python
|
mit
| 19,040
|
[
"VASP",
"pymatgen"
] |
8854055dbcde70da112e088665244ddac7674a752a53b8a66a6320d66b8190bc
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
"""
**********************************************
io_extended - read/write configurational files
**********************************************
This Python module allows one to read and write configurational files.
One can choose folded or unfolded coordinates and write down velocities or not.
It is similar to the LAMMPS read and write tools, but it writes down only:
1) number of particles + types
2) number of bonds (number of pairs) + types
3) number of angles (number of triples) + types
4) number of dihedrals (number of quadruples) + types
5) system size (Lx,Ly,Lz)
6) p_id, p_type, p_positions
7) velocities (if true)
8) bonds (if exist)
9) angles (if exist)
10) dihedrals (if exist)
read returns:
Lx, Ly, Lz, p_ids, p_types, poss, vels, bonds, angles, dihedrals
if something does not exist, the corresponding list is returned empty.
bonds, angles, dihedrals are returned as lists of [type, (x,x,x,x)],
where type is the type of bond, angle or dihedral
(x,x,x,x) is (pid1,pid2) for bonds,
(pid1,pid2,pid3) for angles
(pid1,pid2,pid3,pid4) for dihedrals
"""
import espressopp
def write(fileName, system, folded=True, writeVelocities=False):
# first collect all the information that we need to write into the file
numParticles = int(espressopp.analysis.NPart(system).compute())
box_x = system.bc.boxL[0]
box_y = system.bc.boxL[1]
box_z = system.bc.boxL[2]
bonds = []
nbondtypes = 0
angles = []
nangletypes = 0
dihedrals = []
ndihedraltypes = 0
nInteractions = system.getNumberOfInteractions()
for i in xrange(nInteractions):
bT = system.getInteraction(i).bondType()
if bT == espressopp.interaction.Pair:
nbondtypes += 1
bl = system.getInteraction(i).getFixedPairList().getBonds()
bln = []
for j in xrange(len(bl)):
bln.extend(bl[j])
bonds.append(bln)
elif bT == espressopp.interaction.Angular:
nangletypes += 1
an = system.getInteraction(i).getFixedTripleList().getTriples()
ann = []
for j in xrange(len(an)):
ann.extend(an[j])
angles.append(ann)
elif bT == espressopp.interaction.Dihedral:
ndihedraltypes += 1
di = system.getInteraction(i).getFixedQuadrupleList().getQuadruples()
din = []
for j in xrange(len(di)):
din.extend(di[j])
dihedrals.append(din)
nbonds = 0
for i in xrange(len(bonds)):
nbonds += len(bonds[i])
nangles = 0
for i in xrange(len(angles)):
nangles += len(angles[i])
ndihedrals = 0
for i in xrange(len(dihedrals)):
ndihedrals += len(dihedrals[i])
atomtypes = []
maxParticleID = int(espressopp.analysis.MaxPID(system).compute())
pid = 0
while pid <= maxParticleID:
if system.storage.particleExists(pid):
particle = system.storage.getParticle(pid)
type = particle.type
if type in atomtypes:
pid += 1
else:
atomtypes.append(type)
pid += 1
else:
pid += 1
natomtypes = len(atomtypes)
# now we can write the file
file = open(fileName,'w')
file.write('io_extended\n\n')
file.write('%5d atoms\n' % numParticles)
file.write('%5d bonds\n' % nbonds)
file.write('%5d angles\n' % nangles)
file.write('%5d dihedrals\n' % ndihedrals)
file.write('%5d atom types\n' % natomtypes)
file.write('%5d bond types\n' % nbondtypes)
file.write('%5d angle types\n' % nangletypes)
file.write('%5d dihedral types\n' % ndihedraltypes)
file.write('%.15f %.15f xlo xhi\n' % (0.0, box_x))
file.write('%.15f %.15f ylo yhi\n' % (0.0, box_y))
file.write('%.15f %.15f zlo zhi\n' % (0.0, box_z))
file.write('\nAtoms\n\n');
pid = 0
while pid <= maxParticleID:
if system.storage.particleExists(pid):
particle = system.storage.getParticle(pid)
if folded:
xpos = particle.pos.x
ypos = particle.pos.y
zpos = particle.pos.z
else:
p = system.bc.getUnfoldedPosition(particle.pos, particle.imageBox)
xpos = p[0]
ypos = p[1]
zpos = p[2]
type = particle.type
st = "%d %d %.15f %.15f %.15f\n"%(pid, type, xpos, ypos, zpos)
file.write(st)
pid += 1
# velocities are written in the same order as coordinates, thus it does not need ID.
if writeVelocities:
file.write('\nVelocities\n\n');
pid = 0
while pid <= maxParticleID:
if system.storage.particleExists(pid):
particle = system.storage.getParticle(pid)
xvel = particle.v[0]
yvel = particle.v[1]
zvel = particle.v[2]
st = "%.12f %.12f %.12f\n"%(xvel, yvel, zvel)
file.write(st)
pid += 1
else:
pid += 1
if nbonds > 0:
file.write('\nBonds\n\n')
bn = 0
for i in xrange(len(bonds)):
for j in xrange(len(bonds[i])):
file.write('%d %d %d %d\n' % (bn, i, bonds[i][j][0], bonds[i][j][1]))
bn += 1
if nangles > 0:
file.write('\nAngles\n\n')
an = 0
for i in xrange(len(angles)):
for j in xrange(len(angles[i])):
file.write('%d %d %d %d %d\n' % (an, i, angles[i][j][1], angles[i][j][0], angles[i][j][2]))
an += 1
if ndihedrals > 0:
file.write('\nDihedrals\n\n')
dn = 0
for i in xrange(len(dihedrals)):
for j in xrange(len(dihedrals[i])):
file.write('%d %d %d %d %d %d\n' % (dn, i, dihedrals[i][j][0], dihedrals[i][j][1], dihedrals[i][j][2], dihedrals[i][j][3]))
dn += 1
file.close()
def read(fileName, readVelocities=False):
f = open(fileName)
line = f.readline() # comment line
while not 'atoms' in line: #skip possible blank line
line = f.readline()
num_particles = int(line.split()[0])
num_bonds = int(f.readline().split()[0])
num_angles = int(f.readline().split()[0])
num_dihedrals = int(f.readline().split()[0])
# find and store size of box
line = ''
while not 'xlo' in line:
line = f.readline()
xmin, xmax = map(float, line.split()[0:2])
ymin, ymax = map(float, f.readline().split()[0:2])
zmin, zmax = map(float, f.readline().split()[0:2])
Lx = xmax - xmin
Ly = ymax - ymin
Lz = zmax - zmin
# find and store coordinates
line = ''
while not 'Atoms' in line:
line = f.readline()
line = f.readline()
p_ids = []
p_types = []
poss = []
for i in xrange(num_particles):
k, kk, rx, ry, rz = map(float, f.readline().split()[0:])
p_ids.append(int(k))
p_types.append(int(kk))
poss.append(espressopp.Real3D(rx, ry, rz))
vels = []
if(readVelocities):
# find and store velocities
line = ''
while not 'Velocities' in line:
line = f.readline()
line = f.readline() # blank line
for i in xrange(num_particles):
vx_, vy_, vz_ = map(float, f.readline().split()[0:])
vels.append(espressopp.Real3D(vx_, vy_, vz_))
bonds = []
if(num_bonds != 0):
# find and store bonds
line = ''
while not 'Bonds' in line:
line = f.readline()
line = f.readline()
for i in xrange(num_bonds):
bond_id, bond_type, pid1, pid2 = map(int, f.readline().split())
bonds.append([bond_type, (pid1, pid2)])
angles = []
if(num_angles != 0):
# find and store angles
line = ''
while not 'Angles' in line:
line = f.readline()
line = f.readline()
for i in xrange(num_angles):
angle_id, angle_type, pid1, pid2, pid3 = map(int, f.readline().split())
angles.append([angle_type, (pid1, pid2, pid3)])
dihedrals = []
if(num_dihedrals != 0):
# find and store angles
line = ''
while not 'Dihedrals' in line:
line = f.readline()
line = f.readline()
for i in xrange(num_dihedrals):
dihedral_id, dihedral_type, pid1, pid2, pid3, pid4 = map(int, f.readline().split())
dihedrals.append([dihedral_type, (pid1, pid2, pid3, pid4)])
f.close()
return Lx, Ly, Lz, p_ids, p_types, poss, vels, bonds, angles, dihedrals
|
kkreis/espressopp
|
src/tools/io_extended.py
|
Python
|
gpl-3.0
| 8,986
|
[
"ESPResSo",
"LAMMPS"
] |
cff8c25b9118886bd9dbb0928182d424005bed843bdbfba3ae2425d230477e30
|
#!/usr/bin/env python
description = ">> New automated psf version"
usage = "%prog image [options] "
# ###############################################################
# EC 2012 Feb 20
# modified by SV for lsc
################################################################
import os, sys, shutil, subprocess
import time
from optparse import OptionParser
from pyraf import iraf
try: from astropy.io import fits as pyfits
except: import pyfits
import agnkey
import traceback
import numpy as np
iraf.noao(_doprint=0)
iraf.obsutil(_doprint=0)
def runsex(img, fwhm, thresh, pix_scale):  # run SExtractor (fwhm in pixels)
import agnkey
from agnkey.util import defsex
mina = 5.
seeing = fwhm * pix_scale
cdef = open(agnkey.__path__[0] + '/standard/sex/default2.param')
riga = cdef.readlines()
cparam = []
for r in riga:
        if r[0] != '#' and len(r.strip()) > 0:
            cparam.append(r.split()[0])
pid = subprocess.Popen("sex " + img + ".fits -catalog_name tmp.cat" + \
" -c " + agnkey.__path__[0] +
"/standard/sex/default2.sex -PARAMETERS_NAME " + agnkey.__path__[0] +
"/standard/sex/default2.param" + " -STARNNW_NAME " + agnkey.__path__[0] +
"/standard/sex/default2.nnw" + " -PIXEL_SCALE " + str(pix_scale) + \
" -DETECT_MINAREA " + str(mina) + " -DETECT_THRESH " + str(thresh) + \
" -ANALYSIS_THRESH " + str(thresh) + " -PHOT_FLUXFRAC 0.5" + \
" -SEEING_FWHM " + str(seeing), stdout=subprocess.PIPE, shell=True)
output, error = pid.communicate()
csex = open("tmp.cat")
tab = {}
riga = csex.readlines()
for k in cparam: tab[k] = []
for r in riga:
if r[0] != '#':
for i in range(len(cparam)):
tab[cparam[i]].append(float(r.split()[i]))
for k in cparam: tab[k] = np.array(tab[k])
xdim, ydim = iraf.hselect(img, 'i_naxis1,i_naxis2', 'yes', Stdout=1) \
[0].split()
xcoo, ycoo, ra, dec, magbest, classstar, fluxrad, bkg = [], [], [], [], [], [], [], []
for i in range(len(tab['X_IMAGE'])):
x, y = tab['X_IMAGE'][i], tab['Y_IMAGE'][i]
if 5 < x < int(xdim) - 5 and 5 < y < int(ydim) - 5: # trim border
xcoo.append(x)
ycoo.append(y)
ra.append(tab['X_WORLD'][i])
dec.append(tab['Y_WORLD'][i])
magbest.append(tab['MAG_BEST'][i])
classstar.append(tab['CLASS_STAR'][i])
fluxrad.append(tab['FLUX_RADIUS'][i])
bkg.append(tab['BACKGROUND'][i])
return np.array(xcoo), np.array(ycoo), np.array(ra), np.array(dec), np.array(magbest), \
np.array(classstar), np.array(fluxrad), np.array(bkg)
def apfit(img, fwhm, hdr, interactive, _datamax=45000, fixaperture=False):
import agnkey
iraf.digiphot(_doprint=0)
iraf.daophot(_doprint=0)
zmag = 0.
    varord = 0  # -1 analytic, 0 numeric
if fixaperture:
print 'use fix aperture 5 8 10'
hdr = agnkey.util.readhdr(img+'.fits')
_pixelscale = agnkey.util.readkey3(hdr, 'PIXSCALE')
a1, a2, a3, a4, = float(5. / _pixelscale), float(5. / _pixelscale), float(8. / _pixelscale), float(
10. / _pixelscale)
else:
a1, a2, a3, a4, = int(fwhm + 0.5), int(fwhm * 2 + 0.5), int(fwhm * 3 + 0.5), int(fwhm * 4 + 0.5)
    _center = 'no'
iraf.fitskypars.annulus = a4
iraf.fitskypars.dannulus = a4
iraf.noao.digiphot.daophot.daopars.sannulus = int(a4)
iraf.noao.digiphot.daophot.daopars.wsannul = int(a4)
iraf.fitskypars.salgori = 'mean' #mode,mean,gaussian
iraf.photpars.apertures = '%d,%d,%d' % (a2, a3, a4)
iraf.datapars.datamin = -100
iraf.datapars.datamax = _datamax
iraf.datapars.readnoise = agnkey.util.readkey3(hdr, 'ron')
iraf.datapars.epadu = agnkey.util.readkey3(hdr, 'gain')
iraf.datapars.exposure = 'exptime' #agnkey.util.readkey3(hdr,'exptime')
iraf.datapars.airmass = 'airmass'
iraf.datapars.filter = 'filter2'
iraf.centerpars.calgori = 'gauss'
iraf.centerpars.cbox = 1
iraf.daopars.recenter = _center
iraf.photpars.zmag = zmag
iraf.delete('_ap.ma*', verify=False)
iraf.phot(img, '_ap.coo', '_ap.mag', interac=False, verify=False, verbose=False)
photmag = iraf.txdump("_ap.mag", 'xcenter,ycenter,id,mag,merr', expr='yes', Stdout=1)
return photmag
def ecpsf(img, ofwhm, threshold, interactive, ds9, fixaperture=False,_catalog=''):
try:
import agnkey
import string
hdr = agnkey.util.readhdr(img + '.fits')
instrument = agnkey.util.readkey3(hdr, 'instrume')
print 'INSTRUMENT:', instrument
if 'PIXSCALE' in hdr:
pixelscale = agnkey.util.readkey3(hdr, 'PIXSCALE')
elif 'CCDSCALE' in hdr:
if 'CCDXBIN' in hdr:
pixelscale = agnkey.util.readkey3(hdr, 'CCDSCALE') * agnkey.util.readkey3(hdr, 'CCDXBIN')
elif 'CCDSUM' in hdr:
pixelscale = agnkey.util.readkey3(hdr, 'CCDSCALE') * int(
string.split(agnkey.util.readkey3(hdr, 'CCDSUM'))[0])
if instrument in ['kb05', 'kb70', 'kb71', 'kb73', 'kb74', 'kb75', 'kb76', 'kb77', 'kb78', 'kb79']:
scale = pixelscale
_datamax = 45000
elif instrument in ['fl02', 'fl03', 'fl04']:
scale = pixelscale
_datamax = 120000
elif instrument in ['fs01', 'em03']:
scale = pixelscale
_datamax = 65000
elif instrument in ['fs02', 'fs03']:
scale = pixelscale
_datamax = 65000
elif instrument in ['em01']:
scale = pixelscale
_datamax = 65000
try:
_wcserr = agnkey.util.readkey3(hdr, 'wcserr')
if float(_wcserr) == 0:
if instrument in ['kb05', 'kb70', 'kb71', 'kb73', 'kb74', 'kb75', 'kb76', 'kb77', 'kb78', 'kb79']:
seeing = float(agnkey.util.readkey3(hdr, 'L1FWHM')) * .75
elif instrument in ['fl02', 'fl03', 'fl04']:
seeing = float(agnkey.util.readkey3(hdr, 'L1FWHM')) * .75
elif instrument in ['fs01', 'fs02', 'fs03', 'em03', 'em01']:
if 'L1FWHM' in hdr:
seeing = float(agnkey.util.readkey3(hdr, 'L1FWHM')) * .75
elif 'L1SEEING' in hdr:
seeing = float(agnkey.util.readkey3(hdr, 'L1SEEING')) * scale
else:
seeing = 3
else:
seeing = 3
else:
seeing = float(agnkey.util.readkey3(hdr, 'PSF_FWHM'))
sys.exit('astrometry not good')
except:
sys.exit('astrometry not good')
fwhm = seeing / scale
print 'FWHM[header] ', fwhm, ' in pixel'
if ofwhm:
fwhm = float(ofwhm)
print ' FWHM[input] ', fwhm, ' in pixel'
if interactive:
iraf.display(img, 1, fill=True)
iraf.delete('tmp.lo?', verify=False)
print '_' * 80
print '>>> Mark reference stars with "a". Then "q"'
print '-' * 80
iraf.imexamine(img, 1, wcs='logical', logfile='tmp.log', keeplog=True)
xyrefer = iraf.fields('tmp.log', '1,2,6,15', Stdout=1)
xns, yns, _fws = [], [], []
ff = open('_ap.coo', 'w')
for i in range(len(xyrefer)):
xns.append(float(xyrefer[i].split()[0]))
yns.append(float(xyrefer[i].split()[1]))
_fws.append(float(xyrefer[i].split()[3]))
ff.write('%10.3f %10.3f %7.2f \n' % (xns[i], yns[i], float(_fws[i])))
ff.close()
elif _catalog:
# cat1=agnkey.agnastrodef.readtxt(_catalog)
# cat1['ra'],cat1['dec']
ddd=iraf.wcsctran(input=_catalog,output='STDOUT',Stdout=1,image=img,inwcs='world',outwcs='logical',
units='degrees degrees',columns='1 2',formats='%10.1f %10.1f',verbose='no')
ddd=[i for i in ddd if i[0]!='#']
ddd=[' '.join(i.split()[0:3]) for i in ddd]
ff = open('_ap.coo', 'w')
for i in ddd:
a,b,c = string.split(i)
#print a,b,c
ff.write('%10s %10s %10s \n' % (a, b, c))
ff.close()
print 'use catalog'
else:
xs, ys, ran, decn, magbest, classstar, fluxrad, bkg = runsex(img, fwhm, threshold, scale)
ff = open('_ap.coo', 'w')
for i in range(len(xs)):
ff.write('%10.3f %10.3f %7.2f \n' % (xs[i], ys[i], float(fluxrad[i])))
ff.close() ## End automatic selection
print 80 * "#"
photmag = apfit(img, fwhm, hdr, interactive, _datamax, fixaperture)
radec = iraf.wcsctran(input='STDIN', output='STDOUT', Stdin=photmag, \
Stdout=1, image=img, inwcs='logical', outwcs='world', columns="1 2", \
format='%13.3H %12.2h', min_sig=9, mode='h')[3:]
exptime = agnkey.util.readkey3(hdr, 'exptime')
object = agnkey.util.readkey3(hdr, 'object').replace(' ', '')
filtro = agnkey.util.readkey3(hdr, 'filter')
#######################################
rap, decp, magp2, magp3, magp4, smagf = [], [], [], [], [], []
merrp2, merrp3, merrp4, smagerrf = [], [], [], []
rap0, decp0 = [], []
for i in range(len(radec)):
aa = radec[i].split()
rap.append(aa[0])
decp.append(aa[1])
rap0.append(agnkey.agnabsphotdef.deg2HMS(ra=aa[0]))
decp0.append(agnkey.agnabsphotdef.deg2HMS(dec=aa[1]))
idp = aa[2]
magp2.append(aa[3])
magp3.append(aa[4])
magp4.append(aa[5])
merrp2.append(aa[6])
merrp3.append(aa[7])
merrp4.append(aa[8])
tbhdu = pyfits.new_table(pyfits.ColDefs([
pyfits.Column(name='ra', format='20A', array=np.array(rap)),
pyfits.Column(name='dec', format='20A', array=np.array(decp)),
pyfits.Column(name='ra0', format='E', array=np.array(rap0)),
pyfits.Column(name='dec0', format='E', array=np.array(decp0)),
pyfits.Column(name='magp2', format='E', array=np.array(np.where((np.array(magp2) != 'INDEF'),
np.array(magp2), 9999), float)),
pyfits.Column(name='magp3', format='E', array=np.array(np.where((np.array(magp3) != 'INDEF'),
np.array(magp3), 9999), float)),
pyfits.Column(name='magp4', format='E', array=np.array(np.where((np.array(magp4) != 'INDEF'),
np.array(magp4), 9999), float)),
pyfits.Column(name='merrp2', format='E', array=np.array(np.where((np.array(merrp2) != 'INDEF'),
np.array(merrp2), 9999), float)),
pyfits.Column(name='merrp3', format='E', array=np.array(np.where((np.array(merrp3) != 'INDEF'),
np.array(merrp3), 9999), float)),
pyfits.Column(name='merrp4', format='E', array=np.array(np.where((np.array(merrp4) != 'INDEF'),
np.array(merrp4), 9999), float)),
pyfits.Column(name='smagf', format='E', array=np.array(np.where((np.array(magp2) != 'INDEF'),
np.array(magp2), 9999), float)),
pyfits.Column(name='smagerrf', format='E', array=np.array(np.where((np.array(merrp2) != 'INDEF'),
np.array(merrp2), 9999), float)),
]))
hdu = pyfits.PrimaryHDU(header=hdr)
thdulist = pyfits.HDUList([hdu, tbhdu])
agnkey.util.delete(img + '.sn2.fits')
thdulist.writeto(img + '.sn2.fits')
agnkey.util.updateheader(img + '.sn2.fits', 0,
{'XDIM': [agnkey.util.readkey3(hdr, 'naxis1'), 'x number of pixels']})
agnkey.util.updateheader(img + '.sn2.fits', 0,
{'YDIM': [agnkey.util.readkey3(hdr, 'naxis2'), 'y number of pixels']})
agnkey.util.updateheader(img + '.sn2.fits', 0,
{'PSF_FWHM': [fwhm * scale, 'FWHM (arcsec) - computed with daophot']})
os.chmod(img + '.sn2.fits', 0664)
os.chmod(img + '.psf.fits', 0664)
result = 1
except:
result = 0
fwhm = 0.0
traceback.print_exc()
return result, fwhm * scale
###########################################################################
if __name__ == "__main__":
start_time = time.time()
parser = OptionParser(usage=usage, description=description)
parser.add_option("-f", "--fwhm", dest="fwhm", default='', help='starting FWHM \t\t\t %default')
parser.add_option("-t", "--threshold", dest="threshold", default=10., type='float',
help='Source detection threshold \t\t\t %default')
parser.add_option("-c", "--catalog", dest="catalog", default='', type='str',
help='use input catalog \t\t %default')
parser.add_option("-r", "--redo", action="store_true", dest='redo', default=False,
help='Re-do \t\t\t\t [%default]')
parser.add_option("-i", "--interactive", action="store_true", dest='interactive', default=False,
help='Interactive \t\t\t [%default]')
parser.add_option("--fix", action="store_true", dest='fixaperture', default=False,
help='fixaperture \t\t\t [%default]')
parser.add_option("-s", "--show", dest="show", action='store_true', default=False,
help='Show PSF output \t\t [%default]')
parser.add_option("-X", "--xwindow", action="store_true", dest='xwindow', default=False,
help='xwindow \t\t\t [%default]')
option, args = parser.parse_args()
if len(args) < 1: sys.argv.append('--help')
option, args = parser.parse_args()
imglist = agnkey.util.readlist(args[0])
_xwindow = option.xwindow
fixaperture = option.fixaperture
_catalog = option.catalog
if _xwindow:
from stsci.tools import capable
capable.OF_GRAPHICS = False
for img in imglist:
if '.fits' in img: img = img[:-5]
if os.path.exists(img + '.sn2.fits') and not option.redo:
print img + ': psf already calculated'
else:
ds9 = os.system("ps -U" + str(os.getuid()) + "|grep -v grep | grep ds9")
if option.interactive and ds9 != 0:
pid = subprocess.Popen(['ds9']).pid
time.sleep(2)
ds9 = 0
result, fwhm = ecpsf(img, option.fwhm, option.threshold, option.interactive, ds9, fixaperture,_catalog)
print '\n### ' + str(result)
iraf.delete("tmp.*", verify="no")
iraf.delete("_psf.*", verify="no")
print "********** Completed in ", int(time.time() - start_time), "sec"
print result
if option.show:
agnkey.util.marksn2(img + '.fits', img + '.sn2.fits', 1, '')
try:
import string
if result == 1:
agnkey.agnsqldef.updatevalue('dataredulco', 'fwhm', fwhm, string.split(img, '/')[-1] + '.fits')
agnkey.agnsqldef.updatevalue('dataredulco', 'mag', 9999, string.split(img, '/')[-1] + '.fits')
agnkey.agnsqldef.updatevalue('dataredulco', 'apmag', 9999, string.split(img, '/')[-1] + '.fits')
if os.path.isfile(img + '.diff.fits') and os.path.isfile(
img + '.sn2.fits'): # update diff info if the files are available
os.system('cp ' + img + '.sn2.fits ' + img + '.diff.sn2.fits')
agnkey.agnsqldef.updatevalue('dataredulco', 'fwhm', fwhm,
string.split(img, '/')[-1] + '.diff.fits')
agnkey.agnsqldef.updatevalue('dataredulco', 'mag', 9999,
string.split(img, '/')[-1] + '.diff.fits')
agnkey.agnsqldef.updatevalue('dataredulco', 'apmag', 9999,
string.split(img, '/')[-1] + '.diff.fits')
else:
pass
except:
print 'module mysqldef not found'
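# Usage sketch (assumptions: a plain-text list file, here called "imglist.txt",
# with one FITS image per line; the option names are taken from the parser above):
#
#   python agnpsf2.py imglist.txt -t 10 --fix -X
#
# For each image this computes a PSF model (<img>.psf.fits) and an aperture
# photometry table (<img>.sn2.fits), then records the measured FWHM in the
# database when the sql helper module is available.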
|
svalenti/agnkey
|
trunk/bin/agnpsf2.py
|
Python
|
mit
| 17,133
|
[
"Gaussian"
] |
ed1c1bf7bf7a3b4fa903aa7149487f70158d1ca98783b082c89c78e3679fcf7e
|
"""
Regression test for refinement of beam, detector and crystal orientation
parameters using generated reflection positions from ideal geometry.
"""
from __future__ import annotations
def test():
# Python and cctbx imports
from math import pi
from cctbx.sgtbx import space_group, space_group_symbols
# Symmetry constrained parameterisation for the unit cell
from cctbx.uctbx import unit_cell
# We will set up a mock scan and a mock experiment list
from dxtbx.model import ScanFactory
from dxtbx.model.experiment_list import Experiment, ExperimentList
from libtbx.phil import parse
from libtbx.test_utils import approx_equal
from rstbx.symmetry.constraints.parameter_reduction import symmetrize_reduce_enlarge
from scitbx import matrix
from scitbx.array_family import flex
# Get modules to build models and minimiser using PHIL
import dials.tests.algorithms.refinement.setup_geometry as setup_geometry
import dials.tests.algorithms.refinement.setup_minimiser as setup_minimiser
from dials.algorithms.refinement.parameterisation.beam_parameters import (
BeamParameterisation,
)
from dials.algorithms.refinement.parameterisation.crystal_parameters import (
CrystalOrientationParameterisation,
CrystalUnitCellParameterisation,
)
# Model parameterisations
from dials.algorithms.refinement.parameterisation.detector_parameters import (
DetectorParameterisationSinglePanel,
)
# Parameterisation of the prediction equation
from dials.algorithms.refinement.parameterisation.prediction_parameters import (
XYPhiPredictionParameterisation,
)
from dials.algorithms.refinement.prediction.managed_predictors import (
ScansExperimentsPredictor,
ScansRayPredictor,
)
from dials.algorithms.refinement.reflection_manager import ReflectionManager
# Imports for the target function
from dials.algorithms.refinement.target import (
LeastSquaresPositionalResidualWithRmsdCutoff,
)
# Reflection prediction
from dials.algorithms.spot_prediction import IndexGenerator, ray_intersection
#############################
# Setup experimental models #
#############################
override = """geometry.parameters
{
beam.wavelength.random=False
beam.wavelength.value=1.0
beam.direction.inclination.random=False
crystal.a.length.random=False
crystal.a.length.value=12.0
crystal.a.direction.method=exactly
crystal.a.direction.exactly.direction=1.0 0.002 -0.004
crystal.b.length.random=False
crystal.b.length.value=14.0
crystal.b.direction.method=exactly
crystal.b.direction.exactly.direction=-0.002 1.0 0.002
crystal.c.length.random=False
crystal.c.length.value=13.0
crystal.c.direction.method=exactly
crystal.c.direction.exactly.direction=0.002 -0.004 1.0
detector.directions.method=exactly
detector.directions.exactly.dir1=0.99 0.002 -0.004
detector.directions.exactly.norm=0.002 -0.001 0.99
detector.centre.method=exactly
detector.centre.exactly.value=1.0 -0.5 199.0
}"""
master_phil = parse(
"""
include scope dials.tests.algorithms.refinement.geometry_phil
include scope dials.tests.algorithms.refinement.minimiser_phil
""",
process_includes=True,
)
models = setup_geometry.Extract(
master_phil, local_overrides=override, verbose=False
)
mydetector = models.detector
mygonio = models.goniometer
mycrystal = models.crystal
mybeam = models.beam
###########################
# Parameterise the models #
###########################
det_param = DetectorParameterisationSinglePanel(mydetector)
s0_param = BeamParameterisation(mybeam, mygonio)
xlo_param = CrystalOrientationParameterisation(mycrystal)
xluc_param = CrystalUnitCellParameterisation(mycrystal)
# Fix beam to the X-Z plane (imgCIF geometry), fix wavelength
s0_param.set_fixed([True, False, True])
########################################################################
# Link model parameterisations together into a parameterisation of the #
# prediction equation #
########################################################################
# Build a mock scan for a 180 degree sequence
sf = ScanFactory()
myscan = sf.make_scan(
image_range=(1, 1800),
exposure_times=0.1,
oscillation=(0, 0.1),
epochs=list(range(1800)),
deg=True,
)
# Build an ExperimentList
experiments = ExperimentList()
experiments.append(
Experiment(
beam=mybeam,
detector=mydetector,
goniometer=mygonio,
scan=myscan,
crystal=mycrystal,
imageset=None,
)
)
# Create the PredictionParameterisation
pred_param = XYPhiPredictionParameterisation(
experiments, [det_param], [s0_param], [xlo_param], [xluc_param]
)
################################
# Apply known parameter shifts #
################################
# shift detector by 1.0 mm each translation and 4 mrad each rotation
det_p_vals = det_param.get_param_vals()
p_vals = [a + b for a, b in zip(det_p_vals, [1.0, 1.0, 1.0, 4.0, 4.0, 4.0])]
det_param.set_param_vals(p_vals)
# shift beam by 4 mrad in free axis
s0_p_vals = s0_param.get_param_vals()
p_vals = list(s0_p_vals)
p_vals[0] += 4.0
s0_param.set_param_vals(p_vals)
# rotate crystal a bit (=3 mrad each rotation)
xlo_p_vals = xlo_param.get_param_vals()
p_vals = [a + b for a, b in zip(xlo_p_vals, [3.0, 3.0, 3.0])]
xlo_param.set_param_vals(p_vals)
# change unit cell a bit (=0.1 Angstrom length offsets, 0.1 degrees of
# alpha and beta angles)
xluc_p_vals = xluc_param.get_param_vals()
cell_params = mycrystal.get_unit_cell().parameters()
cell_params = [a + b for a, b in zip(cell_params, [0.1, -0.1, 0.1, 0.1, -0.1, 0.0])]
new_uc = unit_cell(cell_params)
newB = matrix.sqr(new_uc.fractionalization_matrix()).transpose()
S = symmetrize_reduce_enlarge(mycrystal.get_space_group())
S.set_orientation(orientation=newB)
X = tuple([e * 1.0e5 for e in S.forward_independent_parameters()])
xluc_param.set_param_vals(X)
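# Note on the 1.0e5 factor: the unit cell parameterisation is assumed here to
# work with the independent elements of the metric matrix scaled by 1e5, so the
# raw values from forward_independent_parameters() must be multiplied up before
# being passed to set_param_vals().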
#############################
# Generate some reflections #
#############################
# All indices in a 2.0 Angstrom sphere
resolution = 2.0
index_generator = IndexGenerator(
mycrystal.get_unit_cell(),
space_group(space_group_symbols(1).hall()).type(),
resolution,
)
indices = index_generator.to_array()
sequence_range = myscan.get_oscillation_range(deg=False)
im_width = myscan.get_oscillation(deg=False)[1]
assert sequence_range == (0.0, pi)
assert approx_equal(im_width, 0.1 * pi / 180.0)
# Predict rays within the sequence range
ray_predictor = ScansRayPredictor(experiments, sequence_range)
obs_refs = ray_predictor(indices)
# Take only those rays that intersect the detector
intersects = ray_intersection(mydetector, obs_refs)
obs_refs = obs_refs.select(intersects)
# Make a reflection predictor and re-predict for all these reflections. The
# result is the same, but we also gain the flags and xyzcal.px columns
ref_predictor = ScansExperimentsPredictor(experiments)
obs_refs["id"] = flex.int(len(obs_refs), 0)
obs_refs = ref_predictor(obs_refs)
# Set 'observed' centroids from the predicted ones
obs_refs["xyzobs.mm.value"] = obs_refs["xyzcal.mm"]
# Invent some variances for the centroid positions of the simulated data
im_width = 0.1 * pi / 180.0
px_size = mydetector[0].get_pixel_size()
var_x = flex.double(len(obs_refs), (px_size[0] / 2.0) ** 2)
var_y = flex.double(len(obs_refs), (px_size[1] / 2.0) ** 2)
var_phi = flex.double(len(obs_refs), (im_width / 2.0) ** 2)
obs_refs["xyzobs.mm.variance"] = flex.vec3_double(var_x, var_y, var_phi)
# The total number of observations should be 1128
assert len(obs_refs) == 1128
###############################
# Undo known parameter shifts #
###############################
s0_param.set_param_vals(s0_p_vals)
det_param.set_param_vals(det_p_vals)
xlo_param.set_param_vals(xlo_p_vals)
xluc_param.set_param_vals(xluc_p_vals)
#####################################
# Select reflections for refinement #
#####################################
refman = ReflectionManager(
obs_refs, experiments, outlier_detector=None, close_to_spindle_cutoff=0.1
)
##############################
# Set up the target function #
##############################
# The current 'achieved' criterion compares RMSD against 1/3 the pixel size and
# 1/3 the image width in radians. For the simulated data, these are just made up
mytarget = LeastSquaresPositionalResidualWithRmsdCutoff(
experiments, ref_predictor, refman, pred_param, restraints_parameterisation=None
)
######################################
# Set up the LSTBX refinement engine #
######################################
overrides = """minimiser.parameters.engine=GaussNewton
minimiser.parameters.logfile=None"""
refiner = setup_minimiser.Extract(
master_phil, mytarget, pred_param, local_overrides=overrides
).refiner
refiner.run()
assert mytarget.achieved()
assert refiner.get_num_steps() == 1
assert approx_equal(
mytarget.rmsds(), (0.00508252354876, 0.00420954552156, 8.97303428289e-05)
)
###############################
# Undo known parameter shifts #
###############################
s0_param.set_param_vals(s0_p_vals)
det_param.set_param_vals(det_p_vals)
xlo_param.set_param_vals(xlo_p_vals)
xluc_param.set_param_vals(xluc_p_vals)
######################################################
# Set up the LBFGS with curvatures refinement engine #
######################################################
overrides = """minimiser.parameters.engine=LBFGScurvs
minimiser.parameters.logfile=None"""
refiner = setup_minimiser.Extract(
master_phil, mytarget, pred_param, local_overrides=overrides
).refiner
refiner.run()
assert mytarget.achieved()
assert refiner.get_num_steps() == 9
assert approx_equal(
mytarget.rmsds(), (0.0558857700305, 0.0333446685335, 0.000347402754278)
)
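# How this regression test is normally run (a sketch, assuming a working DIALS
# development installation; the path is this file's own location):
#
#   pytest tests/algorithms/refinement/test_refinement_regression.py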
|
dials/dials
|
tests/algorithms/refinement/test_refinement_regression.py
|
Python
|
bsd-3-clause
| 10,585
|
[
"CRYSTAL"
] |
fb784ad678f6066b5f22ac54ed4cc474fdb25299d73937ce4ab188275090cfd4
|
##
# This file is an EasyBuild reciPY as per https://github.com/hpcugent/easybuild
#
# Copyright:: Copyright 2012-2013 University of Luxembourg/Luxembourg Centre for Systems Biomedicine
# Authors:: Cedric Laczny <cedric.laczny@uni.lu>, Kenneth Hoste
# Authors:: George Tsouloupas <g.tsouloupas@cyi.ac.cy>, Fotis Georgatos <fotis.georgatos@uni.lu>
# License:: MIT/GPL
# $Id$
#
# This work implements a part of the HPCBIOS project and is a component of the policy:
# http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-94.html
##
"""
EasyBuild support for building and installing BWA, implemented as an easyblock
@author: Cedric Laczny (Uni.Lu)
@author: Fotis Georgatos (Uni.Lu)
@author: Kenneth Hoste (Ghent University)
@author: George Tsouloupas <g.tsouloupas@cyi.ac.cy>
"""
import os
import shutil
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.configuremake import ConfigureMake
class EB_BWA(ConfigureMake):
"""
Support for building BWA
"""
def __init__(self, *args, **kwargs):
"""Add extra config options specific to BWA."""
super(EB_BWA, self).__init__(*args, **kwargs)
self.files = []
def configure_step(self):
"""
No configure script to run, as bwa comes with _no_ configure script; just compose the list of files to install
"""
self.files = ["bwa", "qualfa2fq.pl", "xa2multi.pl"]
if LooseVersion(self.version) < LooseVersion("0.7.0"):
# solid2fastq was dropped in recent versions because the same functionality is covered by other tools already
# cfr. http://osdir.com/ml/general/2010-10/msg26205.html
self.files.append("solid2fastq.pl")
def install_step(self):
"""
Install by copying files to install dir
"""
srcdir = self.cfg['start_dir']
destdir = os.path.join(self.installdir, 'bin')
srcfile = None
try:
os.makedirs(destdir)
for filename in self.files:
srcfile = os.path.join(srcdir, filename)
shutil.copy2(srcfile, destdir)
except OSError, err:
self.log.error("Copying %s to installation dir %s failed: %s" % (srcfile, destdir, err))
def sanity_check_step(self):
"""Custom sanity check for BWA."""
custom_paths = {
'files': ["bin/%s" % x for x in self.files],
'dirs': []
}
super(EB_BWA, self).sanity_check_step(custom_paths=custom_paths)
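# A minimal sketch of an easyconfig that would use this easyblock. EasyBuild
# derives the EB_BWA class from name = 'BWA'; the version, toolchain and URLs
# below are illustrative assumptions, not values taken from this file:
#
# name = 'BWA'
# version = '0.6.2'
# homepage = 'http://bio-bwa.sourceforge.net/'
# description = """BWA is a software package for mapping low-divergent sequences
# against a large reference genome, such as the human genome."""
# toolchain = {'name': 'goolf', 'version': '1.4.10'}
# sources = [SOURCELOWER_TAR_BZ2]
# source_urls = ['http://download.sourceforge.net/bio-bwa/']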
|
hajgato/easybuild-easyblocks
|
easybuild/easyblocks/b/bwa.py
|
Python
|
gpl-2.0
| 2,458
|
[
"BWA"
] |
af5dd994f65024f2716643070af5686743dc4790e7449de2000716ae24cb71da
|
from prisoner.gateway.ServiceGateway import ServiceGateway, WrappedResponse
import prisoner.SocialObjects as SocialObjects
import datetime # Used for creating standardised date / time objects from Facebook's attribute values.
import json # Used for parsing responses from Facebook.
import md5 # Used for generating unique state.
import random # Used for generating unique state.
import sys # Used for displaying error message names and descriptions.
import traceback
import urllib2 # Used for formatting URI params, reading web addresses, etc.
import urllib # Used for formatting URI params, reading web addresses, etc.
import urlparse # Used for reading Facebook access token.
class FacebookServiceGateway(ServiceGateway):
"""
Service gateway for Facebook.
This gateway interacts with Facebook directly by making calls via the network's Social Graph API.
The Facebook Service Gateway allows you to access Facebook from PRISONER
experiments. In order to use Facebook, you must register an app with the
Facebook Developers portal and provide three additional props in your
experimental design file. The app_id and app_secret props correspond to the
values for your app, and the api_version prop dictates which version of the
Facebook API your experiment targets. At this time, only "2.0" is an
acceptable API version. See the documentation on key concepts for guidance on
using props in experimental designs.
"""
def __init__(self, access_token=None, props={}, policy=None):
"""
Initialises itself with PRISONER's App ID and App Secret.
:param access_token: This parameter is deprecated.
:type access_token: str
:param props: Dictionary of Facebook-specific properties.
:type props: dict
:param policy: The current PolicyProcessor instance.
:type policy: PolicyProcessor
"""
self.props = props
self.policy = policy
# map from object names to the required FB permission(s)
self.perm_maps = {
"Person":["public_profile"],
"education":["user_education_history"],
"work": ["user_work_history"],
"relationshipStatus":["user_relationships"],
"interestedIn": ["user_relationship_details"],
"religion": ["user_religion_politics"],
"politicalViews": ["user_religion_politics"],
"hometown":["user_hometown"],
"birthday": ["user_birthday"],
"bio": ["user_about_me"],
"location": ["user_location"],
"Music": ["user_likes"],
"Like": ["user_likes"],
"Movie": ["user_likes"],
"Book": ["user_likes"],
"Note": ["user_status", "user_posts", "publish_actions"],
"Friends": ["user_friends"],
"Album": ["user_photos"],
"Image": ["user_photos"],
"Checkin": ["user_tagged_places"]
}
self.perms = self.generate_permissions_list()
# Gateway details.
self.service_name = "Facebook"
self.service_description = "Connect and share with people you know."
# App details. (Credentials come from the experimental design's props;
# hard-coded app IDs / secrets should never be committed.)
self.app_id = props['app_id']
self.app_secret = props['app_secret']
# URI references.
self.redirect_uri = None # Placeholder. This will be initialised by request_authorisation()
self.auth_request_uri = "https://www.facebook.com/dialog/oauth?"
self.auth_token_uri = "https://graph.facebook.com/oauth/access_token?"
if props.get('api_version'):
self.graph_uri = "https://graph.facebook.com/v%s" % props['api_version']
else:
self.graph_uri = "https://graph.facebook.com"
self.facebook_uri = "https://www.facebook.com/"
# Generate a unique state. (Required by Facebook for security)
r = random.random()
self.state = md5.new(str(r)).hexdigest()
# Set the scope for our app. (What permissions do we need?)
self.scope = str(self.perms).strip("[]").replace(" ","").replace("'","")
# Placeholders.
self.access_token = None
self.session = None
def request_handler(self, request, operation, payload, extra_args=None):
""" Wrapper around object requests. Used to inject any necessary debug headers.
:param request: A method instance on this service gateway
:type request: method
:param operation: A HTTP method of this request (ie. GET or POST)
:type operation: str
:param payload: The criteria for this request, ie. which objects to retrieve,
or data to publish
:param extra_args: A dictionary of arguments to further filter this query
(eg. limit)
:type extra_args: dict
:returns: A WrappedResponse with any additional headers injected
"""
self.props['args'] = extra_args
resp = request(operation, payload)
headers = {}
if "debug" in self.props:
headers['PRISONER-FB-Permissions'] = self.perms
return WrappedResponse(resp, headers)
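# Usage sketch (hypothetical names: "fb" is an authenticated gateway instance,
# "uid" a Facebook user ID; only the call signature is taken from this file):
#
#   wrapped = fb.request_handler(fb.Note, "GET", uid, extra_args={"limit": 10})
#
# The WrappedResponse bundles the Social Object result with any extra headers
# (e.g. PRISONER-FB-Permissions when the "debug" prop is set).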
def generate_permissions_list(self):
"""
Generates a list of permissions based on the experiment's privacy policy.
:returns: List of permissions
"""
processor = self.policy
policy = processor.privacy_policy
query_path = ("//policy")
elements = policy.xpath(query_path)
perms = []
for elem in elements:
obj = elem.attrib['for'].split(":")
if len(obj) > 1:
obj = obj[1]
else:
obj = obj[0]
if obj != "User":
if obj in self.perm_maps:
perms = list(set(perms+ self.perm_maps[obj]))
else:
perms.append("public_profile")
xpath = "//policy[@for='Facebook:%s']//attributes" % obj
atts = policy.xpath(xpath)
for att in atts[0]:
att_type = att.get('type')
if att_type in self.perm_maps:
perms = list(set(perms+ self.perm_maps[att_type]))
return perms
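# Worked example (hypothetical policy file): a clause <policy for="Facebook:Note">
# whose attributes include one of type "location" maps, via perm_maps above, to
# ["user_status", "user_posts", "publish_actions", "user_location"] -- order is
# not guaranteed because the list is accumulated through set unions.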
def request_authentication(self, callback):
"""
Initiates Facebook's authentication process.
Returns a URI at which the user can confirm access to their profile by the application.
:param callback: PRISONER's authentication flow URL. User must be redirected here after registering with Facebook
in order to continue the flow.
:type callback: str
:returns: URI the user must visit in order to authenticate.
"""
# Save the callback URI as our redirect. This is required by Facebook / OAuth.
# (Redirect URIs for authentication and requesting token must match)
self.redirect_uri = callback
# Parameters for the authorisation request URI.
params = {}
params["client_id"] = self.app_id
params["redirect_uri"] = self.redirect_uri
params["scope"] = self.scope
params["state"] = self.state
# Compose request URI.
uri = self.auth_request_uri + urllib.urlencode(params)
return uri
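# The URI returned above has this general shape (APP_ID, CALLBACK and STATE are
# placeholders for the real values):
#
#   https://www.facebook.com/dialog/oauth?client_id=APP_ID&redirect_uri=CALLBACK&scope=user_likes,user_photos&state=STATE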
def complete_authentication(self, request):
"""
Completes authentication. Extracts the "code" param that Facebook provided and exchanges it for an
access token so we can make authenticated calls on behalf of the user.
:param request: Response from the first stage of authentication.
:type request: HTTPRequest
:returns: Unique access token that should persist for this user.
"""
# Before doing this, could check that our state value matches the state returned by Facebook. (Later addition)
facebook_code = None
if (request.args.has_key("code")):
facebook_code = request.args['code']
else:
return False
# Parameters for the token request URI.
params = {}
params["code"] = facebook_code
params["client_secret"] = self.app_secret
params["redirect_uri"] = self.redirect_uri
params["client_id"] = self.app_id
# Load the token request URI and get its response parameters.
token_request_uri = self.auth_token_uri + urllib.urlencode(params)
response = urlparse.parse_qs(urllib.urlopen(token_request_uri).read())
# Parse response to get access token and expiry date.
access_token = None
expires = None
self.access_token = response["access_token"][0]
expires = response["expires"][0]
# Create a User() object for the authenticated user.
auth_user = Person()
# Query Facebook to get the authenticated user's ID and username.
result_set = self.get_graph_data("/me")
auth_user.id = self.get_value(result_set, "id")
auth_user.username = self.get_value(result_set, "username")
# Set up session.
self.session = auth_user
return self.access_token
def restore_authentication(self, access_token):
"""
Provides a mechanism to restore a session. (Essentially refresh an access token)
Facebook does not allow access tokens to be refreshed. However, if the user is forced to go through the
authentication process again, it will be done transparently so long as the PRISONER app has not requested
additional permissions.
:param access_token: The current access token held for this user.
:type access_token: str
:returns: False, thus forcing the authentication process to take place again. (Transparently)
"""
return False
def Session(self):
"""
The Facebook session exposes the authenticated user as an instance of User().
Can also be accessed in the same way as Person() as this class simply extends it.
:returns: session object
"""
return self.session
def Person(self, operation, payload):
"""
Performs operations relating to people's profile information.
Currently only supports GET operations. This allows us to, given a suitable
payload such as a Person() object, retrieve the information they have added to Facebook. (Eg: Full name, education, religion...)
:param operation: The operation to perform. (GET)
:type operation: str
:param payload: A Person() object whose ID is either a Facebook UID or username.
:type payload: SocialObject
:returns: A Person() object with all available attributes populated.
"""
if (operation == "GET"):
try:
# Get user ID and query Facebook for their info.
user_id = payload
user_details = self.get_graph_data("/" + user_id)
# Create user object.
user = Person()
# Create author object for future use.
author = Person()
author.id = user_id
user.author = author
# Basic information.
user.id = self.get_value(user_details, "id")
user.firstName = self.get_value(user_details, "first_name")
user.middleName = self.get_value(user_details, "middle_name")
user.lastName = self.get_value(user_details, "last_name")
user.username = self.get_value(user_details, "username")
user.displayName = self.get_value(user_details, "name")
user.gender = self.get_value(user_details, "gender")
user.email = self.get_value(user_details, "email")
user.url = "https://www.facebook.com/" + user_id
# Get a list of the user's languages.
languages = self.get_value(user_details, "languages")
# Language info was supplied.
if ((languages) and (len(languages)) > 0):
# Create list to hold languages.
lang_list = []
# Loop through languages and add to list.
for lang in languages:
this_lang = lang["name"]
lang_list.append(this_lang)
user.languages = lang_list
# No info.
else:
user.languages = None
# Timezone.
user.timezone = self.get_value(user_details, "timezone")
# Parse the user's last update time.
updated_time_str = self.get_value(user_details, "updated_time")
timestamp = self.str_to_time(updated_time_str)
user.updatedTime = timestamp
# About / short biography.
user.bio = self.get_value(user_details, "bio")
# Parse the user's birthday.
birthday_str = self.get_value(user_details, "birthday")
birthday_timestamp = self.str_to_time(birthday_str)
user.birthday = birthday_timestamp
# Get a list detailing the user's education history.
education_list = self.get_value(user_details, "education")
edu_coll = SocialObjects.Collection()
edu_coll.author = author
# Education information exists.
if ((education_list) and (len(education_list) > 0)):
# Create list to hold places.
edu_list = []
# Loop through places and add to list.
for place in education_list:
this_place = SocialObjects.Place()
this_place.author = author
this_place.id = place["school"]["id"]
this_place.displayName = place["school"]["name"]
edu_list.append(this_place)
edu_coll.objects = edu_list
# Add education info to User object.
user.education = edu_coll
# Get a list detailing the user's work history.
work_coll = SocialObjects.Collection()
work_coll.author = author
work_history = self.get_value(user_details, "work")
# Info exists.
if ((work_history) and (len(work_history) > 0)):
# Create Collection object to hold work history.
work_list = []
# Loop through places and add to list.
for place in work_history:
this_place = SocialObjects.Place()
this_place.id = place["employer"]["id"]
this_place.displayName = place["employer"]["name"]
work_list.append(this_place)
work_coll.objects = work_list
# Add work info to User object.
user.work = work_coll
# Make a Place object for the user's hometown.
hometown_place = SocialObjects.Place()
hometown_info = self.get_value(user_details, "hometown")
# Hometown supplied.
if (hometown_info):
hometown_place.id = hometown_info["id"]
hometown_place.displayName = hometown_info["name"]
user.hometown = hometown_place
# Not supplied, so use an empty Place object.
else:
user.hometown = SocialObjects.Place()
# Make a Place object for the user's current location.
location_place = SocialObjects.Place()
location_info = self.get_value(user_details, "location")
# Location supplied.
if (location_info):
location_place.id = location_info["id"]
location_place.displayName = location_info["name"]
user.location = location_place
# Location not supplied.
else:
user.location = SocialObjects.Place()
# Additional info.
user.interestedIn = self.get_value(user_details, "interested_in")
user.politicalViews = self.get_value(user_details, "political")
user.religion = self.get_value(user_details, "religion")
user.relationshipStatus = self.get_value(user_details, "relationship_status")
# Make a User object for the user's significant other.
sig_other = Person()
sig_other_info = self.get_value(user_details, "significant_other")
# Info exists.
if (sig_other_info):
sig_other.id = sig_other_info["id"]
sig_other.displayName = sig_other_info["name"]
user.significantOther = sig_other
# No info.
else:
user.significantOther = Person()
# Get the user's profile picture.
img = SocialObjects.Image()
img.fullImage = self.graph_uri + "/me/picture?type=large" + "&access_token=" + self.access_token
user.image = img
print "User() function returned successfully."
return user
except:
print "User() function exception:"
print sys.exc_info()[0]
return Person()
else:
raise NotImplementedError("Operation not supported.")
def Music(self, operation, payload):
"""
Performs operations relating to people's musical tastes.
Currently only supports GET operations, so we can just get the bands a person / user likes.
:param operation: The operation to perform. (GET)
:type operation: str
:param payload: A Person() whose ID is either a Facebook UID or username.
:type payload: SocialObject
:returns: A list of the bands this person likes.
"""
if (operation == "GET"):
try:
# Get user ID and query Facebook for their info.
user_id = payload
# Create author object.
author = SocialObjects.Person()
author.id = user_id
# Get the initial result set.
result_set = self.get_graph_data("/" + user_id + "/music")
band_obj_list = []
# While there are still more bands, add them to the list.
while ((result_set.has_key("data")) and (len(result_set["data"]) > 0)):
# Get bands.
band_obj_list.extend(result_set["data"])
# Get next result set, if there is one.
if ((result_set.has_key("paging")) and (result_set["paging"].has_key("next"))):
result_set = self.get_graph_data(result_set["paging"]["next"])
else:
break
# Loop through the band object list and add their names to a separate list.
bands = []
for band in band_obj_list:
# Create an object for this band.
this_band = Music()
this_band.displayName = self.get_value(band, "name")
this_band.id = self.get_value(band, "id")
this_band.url = "https://www.facebook.com/" + this_band.id
this_band.author = author
bands.append(this_band)
# Create a collection object to hold the list.
bands_coll = SocialObjects.Collection()
bands_coll.author = author
bands_coll.provider = "Facebook"
bands_coll.objects = bands
# Return.
print "Music() function returned successfully."
return bands_coll
except:
print "Music() function exception:"
print sys.exc_info()[0]
return SocialObjects.Collection()
else:
raise NotImplementedError("Operation not supported.")
def Like(self, operation, payload):
"""
Returns a user's liked pages.
Only supports GET operations.
:param operation: The operation to perform. (GET)
:type operation: str
:param payload: A Person() whose ID is either a Facebook UID or username.
:type payload: SocialObject
:returns: A list of pages this person likes.
"""
if (operation == "GET"):
try:
# Get user ID and query Facebook for their info.
user_id = payload
# Create author object.
author = SocialObjects.Person()
author.id = user_id
# Get the initial result set.
result_set = self.get_graph_data("/" + user_id + "/likes")
like_obj_list = []
# While there are still more likes available, add them to the list.
while ((result_set.has_key("data")) and (len(result_set["data"]) > 0)):
# Get likes.
like_obj_list.extend(result_set["data"])
# Get next result set, if there is one.
if ((result_set.has_key("paging")) and (result_set["paging"].has_key("next"))):
result_set = self.get_graph_data(result_set["paging"]["next"])
else:
break
# Loop through the like object list and add their names to a separate list.
likes = []
for like in like_obj_list:
# Create an object for this like.
this_like = Page()
this_like.displayName = self.get_value(like, "name")
this_like.id = self.get_value(like, "id")
this_like.url = "https://www.facebook.com/" + this_like.id
this_like.author = author
this_like.category = self.get_value(like,"category")
this_like.image = self.graph_uri + "/" + this_like.id + "/picture?type=large" + "&access_token=" + self.access_token
likes.append(this_like)
# Create a collection object to hold the list.
likes_coll = SocialObjects.Collection()
likes_coll.author = author
likes_coll.provider = "Facebook"
likes_coll.objects = likes
# Return.
print "Like() function returned successfully."
return likes_coll
except:
print "Like() function exception:"
print sys.exc_info()[0]
raise
return SocialObjects.Collection()
else:
raise NotImplementedError("Operation not supported.")
def Movie(self, operation, payload):
"""
Performs operations relating to people's taste in films.
Currently only supports GET operations. This lets us retrieve the movies / films people like.
:param operation: The operation to perform. (GET)
:type operation: str
:param payload: A Person() whose ID is either a Facebook UID or username.
:type payload: SocialObject
:returns: A list of the movies this person likes.
"""
if (operation == "GET"):
try:
# Get user ID and query Facebook for their info.
user_id = payload
# Create author object.
author = SocialObjects.Person()
author.id = user_id
# Get the initial result set.
result_set = self.get_graph_data("/" + user_id + "/movies")
movie_obj_list = []
# While there are still more movies available, add them to the list.
while ((result_set.has_key("data")) and (len(result_set["data"]) > 0)):
# Get movies.
movie_obj_list.extend(result_set["data"])
# Get next result set, if there is one.
if ((result_set.has_key("paging")) and (result_set["paging"].has_key("next"))):
result_set = self.get_graph_data(result_set["paging"]["next"])
else:
break
# Loop through the movie object list and add their names to a separate list.
movies = []
for movie in movie_obj_list:
# Create an object for this movie.
this_movie = Movie()
this_movie.displayName = self.get_value(movie, "name")
this_movie.id = self.get_value(movie, "id")
this_movie.url = "https://www.facebook.com/" + this_movie.id
this_movie.author = author
movies.append(this_movie)
# Create a collection object to hold the list.
movies_coll = SocialObjects.Collection()
movies_coll.author = author
movies_coll.provider = "Facebook"
movies_coll.objects = movies
# Return.
print "Movie() function returned successfully."
return movies_coll
except:
print "Movie() function exception:"
print sys.exc_info()[0]
return SocialObjects.Collection()
else:
raise NotImplementedError("Operation not supported.")
def Book(self, operation, payload):
"""
Performs operations relating to people's taste in books and literature.
Currently only supports GET operations. This lets us get the books / authors people are into.
:param operation: The operation to perform. (GET)
:type operation: str
:param payload: A Person() whose ID is either a Facebook UID or username.
:type payload: SocialObject
:returns: A list of the books this person likes.
"""
if (operation == "GET"):
try:
# Get user ID and query Facebook for their info.
user_id = payload
# Create author object.
author = SocialObjects.Person()
author.id = user_id
# Get the initial result set.
result_set = self.get_graph_data("/" + user_id + "/books")
book_obj_list = []
# While there are still more books available, add them to the list.
while ((result_set.has_key("data")) and (len(result_set["data"]) > 0)):
# Get books.
book_obj_list.extend(result_set["data"])
# Get next result set, if there is one.
if ((result_set.has_key("paging")) and (result_set["paging"].has_key("next"))):
result_set = self.get_graph_data(result_set["paging"]["next"])
else:
break
# Loop through the book object list and add their names to a separate list.
books = []
for book in book_obj_list:
# Create an object for this book.
this_book = Book()
this_book.displayName = self.get_value(book, "name")
this_book.id = self.get_value(book, "id")
this_book.url = "https://www.facebook.com/" + this_book.id
this_book.author = author
books.append(this_book)
# Create a collection object to hold the list.
books_coll = SocialObjects.Collection()
books_coll.author = author
books_coll.provider = "Facebook"
books_coll.objects = books
# Return.
return books_coll
except:
print "Book() function exception:"
print sys.exc_info()[0]
return SocialObjects.Collection()
else:
raise NotImplementedError("Operation not supported.")
def Note(self, operation, payload):
"""
Performs operations on a user's status updates.
Currently only supports GET operations. This lets us retrieve a user's entire backlog of status updates.
:param operation: The operation to perform. (GET)
:type operation: str
:param payload: A Person() whose ID is either a Facebook UID or username.
:type payload: SocialObject
:returns: A collection representing this person's backlog of status updates.
"""
if(operation == "POST"):
# convert SO dict to fb call
call_dict = {
"message":payload.content,
"link":payload.link
}
if(payload.privacy):
privacy = "{'value':'CUSTOM', 'allow': '%s'}" % payload.privacy
call_dict['privacy'] = privacy
response = self.post_graph_data("/me/feed", call_dict)
elif (operation == "GET"):
try:
# do we have a limit
limit = None
if "limit" in self.props['args']:
limit = self.props['args']['limit']
# Get user ID and query Facebook for their info.
user_id = payload
status_coll = StatusList()
status_list = []
# Create author object for this collection.
author = SocialObjects.Person()
author.id = user_id
status_coll.author = author
# Get the initial result set.
result_set = self.get_graph_data("/" + user_id + "/feed")
# Page limit for testing.
page_limit = 50
page = 0
# So long as there's data, parse it.
while ((result_set.has_key("data")) and (len(result_set["data"]) > 0) and (page < page_limit)):
# Get status updates in this batch.
this_data = result_set["data"]
# For each update...
for status in this_data:
status_type = self.get_value(status, "type")
status_author = self.get_value(status["from"], "id")
# Ensure this is a real status update. (Not a story, comment, etc.)
if (status.has_key("message")):
# Ensure the current user posted this update and it is a valid status object.
if (((status_type == "status") or (status_type == "link") or (status_type == "photo")) and (status_author == user_id)):
# Set basic info.
this_status = Note()
author = SocialObjects.Person()
author.id = user_id
this_status.author = author
this_status.content = self.get_value(status, "message")
this_status.id = self.get_value(status, "id")
this_status.published = self.str_to_time(self.get_value(status, "created_time"))
id_components = this_status.id.split("_")
this_status.url = "https://www.facebook.com/" + user_id + "/posts/" + id_components[1]
# Privacy info. (If available)
if (status.has_key("privacy")):
this_status.privacy = self.get_value(status["privacy"], "description")
# Parse likes. (Initial limit of 25 per status)
likes_coll = Likes()
likes_coll.objects = self.parse_likes(status)
this_status.likes = likes_coll
# Parse comments. (Initial limit of 25 per status)
comments_coll = Comments()
comments_coll.objects = self.parse_comments(status)
this_status.comments = comments_coll
# Parse location.
this_status.location = self.parse_location(status)
# Add status to our list of statuses.
status_list.append(this_status)
# Compose next address.
page += 1
if limit and len(status_list) >= limit:
break
if("paging" in result_set and "next" in result_set["paging"]):
next_address = result_set["paging"]["next"]
result_set = self.get_graph_data(next_address)
else:
break
if limit:
status_list = status_list[:limit]
# Add the status list to our collection.
status_coll.objects = status_list
# Return statuses.
print "Status() function returned successfully."
return status_coll
except:
print "Status() function exception:"
print sys.exc_info()[0]
return StatusList()
else:
raise NotImplementedError("Operation not supported.")
def Friends(self, operation, payload):
"""
Performs operations on a user's friends.
Only supports GET operations. This lets us retrieve someone's entire friends list.
:param operation: The operation to perform. (GET)
:type operation: str
:param payload: A Person() whose ID is either a Facebook UID or username.
:type payload: Person
:returns: A collection representing this person's friends list.
"""
if (operation == "GET"):
try:
# Get user ID and query Facebook for their friends.
user_id = payload
fields = "name,id,hometown,location,education,work"
result_set = self.get_graph_data("/" + user_id + "/friends?fields=%s" % fields)
friend_coll = Friends()
friend_obj_list = []
# Create author object for this collection.
author = SocialObjects.Person()
author.id = user_id
friend_coll.author = author
# While there is still data available...
while ((result_set.has_key("data")) and (len(result_set["data"]) > 0)):
# Grab the current batch of friends.
this_data = result_set["data"]
# For each friend in this batch...
for friend in this_data:
# Get basic info for this friend.
this_friend = Person()
this_friend.id = self.get_value(friend, "id")
this_friend.displayName = self.get_value(friend, "name")
this_friend.url = "https://www.facebook.com/" + this_friend.id
user_details = friend
# Create author object for this friend. (User "has" their friends)
author = SocialObjects.Person()
author.id = user_id
this_friend.author = author
# Compose profile pic address.
profile_pic = SocialObjects.Image()
profile_pic.fullImage = self.graph_uri + "/" + this_friend.id + "/picture?type=large" + "&access_token=" + self.access_token
profile_pic.author = this_friend.id
this_friend.image = profile_pic
# Add friend to list.
friend_obj_list.append(this_friend)
# Get next set of results, if there is one.
if ("paging" in result_set) and ("next" in result_set["paging"]):
next_address = result_set["paging"]["next"]
result_set = self.get_graph_data(next_address)
else:
break
# Add friend list to collection and return.
friend_coll.objects = friend_obj_list
print "Friends() function returned successfully."
return friend_coll
except:
print "Friends() function exception:"
print traceback.format_exc()
return FriendsList()
else:
raise NotImplementedError("Operation not supported.")
def Album(self, operation, payload):
"""
Performs operations on a user's photo albums.
Currently only supports GET operations. This lets us retrieve a list of photo albums associated with the
supplied payload ID.
:param operation: The operation to perform. (GET)
:type operation: str
:param payload: A Person() whose ID is either a Facebook UID or username.
:type payload: Person
:returns: A collection representing this person / object's photo albums.
"""
if (operation == "GET"):
try:
# Get the object's ID from the payload and query for albums.
obj_id = payload
result_set = self.get_graph_data("/" + obj_id + "/albums")
album_coll = Albums()
album_obj_list = []
# Create author object for this collection.
author = SocialObjects.Person()
author.id = obj_id
album_coll.author = author
# While there is still data available...
while ((result_set.has_key("data")) and (len(result_set["data"]) > 0)):
# Grab the current batch of albums.
this_data = result_set["data"]
for album in this_data:
# Set basic album info.
this_album = Album()
this_album.id = self.get_value(album, "id")
this_album.displayName = self.get_value(album, "name")
# Author info.
author = SocialObjects.Person()
author.id = self.get_value(album["from"], "id")
this_album.author = author
this_album.published = self.str_to_time(self.get_value(album, "created_time"))
this_album.summary = self.get_value(album, "description")
this_album.updated = self.str_to_time(self.get_value(album, "updated_time"))
this_album.url = self.get_value(album, "link")
# Parse location info.
this_album.location = self.parse_location(album)
# Cover photo.
cover_photo = SocialObjects.Image()
cover_photo.author = this_album.author
cover_id = self.get_value(album, "cover_photo")
# Only compose a cover photo if one exists.
if (cover_id):
cover_photo.fullImage = self.graph_uri + "/" + cover_id + "/picture?type=normal" + "&access_token=" + self.access_token
this_album.coverPhoto = cover_photo
# Set additional info.
this_album.privacy = self.get_value(album, "privacy")
this_album.count = self.get_value(album, "count")
this_album.albumType = self.get_value(album, "type")
# Parse likes.
likes_list = self.parse_likes(album)
album_likes = Likes()
album_likes.objects = likes_list
this_album.likes = album_likes
# Parse comments.
comments_list = self.parse_comments(album)
album_comments = Comments()
album_comments.objects = comments_list
this_album.comments = album_comments
# Add this album to our list of albums.
album_obj_list.append(this_album)
if "next" not in result_set["paging"]:
break
# Get next set of results.
next_address = result_set["paging"]["next"]
result_set = self.get_graph_data(next_address)
# Populate and return our album collection.
album_coll.objects = album_obj_list
print "Album() function returned successfully."
return album_coll
except:
print "Album() function exception:"
print sys.exc_info()[0]
return Albums()
else:
raise NotImplementedError("Operation not supported.")
def Photo(self, operation, payload):
"""
Performs operations on images.
Currently only supports GET operations. This lets us retrieve the photos associated with the supplied
payload's ID. This will commonly be an Album() to get the photos in said album, or a User() / Person()
to get any photos they're tagged in.
:param operation: The operation to perform. (GET)
:type operation: str
:param payload: The Facebook object to retrieve associated photos for.
:type payload: SocialObject
:returns: A collection representing photos associated with the supplied object.
"""
if (operation == "GET"):
try:
# Get the payload object's ID.
obj_id = payload
result_set = self.get_graph_data("/" + obj_id + "/photos/uploaded")
photo_obj_list = []
# Author info for individual photos and the collection.
author = SocialObjects.Person()
author.id = obj_id
# While there is still data available...
while ((result_set.has_key("data")) and (len(result_set["data"]) > 0)):
# Grab the current batch of photos.
this_data = result_set["data"]
# For each photo in this batch...
for photo in this_data:
# Create photo object and set basic info.
this_photo = Image()
this_photo.id = self.get_value(photo, "id")
this_photo.displayName = self.get_value(photo, "name")
this_photo.published = self.str_to_time(self.get_value(photo, "created_time"))
this_photo.updated = self.str_to_time(self.get_value(photo, "updated_time"))
this_photo.url = self.get_value(photo, "link")
this_photo.position = self.get_value(photo, "position")
this_photo.author = author
# Get image info.
img_normal = SocialObjects.Image()
img_normal.id = this_photo.id
img_normal.author = author
img_normal.fullImage = self.get_value(photo["images"][0], "source")
this_photo.image = img_normal
# Image dimensions.
this_photo.width = self.get_value(photo["images"][0], "width")
this_photo.height = self.get_value(photo["images"][0], "height")
# Thumbnail info.
img_small = SocialObjects.Image()
img_small.id = this_photo.id
img_small.author = author
img_small.fullImage = self.get_value(photo, "picture")
this_photo.thumbnail = img_small
# Parse location info.
this_photo.location = self.parse_location(photo)
# Parse likes.
likes_list = self.parse_likes(photo)
photo_likes_coll = Likes()
photo_likes_coll.objects = likes_list
this_photo.likes = photo_likes_coll
# Parse comments.
comments_list = self.parse_comments(photo)
photo_comments_coll = Comments()
photo_comments_coll.objects = comments_list
this_photo.comments = photo_comments_coll
# Parse tags.
tags_list = self.parse_tags(photo)
photo_tags_coll = Tags()
photo_tags_coll.objects = tags_list
this_photo.tags = photo_tags_coll
# Add photo to list.
photo_obj_list.append(this_photo)
if "next" not in result_set["paging"]:
break
# Get next set of results.
next_address = result_set["paging"]["next"]
result_set = self.get_graph_data(next_address)
# Create a collection object for the photos.
photo_album = Images()
photo_album.objects = photo_obj_list
photo_album.author = author
print "Photo() function returned successfully."
return photo_album
except:
print "Photo() function exception:"
#raise
print sys.exc_info()[0]
return Images()
else:
raise NotImplementedError("Operation not supported")
def Checkin(self, operation, payload):
"""
Performs operations on check-ins / objects with location.
Currently only supports GET operations. This lets us retrieve a list of places the supplied User()
or Person() has been.
:param operation: The operation to perform. (GET)
:type operation: str
:param payload: The Person() object to retrieve check-in information for.
:type payload: SocialObject
:returns: A collection of objects representing check-ins.
"""
if (operation == "GET"):
try:
# Get user ID from payload and query for initial result set.
user_id = payload
result_set = self.get_graph_data("/" + user_id + "/tagged_places")
checkin_obj_list = []
# Author info.
author = SocialObjects.Person()
author.id = user_id
# While there is still data available...
while ((result_set.has_key("data")) and (len(result_set["data"]) > 0)):
# Grab the current batch of check-ins.
this_data = result_set["data"]
# Loop through each check-in on this page.
for checkin in this_data:
# Get and set basic info.
this_checkin = Checkin()
# author posted this
author = SocialObjects.Person()
author.id = user_id
#author.displayName = checkin["from"]["name"]
this_checkin.id = self.get_value(checkin, "id")
this_checkin.author = author
#this_checkin.checkinType = self.get_value(checkin, "type")
this_checkin.checkinType = "status"
this_checkin.published = self.str_to_time(self.get_value(checkin, "created_time"))
# Get location info.
this_checkin.location = self.parse_location(checkin)
this_checkin.image = self.graph_uri + "/" + this_checkin.id + "/picture?type=large" + "&access_token=" + self.access_token
# Get tag info. (People that've been tagged in this check-in)
tags_list = self.parse_tags(checkin)
tags_coll = Tags()
tags_coll.objects = tags_list
this_checkin.tags = tags_coll
checkin_obj_list.append(this_checkin)
# Get next set of results.
if("paging" in result_set and "next" in result_set["paging"]):
next_address = result_set["paging"]["next"]
result_set = self.get_graph_data(next_address)
else:
break
# Compose collection and return it.
checkins_coll = Checkins()
checkins_coll.objects = checkin_obj_list
checkins_coll.author = author
print "Checkin() function returned successfully."
return checkins_coll
except:
print "Checkin() function exception:"
#print sys.exc_info()[0]
print traceback.format_exc()
return Checkins()
else:
raise NotImplementedError("Operation not supported")
def parse_likes(self, facebook_obj):
"""
Internal function.
Takes a JSON Facebook object and returns a list of the people who've liked it.
Note that this function just PARSES. It does not attempt to retrieve all the likes for the given
object. This means it has a limit of around 25 likes.
:param facebook_obj: The Facebook object to get likes for.
:type facebook_obj: Dict
:returns: A list representing the people / users that have liked this object.
"""
# This object has a "Likes" attribute.
if (facebook_obj.has_key("likes")):
# This object has likes.
if (facebook_obj["likes"].has_key("data")):
likes = []
have_liked = facebook_obj["likes"]["data"]
# Loop through likes and add them to our list.
for person in have_liked:
this_person = Person()
this_person.id = self.get_value(person, "id")
this_person.displayName = self.get_value(person, "name")
likes.append(this_person)
return likes
# No likes, return an empty list.
else:
return []
# No likes, return an empty list.
else:
return []
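# For reference, the JSON fragment this parser expects looks roughly like this
# (abridged to the keys actually read above):
#
#   {"likes": {"data": [{"id": "123456", "name": "Some User"}, ...]}}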
def parse_comments(self, facebook_obj):
"""
Internal function.
Takes a JSON Facebook object and returns a list of the comments on it.
Note that this function just PARSES. It does not attempt to retrieve all the comments on the given
object. This means it has a limit of around 25 comments.
:param facebook_obj: The Facebook object to get comments on.
:type facebook_obj: Dict
:returns: A list representing the comments on this object.
"""
# This object has a "Comments" attribute.
if (facebook_obj.has_key("comments")):
# This object has comments.
if (facebook_obj["comments"].has_key("data")):
comments = []
comments_on = facebook_obj["comments"]["data"]
# Loop through comments and add them to our list.
for comment in comments_on:
this_comment = Comment()
this_comment.id = self.get_value(comment, "id")
author = SocialObjects.Person()
author.id = self.get_value(comment["from"], "id")
this_comment.author = author
this_comment.content = self.get_value(comment, "message")
this_comment.published = self.str_to_time(self.get_value(comment, "created_time"))
this_comment.url = "https://www.facebook.com/me/posts/" + this_comment.id
comments.append(this_comment)
return comments
# No comments, return empty list.
else:
return []
# No comments, return an empty list.
else:
return []
def parse_location(self, facebook_obj):
"""
Internal function.
Takes a JSON Facebook object and returns a Place object representing its location.
:param facebook_obj: The Facebook object to get the location of.
:type facebook_obj: Dict
:returns: A Place() object representing the location of the supplied object.
"""
# Get location.
try:
if (facebook_obj.has_key("place")):
place = SocialObjects.Place()
place.id = self.get_value(facebook_obj["place"], "id")
place.displayName = self.get_value(facebook_obj["place"], "name")
# pull information from the page for this location
result_set = self.get_graph_data("/" + place.id)
category = self.get_value(result_set,"category")
try:
image = result_set["cover"]["source"]
except:
image = None
place.category = category
place.image = image
# Get additional location info if it's present.
if (facebook_obj["place"].has_key("location")):
latitude = str(self.get_value(facebook_obj["place"]["location"], "latitude"))
longitude = str(self.get_value(facebook_obj["place"]["location"], "longitude"))
# Format latitude if necessary.
if (not latitude.startswith("-")):
latitude = "+" + latitude
# Format longitude if necessary.
if (not longitude.startswith("-")):
longitude = "+" + longitude
place.position = "%s %s" % (latitude,longitude)
# Get address info if available.
if ((facebook_obj["place"]["location"].has_key("city")) and (facebook_obj["place"]["location"].has_key("country"))):
street = self.get_value(facebook_obj["place"]["location"], "street")
city = self.get_value(facebook_obj["place"]["location"], "city")
country = self.get_value(facebook_obj["place"]["location"], "country")
place.address = ", ".join([part for part in (street, city, country) if part])
else:
place.address = None
# Return place object.
return place
# Return empty place.
else:
return SocialObjects.Place()
except:
return SocialObjects.Place()
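# Example of the position string produced above: latitude 55.9 and longitude
# -3.2 become "+55.9 -3.2" (an explicit sign is forced onto both values).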
def parse_tags(self, facebook_obj):
"""
Internal function.
Takes a JSON Facebook object and returns a list of the objects that have been tagged
in it. (Usually people)
:param facebook_obj: The Facebook object to get tags for.
:type facebook_obj: Dict
:returns: A list representing the people / objects that were tagged in the supplied object.
"""
# This object has tags.
if (facebook_obj.has_key("tags")):
tags = []
are_tagged = facebook_obj["tags"]["data"]
# Loop through the tags and add them to our list.
for person in are_tagged:
this_person = Person()
this_person.id = self.get_value(person, "id")
this_person.displayName = self.get_value(person, "name")
tags.append(this_person)
return tags
# No tags, return an empty list.
else:
return []
def get_likes(self, object_id):
"""
Internal function.
Takes a Facebook object ID and returns a list of the people who've liked the corresponding object.
:param object_id: The ID of the Facebook object to get likes for.
:type object_id: str
:returns: A list representing the people / users that have liked this object.
"""
# Get initial likes.
likes = []
result_set = self.get_graph_data("/" + object_id + "/likes")
# Loop through all likes and add them to our list.
while ((result_set.has_key("data")) and (len(result_set["data"]) > 0)):
have_liked = result_set["data"]
# Create a User object for each like...
for person in have_liked:
this_person = Person()
this_person.id = person["id"]
this_person.displayName = person["name"]
likes.append(this_person)
# Get the next result set, if there is one.
if ((result_set.has_key("paging")) and (result_set["paging"].has_key("next"))):
result_set = self.get_graph_data(result_set["paging"]["next"])
else:
break
return likes
def get_comments(self, object_id):
"""
Internal function.
Takes a Facebook object ID and returns a list of the comments on the corresponding object.
:param object_id: The ID of the Facebook object to get comments on.
:type object_id: str
:returns: A list representing the comments on this object.
"""
# Get initial comments.
comments = []
result_set = self.get_graph_data("/" + object_id + "/comments")
# Loop through all comments and add them to our list.
while ((result_set.has_key("data")) and (len(result_set["data"]) > 0)):
comment_list = result_set["data"]
# Make a Comment() object for each comment in the list...
for comment in comment_list:
this_comment = Comment()
this_comment.id = comment["id"]
author = SocialObjects.Person()
author.id = self.get_value(comment["from"], "id")
this_comment.author = author
this_comment.content = comment["message"]
this_comment.published = self.str_to_time(comment["created_time"])
this_comment.url = "https://www.facebook.com/me/posts/" + this_comment.id
comments.append(this_comment)
# Get the next set of results, if there is one.
if ((result_set.has_key("paging")) and (result_set["paging"].has_key("next"))):
result_set = self.get_graph_data(result_set["paging"]["next"])
else:
break
return comments
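# Hedged usage sketch (hypothetical object ID):
#   for comment in self.get_comments("10150123456789"):
#       print comment.published, "-", comment.content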
def post_graph_data(self, query, params):
"""
Internal function.
Posts the params dictionary to the given query path on the Graph API.
Used for creating, deleting and updating content.
All calls must be authenticated.
:param query: Graph API query to perform
:type query: str
:param params: Dictionary of data to publish to this endpoint
:type params: dict
"""
# If query doesn't start with https://, we assume it is relative.
if (not query.startswith("https://")):
if "?" not in query:
token = "?"
else:
token = "&"
query = self.graph_uri + query + token + "access_token=" + self.access_token
# Retrieve and parse result.
data_req = urllib2.Request(query,
data = urllib.urlencode(params))
data_resp = urllib2.urlopen(data_req)
data = data_resp.read()
json_obj = self.parse_json(data)
return json_obj
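# Illustrative call (assumed endpoint/params; publishing requires the
# appropriate Graph API permissions on the access token):
#   self.post_graph_data("/me/feed", {"message": "Posted via PRISONER"})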
def get_graph_data(self, query):
"""
Internal function.
Queries Facebook's Graph API and returns the result as a dict.
:param query: The Graph API query to perform. (Eg: /me/picture?access_token=...)
:type query: str
:returns: A Dict containing the parsed JSON response from Facebook. Attributes are accessed through their name.
"""
# If query doesn't start with https://, we assume it is relative.
if (not query.startswith("https://")):
if "?" not in query:
token = "?"
else:
token = "&"
query = self.graph_uri + query + token + "access_token=" + self.access_token
# Retrieve and parse result.
data = urllib2.urlopen(query).read()
json_obj = self.parse_json(data)
return json_obj
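# How queries are expanded (access token elided for brevity):
#   "/me/friends"   -> self.graph_uri + "/me/friends?access_token=..."
#   "/search?q=foo" -> self.graph_uri + "/search?q=foo&access_token=..."
#   "https://..."   -> fetched verbatim, since it is already absolute.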
def get_value(self, haystack, needle):
"""
Internal function.
Attempts to get the value corresponding to the supplied key.
If no key exists, None is returned.
:param haystack: The Dictionary object to look at.
:type haystack: dict
:param needle: The key we're looking for.
:type needle: str
:returns: If the key exists, its corresponding value is returned. Otherwise None is returned.
"""
# This key exists, so return it.
if haystack.has_key(needle):
return haystack[needle]
# Key doesn't exist.
else:
return None
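# Examples:
#   self.get_value({"id": "42"}, "id")   -> "42"
#   self.get_value({"id": "42"}, "name") -> None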
def parse_json(self, json_obj):
"""
Internal function.
Takes a JSON object as returned by Facebook and returns the Dict representation of it.
Avoids having to call json.loads() directly everywhere, and allows for potential improvements in the future.
:param json_obj: The JSON object to parse.
:type json_obj: str, list
:returns: A Dict object representing the supplied JSON.
"""
return json.loads(json_obj)
def str_to_time(self, time):
"""
Internal function.
Used to convert Facebook's ISO-8601 date/time into a Date/Time object.
Also converts Facebook's MM/DD/YYYY format used for birthdays.
:param time: The string to parse.
:type time: str
:returns: A Date/Time object.
"""
# Check none.
if (time == None):
return None
# ISO 8601
elif (len(time) > 10):
return datetime.datetime.strptime(time, "%Y-%m-%dT%H:%M:%S+0000")
# MM/DD/YYYY
else:
return datetime.datetime.strptime(time, "%m/%d/%Y")
class Person(SocialObjects.Person):
"""
Representation of a user object on Facebook.
Users are essentially the backbone of the Facebook service and such objects can contain a great deal of information.
User objects will not always have values for all their attributes, as Facebook does not require users to provide
all this information.
"""
def __init__(self):
super(Person, self).__init__()
self._provider = "Facebook" # String
self._username = None # String
self._firstName = None # String
self._middleName = None # String
self._lastName = None # String
self._gender = None # String
self._languages = None # List of strings
self._timezone = None # String
self._updatedTime = None # Date / Time
self._bio = None # String
self._birthday = None # Date / Time
self._education = None # Collection of places
self._email = None # String
self._hometown = None # Place
self._interestedIn = None # List of strings
self._location = None # Place
self._politicalViews = None # String
self._religion = None # String
self._relationshipStatus = None # String
self._significantOther = None # User or Person object
self._work = None # Collection of places
@property
def username(self):
""" This person's Facebook username. """
return self._username
@property
def firstName(self):
""" This person's first name. """
return self._firstName
@property
def middleName(self):
""" This person's middle name. """
return self._middleName
@property
def lastName(self):
""" This person's last name. """
return self._lastName
@property
def gender(self):
""" This person's gender. """
return self._gender
@property
def languages(self):
""" Languages this person can speak. """
return self._languages
@property
def timezone(self):
""" This person's timezone. (Offset from UTC) """
return self._timezone
@property
def updatedTime(self):
""" When this person last updated their Facebook profile. """
return self._updatedTime
@property
def bio(self):
""" This person's short biography. """
return self._bio
@property
def birthday(self):
""" This person's birthday. """
return self._birthday
@property
def education(self):
""" This person's education history. """
return self._education
@property
def email(self):
""" This person's email address. """
return self._email
@property
def hometown(self):
""" This person's hometown. """
return self._hometown
@property
def location(self):
""" This person's current location. """
return self._location
@property
def interestedIn(self):
""" This person's sexual orientation. """
return self._interestedIn
@property
def politicalViews(self):
""" This person's political preferences. """
return self._politicalViews
@property
def religion(self):
""" This person's religion. """
return self._religion
@property
def relationshipStatus(self):
""" This person's relationship status. """
return self._relationshipStatus
@property
def significantOther(self):
""" This person's significant other. """
return self._significantOther
@property
def work(self):
""" This person's work history. """
return self._work
@username.setter
def username(self, value):
self._username = value
@firstName.setter
def firstName(self, value):
self._firstName = value
@middleName.setter
def middleName(self, value):
self._middleName = value
@lastName.setter
def lastName(self, value):
self._lastName = value
@gender.setter
def gender(self, value):
self._gender = value
@languages.setter
def languages(self, value):
self._languages = value
@timezone.setter
def timezone(self, value):
self._timezone = value
@updatedTime.setter
def updatedTime(self, value):
self._updatedTime = value
@bio.setter
def bio(self, value):
self._bio = value
@birthday.setter
def birthday(self, value):
self._birthday = value
@education.setter
def education(self, value):
self._education = value
@email.setter
def email(self, value):
self._email = value
@hometown.setter
def hometown(self, value):
self._hometown = value
@location.setter
def location(self, value):
self._location = value
@interestedIn.setter
def interestedIn(self, value):
self._interestedIn = value
@politicalViews.setter
def politicalViews(self, value):
self._politicalViews = value
@religion.setter
def religion(self, value):
self._religion = value
@relationshipStatus.setter
def relationshipStatus(self, value):
self._relationshipStatus = value
@significantOther.setter
def significantOther(self, value):
self._significantOther = value
@work.setter
def work(self, value):
self._work = value
class Friends(SocialObjects.Collection):
"""
Lightweight collection class for representing collections of users / friends.
"""
def __init__(self):
super(Friends, self).__init__()
class Note(SocialObjects.Note):
"""
Representation of a status object on Facebook.
Status updates are short posts by Facebook users. They can either be entirely textual or contain a link or a photo.
As well as the basic attributes, status updates also contain a privacy setting as well as a collection of likes and
comments.
"""
def __init__(self):
super(Note, self).__init__()
self._provider = "Facebook" # String
self._privacy = None # String
self._likes = None # Collection of users
self._comments = None # Collection of comments
self._link = None
@property
def link(self):
""" A link to an external resource embedded in this status update """
return self._link
@link.setter
def link(self, value):
self._link = value
@property
def privacy(self):
""" The privacy setting for this status update. (Eg: Friends) """
return self._privacy
@property
def likes(self):
""" The people who liked this status update. """
return self._likes
@property
def comments(self):
""" The comments on this status update. """
return self._comments
@privacy.setter
def privacy(self, value):
self._privacy = value
@likes.setter
def likes(self, value):
self._likes = value
@comments.setter
def comments(self, value):
self._comments = value
class StatusList(SocialObjects.Collection):
"""
Lightweight collection class for representing collections of statuses.
"""
def __init__(self):
super(StatusList, self).__init__()
class Likes(SocialObjects.Collection):
"""
Lightweight collection class for representing collections of likes.
"""
def __init__(self):
super(Likes, self).__init__()
class Comment(SocialObjects.Note):
"""
Representation of a comment object on Facebook.
Comments are typically short replies / notes on objects such as statuses, photos, check-ins or just about any
other Facebook object. Comments consist of their content, an author, a published date and a permalink.
"""
def __init__(self):
super(Comment, self).__init__()
self._provider = "Facebook" # String
class Comments(SocialObjects.Collection):
"""
Lightweight collection class for representing collections of comments.
"""
def __init__(self):
super(Comments, self).__init__()
class Album(SocialObjects.SocialObject):
"""
Representation of an album object on Facebook.
Albums are created by users or apps and have a number of key attributes such as privacy and count.
Albums also have a cover photo and a type. Once you have an album's ID, you can then use Images() to retrieve
the photos it contains.
"""
def __init__(self):
super(Album, self).__init__()
self._provider = "Facebook" # String
self._coverPhoto = None # Image
self._privacy = None # String
self._count = None # Integer
self._albumType = None # String
self._photos = None # Collection of photos
self._likes = None # Collection of users
self._comments = None # Collection of comments
@property
def coverPhoto(self):
""" This album's cover photo. """
return self._coverPhoto
@property
def privacy(self):
""" The privacy setting for this album. """
return self._privacy
@property
def count(self):
""" The number of photos in this album. """
return self._count
@property
def albumType(self):
""" The album's type. (Eg: Wall, Mobile) """
return self._albumType
@property
def photos(self):
""" The images in the album. """
return self._photos
@property
def likes(self):
""" The people who've liked this album. """
return self._likes
@property
def comments(self):
""" The comments on this photo album. """
return self._comments
@coverPhoto.setter
def coverPhoto(self, value):
self._coverPhoto = value
@privacy.setter
def privacy(self, value):
self._privacy = value
@count.setter
def count(self, value):
self._count = value
@albumType.setter
def albumType(self, value):
self._albumType = value
@photos.setter
def photos(self, value):
self._photos = value
@likes.setter
def likes(self, value):
self._likes = value
@comments.setter
def comments(self, value):
self._comments = value
class Albums(SocialObjects.Collection):
"""
Lightweight collection class for representing collections of albums.
"""
def __init__(self):
super(Albums, self).__init__()
class Image(SocialObjects.Image):
"""
Representation of a photo object on Facebook.
Photos are images uploaded by users or applications. As well as the standard attributes inherited from SocialObject,
a photo also has additional specialised attributes such as position, width and height.
A photo also contains Image() objects to represent both the full-size image as well as thumbnails.
"""
def __init__(self):
super(Image, self).__init__()
self._provider = "Facebook" # String
self._position = None # Integer
self._image = None # Image
self._thumbnail = None # Image
self._width = None # Integer
self._height = None # Integer
self._tags = None # Collection of users
self._likes = None # Collection of users
self._comments = None # Collection of comments
@property
def position(self):
""" Position of this photo in its album. """
return self._position
@property
def image(self):
""" The full size version of this photo. """
return self._image
@property
def thumbnail(self):
""" The thumbnail image for this photo. """
return self._thumbnail
@property
def width(self):
""" The width of this photo. (Pixels) """
return self._width
@property
def height(self):
""" The height of this photo. (Pixels) """
return self._height
@property
def tags(self):
""" The people who are tagged in this photo. """
return self._tags
@property
def likes(self):
""" The people who've liked this photo. """
return self._likes
@property
def comments(self):
""" The comments on this photo. """
return self._comments
@position.setter
def position(self, value):
self._position = value
@image.setter
def image(self, value):
self._image = value
@thumbnail.setter
def thumbnail(self, value):
self._thumbnail = value
@width.setter
def width(self, value):
self._width = value
@height.setter
def height(self, value):
self._height = value
@tags.setter
def tags(self, value):
self._tags = value
@likes.setter
def likes(self, value):
self._likes = value
@comments.setter
def comments(self, value):
self._comments = value
class Images(SocialObjects.Collection):
"""
Lightweight collection class for representing collections of photos.
"""
def __init__(self):
super(Images, self).__init__()
class Tags(SocialObjects.Collection):
"""
Lightweight collection class for representing collections of tags.
Tags are simply User() objects that have been tagged in a photo or status.
"""
def __init__(self):
super(Tags, self).__init__()
class Checkin(SocialObjects.SocialObject):
"""
Representation of a check-in.
A Facebook user can be determined to have been somewhere if they explicitly said they were there in a status,
or have been tagged in a photo that is also tagged with that location.
As well as containing basic information such as where the check-in is for and who the user was with, a check-in object
also contains a "Type" attribute that specifies how the check-in was determined. (Eg: Status, Photo...)
"""
def __init__(self):
super(Checkin, self).__init__()
self._provider = "Facebook" # String
self._checkinType = None # String
self._image = None
@property
def image(self):
return self._image
@image.setter
def image(self, value):
self._image = value
@property
def checkinType(self):
""" This check-in's type. (Eg: Status, Photo) """
return self._checkinType
@checkinType.setter
def checkinType(self, value):
self._checkinType = value
class Checkins(SocialObjects.Collection):
"""
Lightweight collection class for representing collections of check-ins.
"""
def __init__(self):
super(Checkins, self).__init__()
class Page(SocialObjects.SocialObject):
"""
Representation of a generic Facebook page / object.
Pages are used to represent entities like bands, books, films and so on.
"""
def __init__(self):
super(Page, self).__init__()
self._provider = "Facebook"
self._category = None
self._image = None
@property
def image(self):
return self._image
@image.setter
def image(self, value):
self._image = value
@property
def category(self):
return self._category
@category.setter
def category(self, value):
self._category = value
class Like(Page):
"""
A Like is just a representation of a Page.
"""
def __init__(self):
super(Like, self).__init__()
class Music(Page):
"""
Stub for representing music.
"""
def __init__(self):
super(Music, self).__init__()
class Movie(Page):
"""
Stub for representing movies.
"""
def __init__(self):
super(Movie, self).__init__()
class Book(Page):
"""
Stub for representing books.
"""
def __init__(self):
super(Book, self).__init__()
def check_none(value):
"""
Internal function.
Used to check whether or not a value is None. If so, it replaces it with the string "None".
Mainly used for testing and creating string representations.
"""
if (value == None):
return "None"
else:
return value
# Testing.
if __name__ == "__main__":
# Start tests.
print "<Tests>"
# Create an instance of the service gateway.
fb = FacebookServiceGateway()
# Request authentication and print the resulting URI.
# To test, go to the address printed on-screen, sign in, then copy the "Code" param from the URI and paste it
# in the complete_authentication() method below.
response = fb.request_authentication("http://www.st-andrews.ac.uk/") # This param would be callback under real usage.
print "Request authentication URI: " + response
# Complete authentication. (Comment out the parsing of input params in complete_authentication() to use)
fb.complete_authentication("AQARnoOJST6jPEpYu4Lduyx4soRV6e6mVXozt4Pn-zr9nhZVp0ifzYPZxkQtXaRiXZtwBoWJZo9uiR7StQM3V_vFvm7kHaW3Av6Zk7Wrjh7nY_OZmNtAbhyzdw-MmUnFTgMRUgdep3cduUHDDn5sAj46pzz4HNR6UG8gFrpluC1i7dq5evh9j5yn3bmp4pcGilA#_=_")
# Set up a person for testing.
# Me: 532336768
# Alex:
# Ben: 100001427539048
person_1 = SocialObjects.Person()
person_1.id = "532336768"
# Test "Get Person."
person_obj = fb.User("GET", person_1)
print "Grabbed user from Facebook:"
print unicode(person_obj)
# Test "Get Music."
music_list = fb.Music("GET", person_1)
print "<Music>"
for band in music_list:
print "- " + band
print "</Music>"
# Test "Get Movies."
movie_list = fb.Movies("GET", person_1)
print "<Movies>"
for movie in movie_list:
print "- " + movie
print "</Movies>"
# Test "Get Books."
book_list = fb.Books("GET", person_1)
print "<Books>"
for book in book_list:
print "- " + book
print "</Books>"
# Test "Get Statuses."
statuses = fb.Statuses("GET", person_1)
print unicode(statuses)
# Test "Get Friends."
friends = fb.Friends("GET", person_1)
print unicode(friends)
# Test "Get Albums."
albums = fb.Albums("GET", person_1)
print unicode(albums)
# Test "Get Images."
for album in albums.objects:
tmp_album = fb.Images("GET", album)
print unicode(tmp_album)
# Test "Get Check-ins."
checkins = fb.Checkins("GET", person_1)
print unicode(checkins)
# End.
print "</Tests>"
|
uoscompsci/PRISONER
|
prisoner/gateway/FacebookGateway.py
|
Python
|
bsd-3-clause
| 65,871
|
[
"VisIt"
] |
59fd35e2483a18433f7243b02872eeb855653a0fe0d352f0bd74bc650b1a5746
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
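# Illustrative lookup (hypothetical consumer code): class entries in
# _meta_table are keyed by their qualified name and map 'meta_info' to the
# generated _MetaInfoClass, while enum entries map directly to a
# _MetaInfoEnum. For example:
#   cfg_meta = _meta_table['CiscoFlashMib.Ciscoflashcfg']['meta_info']
#   file_type_enum = _meta_table['FlashfiletypeEnum']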
_meta_table = {
'FlashfiletypeEnum' : _MetaInfoEnum('FlashfiletypeEnum', 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB',
{
'unknown':'unknown',
'config':'config',
'image':'image',
'directory':'directory',
'crashinfo':'crashinfo',
}, 'CISCO-FLASH-MIB', _yang_ns._namespaces['CISCO-FLASH-MIB']),
'CiscoFlashMib.Ciscoflashdevice' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashdevice',
False,
[
_MetaInfoClassMember('ciscoFlashDevicesSupported', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of Flash devices supported by the system.
If the system does not support any Flash devices, this
MIB will not be loaded on that system. The value of this
object will therefore be at least 1.
''',
'ciscoflashdevicessupported',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashDevice',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashcfg' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashcfg',
False,
[
_MetaInfoClassMember('ciscoFlashCfgDevInsNotifEnable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Specifies whether or not a notification should be
generated on the insertion of a Flash device.
If the value of this object is 'true' then the
ciscoFlashDeviceInsertedNotif notification
will be generated.
If the value of this object is 'false' then the
ciscoFlashDeviceInsertedNotif notification
will not be generated.
It is the responsibility of the management entity to
ensure that the SNMP administrative model is
configured in such a way as to allow the
notification to be delivered.
''',
'ciscoflashcfgdevinsnotifenable',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashCfgDevRemNotifEnable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Specifies whether or not a notification should be
generated on the removal of a Flash device.
If the value of this object is 'true' then the
ciscoFlashDeviceRemovedNotif notification
will be generated.
If the value of this object is 'false' then the
ciscoFlashDeviceRemovedNotif notification
will not be generated.
It is the responsibility of the management entity to
ensure that the SNMP administrative model is
configured in such a way as to allow the
notification to be delivered.
''',
'ciscoflashcfgdevremnotifenable',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitionLowSpaceNotifEnable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' This object specifies whether or not a notification should be
generated when the free space falls below the threshold value on
a flash partition and on recovery from low space.
If the value of this object is 'true' then
ciscoFlashPartitionLowSpaceNotif and
ciscoFlashPartitionLowSpaceRecoveryNotif notifications will be
generated.
If the value of this object is 'false' then the
ciscoFlashPartitionLowSpaceNotif and
ciscoFlashPartitionLowSpaceRecoveryNotif notifications
will not be generated.
It is the responsibility of the management entity to
ensure that the SNMP administrative model is
configured in such a way as to allow the
notifications to be delivered.
''',
'ciscoflashpartitionlowspacenotifenable',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashCfg',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashdevicetable.Ciscoflashdeviceentry.CiscoflashdeviceprogrammingjumperEnum' : _MetaInfoEnum('CiscoflashdeviceprogrammingjumperEnum', 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB',
{
'installed':'installed',
'notInstalled':'notInstalled',
'unknown':'unknown',
}, 'CISCO-FLASH-MIB', _yang_ns._namespaces['CISCO-FLASH-MIB']),
'CiscoFlashMib.Ciscoflashdevicetable.Ciscoflashdeviceentry' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashdevicetable.Ciscoflashdeviceentry',
False,
[
_MetaInfoClassMember('ciscoFlashDeviceIndex', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Flash device sequence number to index within the
table of initialized flash devices.
The lowest value should be 1. The highest should be
less than or equal to the value of the
ciscoFlashDevicesSupported object.
''',
'ciscoflashdeviceindex',
'CISCO-FLASH-MIB', True),
_MetaInfoClassMember('ciscoFlashDeviceCard', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-1](\\.[1-3]?[0-9]))|(2\\.(0|([1-9]\\d*))))(\\.(0|([1-9]\\d*)))*'],
''' This object will point to an instance of a card entry
in the cardTable. The card entry will give details about
the card on which the Flash device is actually located.
For most systems, this is usually the main processor board.
On the AGS+ systems, Flash is located on a separate multibus
card such as the MC.
This object will therefore be used to essentially index
into cardTable to retrieve details about the card such as
cardDescr, cardSlotNumber, etc.
''',
'ciscoflashdevicecard',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDeviceChipCount', ATTRIBUTE, 'int' , None, None,
[('0', '64')], [],
''' Total number of chips within the Flash device.
The purpose of this object is to provide information
upfront to a management station on how much chip info
to expect and possibly help double check the chip index
against an upper limit when randomly retrieving chip
info for a partition.
''',
'ciscoflashdevicechipcount',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDeviceController', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Flash device controller. The h/w card that actually
controls Flash read/write/erase. Relevant for the AGS+
systems where Flash may be controlled by the MC+, STR or
the ENVM cards, cards that may not actually contain the
Flash chips.
For systems that have removable PCMCIA flash cards that
are controlled by a PCMCIA controller chip, this object
may contain a description of that controller chip.
Where irrelevant (Flash is a direct memory mapped device
accessed directly by the main processor), this object will
have an empty (NULL) string.
''',
'ciscoflashdevicecontroller',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDeviceDescr', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Description of a Flash device. The description is meant
to explain what the Flash device and its purpose is.
Current values are:
System flash - for the primary Flash used to store full
system images.
Boot flash - for the secondary Flash used to store
bootstrap images.
The ciscoFlashDeviceDescr, ciscoFlashDeviceController
(if applicable), and ciscoFlashPhyEntIndex objects are
expected to collectively give all information about a
Flash device.
The device description will always be available for a
removable device, even when the device has been removed.
''',
'ciscoflashdevicedescr',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDeviceInitTime', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' System time at which device was initialized.
For fixed devices, this will be the system time at
boot up.
For removable devices, it will be the time at which
the device was inserted, which may be boot up time,
or a later time (if device was inserted later).
If a device (fixed or removable) was repartitioned,
it will be the time of repartitioning.
The purpose of this object is to help a management
station determine if a removable device has been
changed. The application should retrieve this
object prior to any operation and compare with
the previously retrieved value.
Note that this time will not be real time but a
running time maintained by the system. This running
time starts from zero when the system boots up.
For a removable device that has been removed, this
value will be zero.
''',
'ciscoflashdeviceinittime',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDeviceMaxPartitions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Max number of partitions supported by the system for
this Flash device. Default will be 1, which actually
means that partitioning is not supported. Note that
this value will be defined by system limitations, not
by the flash device itself (for eg., the system may
impose a limit of 2 partitions even though the device
may be large enough to be partitioned into 4 based on
the smallest partition unit supported).
On systems that execute code out of Flash, partitioning
is a way of creating multiple file systems in the Flash
device so that writing into or erasing of one file system
can be done while executing code residing in another file
system.
For systems executing code out of DRAM, partitioning
gives a way of sub-dividing a large Flash device for
easier management of files.
''',
'ciscoflashdevicemaxpartitions',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDeviceMinPartitionSize', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' This object will give the minimum partition size
supported for this device. For systems that execute code
directly out of Flash, the minimum partition size needs
to be the bank size. (Bank size is equal to the size of a
chip multiplied by the width of the device. In most cases,
the device width is 4 bytes, and so the bank size would be
four times the size of a chip). This has to be so because
all programming commands affect the operation of an
entire chip (in our case, an entire bank because all
operations are done on the entire width of the device)
even though the actual command may be localized to a small
portion of each chip. So when executing code out of Flash,
one needs to be able to write and erase some portion of
Flash without affecting the code execution.
For systems that execute code out of DRAM or ROM, it is
possible to partition Flash with a finer granularity (for
eg., at erase sector boundaries) if the system code supports
such granularity.
This object will let a management entity know the
minimum partition size as defined by the system.
If the system does not support partitioning, the value
will be equal to the device size in ciscoFlashDeviceSize.
The maximum number of partitions that could be configured
will be equal to the minimum of
ciscoFlashDeviceMaxPartitions
and
(ciscoFlashDeviceSize / ciscoFlashDeviceMinPartitionSize).
If the total size of the flash device is greater than the
maximum value reportable by this object then this object should
report its maximum value(4,294,967,295) and
ciscoFlashDeviceMinPartitionSizeExtended must be used to report
the flash device's minimum partition size.
''',
'ciscoflashdeviceminpartitionsize',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDeviceMinPartitionSizeExtended', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' This object provides the minimum partition size supported for
this device. This object is a 64-bit version of
ciscoFlashDeviceMinPartitionSize.
''',
'ciscoflashdeviceminpartitionsizeextended',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDeviceName', ATTRIBUTE, 'str' , None, None,
[(0, 16)], [],
''' Flash device name. This name is used to refer to the
device within the system. Flash operations get directed
to a device based on this name.
The system has a concept of a default device.
This would be the primary or most used device in case of
multiple devices. The system directs an operation to the
default device whenever a device name is not specified.
The device name is therefore mandatory except when the
operation is being done on the default device, or,
the system supports only a single Flash device.
The device name will always be available for a
removable device, even when the device has been removed.
''',
'ciscoflashdevicename',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDeviceNameExtended', ATTRIBUTE, 'str' , None, None,
[(0, 255)], [],
''' Extended Flash device name whose size can be upto
255 characters. This name is used to refer to the
device within the system. Flash operations get directed
to a device based on this name.
The system has a concept of a default device.
This would be the primary or most used device in case
of multiple devices. The system directs an operation
to the default device whenever a device name is not
specified. The device name is therefore mandatory
except when the operation is being done on the
default device, or, the system supports only a single
Flash device. The device name will always be available
for a removable device, even when the device has been
removed.
''',
'ciscoflashdevicenameextended',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDevicePartitions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Flash device partitions actually present. Number of
partitions cannot exceed the minimum of
ciscoFlashDeviceMaxPartitions
and
(ciscoFlashDeviceSize / ciscoFlashDeviceMinPartitionSize).
Will be equal to at least 1, the case where the partition
spans the entire device (actually no partitioning).
A partition will contain one or more minimum partition
units (where a minimum partition unit is defined by
ciscoFlashDeviceMinPartitionSize).
''',
'ciscoflashdevicepartitions',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDeviceProgrammingJumper', REFERENCE_ENUM_CLASS, 'CiscoflashdeviceprogrammingjumperEnum' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashdevicetable.Ciscoflashdeviceentry.CiscoflashdeviceprogrammingjumperEnum',
[], [],
''' This object gives the state of a jumper (if present and can be
determined) that controls the programming voltage called Vpp
to the Flash device. Vpp is required for programming (erasing
and writing) Flash. For certain older technology chips it is
also required for identifying the chips (which in turn is
required to identify which programming algorithms to use;
different chips require different algorithms and commands).
The purpose of the jumper, on systems where it is available,
is to write protect a Flash device.
On most of the newer remote access routers, this jumper is
unavailable since users are not expected to visit remote sites
just to install and remove the jumpers when upgrading software
in the Flash device. The unknown(3) value will be returned for
such systems and can be interpreted to mean that a programming
jumper is not present or not required on those systems.
On systems where the programming jumper state can be read back
via a hardware register, the installed(1) or notInstalled(2)
value will be returned.
This object is expected to be used in conjunction with the
ciscoFlashPartitionStatus object whenever that object has
the readOnly(1) value. In such a case, this object will
indicate whether the programming jumper is a possible reason
for the readOnly state.
''',
'ciscoflashdeviceprogrammingjumper',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDeviceRemovable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Whether Flash device is removable. Generally, only PCMCIA
Flash cards will be treated as removable. Socketed Flash
chips and Flash SIMM modules will not be treated as removable.
Simply put, only those Flash devices that can be inserted
or removed without opening the hardware casing will be
considered removable.
Further, removable Flash devices are expected to have
the necessary hardware support -
1. on-line removal and insertion
2. interrupt generation on removal or insertion.
''',
'ciscoflashdeviceremovable',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDeviceSize', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total size of the Flash device.
For a removable device, the size will be zero if
the device has been removed.
If the total size of the flash device is greater than the
maximum value reportable by this object then this object
should report its maximum value(4,294,967,295) and
ciscoFlashDeviceSizeExtended must be used to report the
flash device's size.
''',
'ciscoflashdevicesize',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDeviceSizeExtended', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Total size of the Flash device.
For a removable device, the size will be zero if
the device has been removed.
This object is a 64-bit version of ciscoFlashDeviceSize.
''',
'ciscoflashdevicesizeextended',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPhyEntIndex', ATTRIBUTE, 'int' , None, None,
[('0', '2147483647')], [],
''' This object indicates the physical entity index of a
physical entity in entPhysicalTable which the flash
device actually located.
''',
'ciscoflashphyentindex',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashDeviceEntry',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashdevicetable' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashdevicetable',
False,
[
_MetaInfoClassMember('ciscoFlashDeviceEntry', REFERENCE_LIST, 'Ciscoflashdeviceentry' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashdevicetable.Ciscoflashdeviceentry',
[], [],
''' An entry in the table of flash device properties for
each initialized flash device.
Each entry can be randomly accessed by using
ciscoFlashDeviceIndex as an index into the table.
Note that removable devices will have an entry in
the table even when they have been removed. However,
a non-removable device that has not been installed
will not have an entry in the table.
''',
'ciscoflashdeviceentry',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashDeviceTable',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashchiptable.Ciscoflashchipentry' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashchiptable.Ciscoflashchipentry',
False,
[
_MetaInfoClassMember('ciscoFlashDeviceIndex', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' ''',
'ciscoflashdeviceindex',
'CISCO-FLASH-MIB', True),
_MetaInfoClassMember('ciscoFlashChipIndex', ATTRIBUTE, 'int' , None, None,
[('1', '64')], [],
''' Chip sequence number within selected flash device.
Used to index within chip info table.
Value starts from 1 and should not be greater than
ciscoFlashDeviceChipCount for that device.
When retrieving chip information for chips within a
partition, the sequence number should lie between
ciscoFlashPartitionStartChip & ciscoFlashPartitionEndChip
(both inclusive).
''',
'ciscoflashchipindex',
'CISCO-FLASH-MIB', True),
_MetaInfoClassMember('ciscoFlashChipCode', ATTRIBUTE, 'str' , None, None,
[(0, 5)], [],
''' Manufacturer and device code for a chip.
Lower byte will contain the device code.
Upper byte will contain the manufacturer code.
If a chip code is unknown because it could not
be queried out of the chip, the value of this
object will be 00:00.
Since programming algorithms differ from chip type to
chip type, this chip code should be used to determine
which algorithms to use (and thereby whether the chip
is supported in the first place).
''',
'ciscoflashchipcode',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashChipDescr', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Flash chip name corresponding to the chip code.
The name will contain the manufacturer and the
chip type. It will be of the form :
Intel 27F008SA.
In the case where a chip code is unknown, this
object will be an empty (NULL) string.
In the case where the chip code is known but the
chip is not supported by the system, this object
will be an empty (NULL) string.
A management station is therefore expected to use the
chip code and the chip description in conjunction
to provide additional information whenever the
ciscoFlashPartitionStatus object has the readOnly(1)
value.
''',
'ciscoflashchipdescr',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashChipEraseRetries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' This object will provide a cumulative count
(since last system boot up or initialization) of
the number of erase retries that were done in the chip.
Typically, a maximum of 2000 retries are done in a
single erase zone (which may be a full chip or a
portion, depending on the chip technology) before
flagging an erase error.
A management station is expected to get this object
for each chip in a partition after an erase failure
in that partition. To keep a track of retries for
a given erase operation, the management station would
have to retrieve the values for the concerned chips
before and after any erase operation.
Note that erase may be done through an independent
command, or through a copy-to-flash command.
''',
'ciscoflashchiperaseretries',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashChipMaxEraseRetries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The maximum number of erase retries done within
an erase sector before declaring an erase failure.
''',
'ciscoflashchipmaxeraseretries',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashChipMaxWriteRetries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The maximum number of write retries done at any
single location before declaring a write failure.
''',
'ciscoflashchipmaxwriteretries',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashChipWriteRetries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' This object will provide a cumulative count
(since last system boot up or initialization) of
the number of write retries that were done in the chip.
If no writes have been done to Flash, the count
will be zero. Typically, a maximum of 25 retries are
done on a single location before flagging a write
error.
A management station is expected to get this object
for each chip in a partition after a write failure
in that partition. To keep a track of retries for
a given write operation, the management station would
have to retrieve the values for the concerned chips
before and after any write operation.
''',
'ciscoflashchipwriteretries',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashChipEntry',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashchiptable' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashchiptable',
False,
[
_MetaInfoClassMember('ciscoFlashChipEntry', REFERENCE_LIST, 'Ciscoflashchipentry' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashchiptable.Ciscoflashchipentry',
[], [],
''' An entry in the table of chip info for each
flash device initialized in the system.
An entry is indexed by two objects - the
device index and the chip index within that
device.
''',
'ciscoflashchipentry',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashChipTable',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashpartitiontable.Ciscoflashpartitionentry.CiscoflashpartitionchecksumalgorithmEnum' : _MetaInfoEnum('CiscoflashpartitionchecksumalgorithmEnum', 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB',
{
'simpleChecksum':'simpleChecksum',
'undefined':'undefined',
'simpleCRC':'simpleCRC',
}, 'CISCO-FLASH-MIB', _yang_ns._namespaces['CISCO-FLASH-MIB']),
'CiscoFlashMib.Ciscoflashpartitiontable.Ciscoflashpartitionentry.CiscoflashpartitionstatusEnum' : _MetaInfoEnum('CiscoflashpartitionstatusEnum', 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB',
{
'readOnly':'readOnly',
'runFromFlash':'runFromFlash',
'readWrite':'readWrite',
}, 'CISCO-FLASH-MIB', _yang_ns._namespaces['CISCO-FLASH-MIB']),
'CiscoFlashMib.Ciscoflashpartitiontable.Ciscoflashpartitionentry.CiscoflashpartitionupgrademethodEnum' : _MetaInfoEnum('CiscoflashpartitionupgrademethodEnum', 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB',
{
'unknown':'unknown',
'rxbootFLH':'rxbootFLH',
'direct':'direct',
}, 'CISCO-FLASH-MIB', _yang_ns._namespaces['CISCO-FLASH-MIB']),
'CiscoFlashMib.Ciscoflashpartitiontable.Ciscoflashpartitionentry' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashpartitiontable.Ciscoflashpartitionentry',
False,
[
_MetaInfoClassMember('ciscoFlashDeviceIndex', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' ''',
'ciscoflashdeviceindex',
'CISCO-FLASH-MIB', True),
_MetaInfoClassMember('ciscoFlashPartitionIndex', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Flash partition sequence number used to index within
table of initialized flash partitions.
''',
'ciscoflashpartitionindex',
'CISCO-FLASH-MIB', True),
_MetaInfoClassMember('ciscoFlashPartitionChecksumAlgorithm', REFERENCE_ENUM_CLASS, 'CiscoflashpartitionchecksumalgorithmEnum' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashpartitiontable.Ciscoflashpartitionentry.CiscoflashpartitionchecksumalgorithmEnum',
[], [],
''' Checksum algorithm identifier for checksum method
used by the file system. Normally, this would be
fixed for a particular file system. When a file
system writes a file to Flash, it checksums the
data written. The checksum then serves as a way
to validate the data read back whenever the file
is opened for reading.
Since there is no way, when using TFTP, to guarantee
that a network download has been error free (since
UDP checksums may not have been enabled), this
object together with the ciscoFlashFileChecksum
object provides a method for any management station
to regenerate the checksum of the original file
on the server and compare checksums to ensure that
the file download to Flash was error free.
simpleChecksum represents a simple 1s complement
addition of short word values. Other algorithm
values will be added as necessary.
''',
'ciscoflashpartitionchecksumalgorithm',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitionEndChip', ATTRIBUTE, 'int' , None, None,
[('1', '64')], [],
''' Chip sequence number of last chip in partition.
Used as an index into the chip table.
''',
'ciscoflashpartitionendchip',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitionFileCount', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Count of all files in a flash partition. Both
good and bad (deleted or invalid checksum) files
will be included in this count.
''',
'ciscoflashpartitionfilecount',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitionFileNameLength', ATTRIBUTE, 'int' , None, None,
[('1', '256')], [],
''' Maximum file name length supported by the file
system.
Max file name length will depend on the file
system implemented. Today, all file systems
support a max length of at least 48 bytes.
A management entity must use this object when
prompting a user for, or deriving the Flash file
name length.
''',
'ciscoflashpartitionfilenamelength',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitionFreeSpace', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Free space within a Flash partition.
Note that the actual size of a file in Flash includes
a small overhead that represents the file system's
file header.
Certain file systems may also have a partition or
device header overhead to be considered when
computing the free space.
Free space will be computed as total partition size
less size of all existing files (valid/invalid/deleted
files and including file header of each file),
less size of any partition header, less size of
header of next file to be copied in. In short, this
object will give the size of the largest file that
can be copied in. The management entity will not be
expected to know or use any overheads such as file
and partition header lengths, since such overheads
may vary from file system to file system.
Deleted files in Flash do not free up space.
A partition may have to be erased in order to reclaim
the space occupied by files.
If the free space within a flash partition is greater than
the maximum value reportable by this object then this object
should report its maximum value(4,294,967,295) and
ciscoFlashPartitionFreeSpaceExtended
must be used to report the flash partition's free space.
''',
'ciscoflashpartitionfreespace',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitionFreeSpaceExtended', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Free space within a Flash partition.
Note that the actual size of a file in Flash includes
a small overhead that represents the file system's
file header.
Certain file systems may also have a partition or
device header overhead to be considered when
computing the free space.
Free space will be computed as total partition size
less size of all existing files (valid/invalid/deleted
files and including file header of each file),
less size of any partition header, less size of
header of next file to be copied in. In short, this
object will give the size of the largest file that
can be copied in. The management entity will not be
expected to know or use any overheads such as file
and partition header lengths, since such overheads
may vary from file system to file system.
Deleted files in Flash do not free up space.
A partition may have to be erased in order to reclaim
the space occupied by files.
This object is a 64-bit version of ciscoFlashPartitionFreeSpace
''',
'ciscoflashpartitionfreespaceextended',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitionLowSpaceNotifThreshold', ATTRIBUTE, 'int' , None, None,
[('0', '100')], [],
''' This object specifies the minimum threshold value in percentage
of free space for each partition. If the free space available
goes below this threshold value and if
ciscoFlashPartitionLowSpaceNotifEnable is set to true,
ciscoFlashPartitionLowSpaceNotif will be generated. When the
available free space comes back to the threshold value
ciscoFlashPartitionLowSpaceRecoveryNotif will be generated.
''',
'ciscoflashpartitionlowspacenotifthreshold',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitionName', ATTRIBUTE, 'str' , None, None,
[(0, 16)], [],
''' Flash partition name used to refer to a partition
by the system. This can be any alpha-numeric character
string of the form AAAAAAAAnn, where A represents an
optional alpha character and n a numeric character.
Any numeric characters must always form the trailing
part of the string. The system will strip off the alpha
characters and use the numeric portion to map to a
partition index.
Flash operations get directed to a device partition
based on this name.
The system has a concept of a default partition. This
would be the first partition in the device. The system
directs an operation to the default partition whenever
a partition name is not specified.
The partition name is therefore mandatory except when
the operation is being done on the default partition, or
the device has just one partition (is not partitioned).
''',
'ciscoflashpartitionname',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitionNeedErasure', ATTRIBUTE, 'bool' , None, None,
[], [],
''' This object indicates whether a partition requires
erasure before any write operations can be done in it.
A management station should therefore retrieve this
object prior to attempting any write operation.
A partition requires erasure after it becomes full, i.e., when
the free space left is less than or equal to the
file system's file header size.
A partition also requires erasure if the system does
not find the existence of any file system when it
boots up.
The partition may be erased explicitly through the
erase(5) command, or by using the copyToFlashWithErase(1)
command.
If a copyToFlashWithoutErase(2) command is issued
when this object has the TRUE value, the command
will fail.
''',
'ciscoflashpartitionneederasure',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitionSize', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Flash partition size. It should be an integral
multiple of ciscoFlashDeviceMinPartitionSize.
If there is a single partition, this size will be equal
to ciscoFlashDeviceSize.
If the size of the flash partition is greater than the
maximum value reportable by this object then this object
should report its maximum value(4,294,967,295) and
ciscoFlashPartitionSizeExtended must be used to report the
flash partition's size.
''',
'ciscoflashpartitionsize',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitionSizeExtended', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Flash partition size. It should be an integral
multiple of ciscoFlashDeviceMinPartitionSize.
If there is a single partition, this size will be equal
to ciscoFlashDeviceSize.
This object is a 64-bit version of ciscoFlashPartitionSize
''',
'ciscoflashpartitionsizeextended',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitionStartChip', ATTRIBUTE, 'int' , None, None,
[('1', '64')], [],
''' Chip sequence number of first chip in partition.
Used as an index into the chip table.
''',
'ciscoflashpartitionstartchip',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitionStatus', REFERENCE_ENUM_CLASS, 'CiscoflashpartitionstatusEnum' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashpartitiontable.Ciscoflashpartitionentry.CiscoflashpartitionstatusEnum',
[], [],
''' Flash partition status can be :
* readOnly if device is not programmable either because
chips could not be recognized or an erroneous mismatch
of chips was detected. Chip recognition may fail either
because the chips are not supported by the system,
or because the Vpp voltage required to identify chips
has been disabled via the programming jumper.
The ciscoFlashDeviceProgrammingJumper, ciscoFlashChipCode,
and ciscoFlashChipDescr objects can be examined to get
more details on the cause of this status
* runFromFlash (RFF) if current image is running from
this partition.
The ciscoFlashPartitionUpgradeMethod object will then
indicate whether the Flash Load Helper can be used
to write a file to this partition or not.
* readWrite if partition is programmable.
''',
'ciscoflashpartitionstatus',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitionUpgradeMethod', REFERENCE_ENUM_CLASS, 'CiscoflashpartitionupgrademethodEnum' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashpartitiontable.Ciscoflashpartitionentry.CiscoflashpartitionupgrademethodEnum',
[], [],
''' Flash partition upgrade method, ie., method by which
new files can be downloaded into the partition.
FLH stands for Flash Load Helper, a feature provided
on run-from-Flash systems for upgrading Flash. This
feature uses the bootstrap code in ROMs to help in
automatic download.
This object should be retrieved if the partition
status is runFromFlash(2).
If the partition status is readOnly(1), the upgrade
method would depend on the reason for the readOnly
status. For eg., it may simply be a matter of installing
the programming jumper, or it may require execution of a
later version of software that supports the Flash chips.
unknown - the current system image does not know
how Flash can be programmed. A possible
method would be to reload the ROM image
and perform the upgrade manually.
rxbootFLH - the Flash Load Helper is available to
download files to Flash. A copy-to-flash
command can be used and this system image
will automatically reload the Rxboot image
in ROM and direct it to carry out the
download request.
direct - will be done directly by this image.
''',
'ciscoflashpartitionupgrademethod',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashPartitionEntry',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashpartitiontable' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashpartitiontable',
False,
[
_MetaInfoClassMember('ciscoFlashPartitionEntry', REFERENCE_LIST, 'Ciscoflashpartitionentry' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashpartitiontable.Ciscoflashpartitionentry',
[], [],
''' An entry in the table of flash partition properties
for each initialized flash partition. Each entry
will be indexed by a device number and a partition
number within the device.
''',
'ciscoflashpartitionentry',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashPartitionTable',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashfiletable.Ciscoflashfileentry.CiscoflashfilestatusEnum' : _MetaInfoEnum('CiscoflashfilestatusEnum', 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB',
{
'deleted':'deleted',
'invalidChecksum':'invalidChecksum',
'valid':'valid',
}, 'CISCO-FLASH-MIB', _yang_ns._namespaces['CISCO-FLASH-MIB']),
'CiscoFlashMib.Ciscoflashfiletable.Ciscoflashfileentry' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashfiletable.Ciscoflashfileentry',
False,
[
_MetaInfoClassMember('ciscoFlashDeviceIndex', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' ''',
'ciscoflashdeviceindex',
'CISCO-FLASH-MIB', True),
_MetaInfoClassMember('ciscoFlashPartitionIndex', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' ''',
'ciscoflashpartitionindex',
'CISCO-FLASH-MIB', True),
_MetaInfoClassMember('ciscoFlashFileIndex', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Flash file sequence number used to index within
a Flash partition directory table.
''',
'ciscoflashfileindex',
'CISCO-FLASH-MIB', True),
_MetaInfoClassMember('ciscoFlashFileChecksum', ATTRIBUTE, 'str' , None, None,
[], [],
''' File checksum stored in the file header. This
checksum is computed and stored when the file is
written into Flash. It serves to validate the data
written into Flash.
Whereas the system will generate and store the checksum
internally in hexadecimal form, this object will
provide the checksum in a string form.
The checksum will be available for all valid and
invalid-checksum files.
''',
'ciscoflashfilechecksum',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashFileDate', ATTRIBUTE, 'str' , None, None,
[], [],
''' The time at which this file was created.
''',
'ciscoflashfiledate',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashFileName', ATTRIBUTE, 'str' , None, None,
[(1, 255)], [],
''' Flash file name as specified by the user copying in
the file. The name should not include the colon (:)
character as it is a special separator character used
to delineate the device name, partition name, and the
file name.
''',
'ciscoflashfilename',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashFileSize', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Size of the file in bytes. Note that this size does
not include the size of the filesystem file header.
File size will always be non-zero.
''',
'ciscoflashfilesize',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashFileStatus', REFERENCE_ENUM_CLASS, 'CiscoflashfilestatusEnum' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashfiletable.Ciscoflashfileentry.CiscoflashfilestatusEnum',
[], [],
''' Status of a file.
A file could be explicitly deleted if the file system
supports such a user command facility. Alternately,
an existing good file would be automatically deleted
if another good file with the same name were copied in.
Note that deleted files continue to occupy prime
Flash real estate.
A file is marked as having an invalid checksum if any
checksum mismatch was detected while writing or reading
the file. Incomplete files (files truncated either
because of lack of free space, or a network download
failure) are also written with a bad checksum and
marked as invalid.
''',
'ciscoflashfilestatus',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashFileType', REFERENCE_ENUM_CLASS, 'FlashfiletypeEnum' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'FlashfiletypeEnum',
[], [],
''' Type of the file.
''',
'ciscoflashfiletype',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashFileEntry',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashfiletable' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashfiletable',
False,
[
_MetaInfoClassMember('ciscoFlashFileEntry', REFERENCE_LIST, 'Ciscoflashfileentry' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashfiletable.Ciscoflashfileentry',
[], [],
''' An entry in the table of Flash file properties
for each initialized Flash partition. Each entry
represents a file and gives details about the file.
An entry is indexed using the device number,
partition number within the device, and file
number within the partition.
''',
'ciscoflashfileentry',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashFileTable',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashfilebytypetable.Ciscoflashfilebytypeentry.CiscoflashfilebytypestatusEnum' : _MetaInfoEnum('CiscoflashfilebytypestatusEnum', 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB',
{
'deleted':'deleted',
'invalidChecksum':'invalidChecksum',
'valid':'valid',
}, 'CISCO-FLASH-MIB', _yang_ns._namespaces['CISCO-FLASH-MIB']),
'CiscoFlashMib.Ciscoflashfilebytypetable.Ciscoflashfilebytypeentry' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashfilebytypetable.Ciscoflashfilebytypeentry',
False,
[
_MetaInfoClassMember('ciscoFlashFileType', REFERENCE_ENUM_CLASS, 'FlashfiletypeEnum' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'FlashfiletypeEnum',
[], [],
''' ''',
'ciscoflashfiletype',
'CISCO-FLASH-MIB', True),
_MetaInfoClassMember('ciscoFlashDeviceIndex', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' ''',
'ciscoflashdeviceindex',
'CISCO-FLASH-MIB', True),
_MetaInfoClassMember('ciscoFlashPartitionIndex', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' ''',
'ciscoflashpartitionindex',
'CISCO-FLASH-MIB', True),
_MetaInfoClassMember('ciscoFlashFileIndex', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' ''',
'ciscoflashfileindex',
'CISCO-FLASH-MIB', True),
_MetaInfoClassMember('ciscoFlashFileByTypeChecksum', ATTRIBUTE, 'str' , None, None,
[], [],
''' This object represents exactly the
same info as ciscoFlashFileChecksum
object in ciscoFlashFileTable.
''',
'ciscoflashfilebytypechecksum',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashFileByTypeDate', ATTRIBUTE, 'str' , None, None,
[], [],
''' This object represents exactly the
same info as ciscoFlashFileDate
object in ciscoFlashFileTable.
''',
'ciscoflashfilebytypedate',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashFileByTypeName', ATTRIBUTE, 'str' , None, None,
[(1, 255)], [],
''' This object represents exactly the
same info as ciscoFlashFileName
object in ciscoFlashFileTable.
''',
'ciscoflashfilebytypename',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashFileByTypeSize', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' This object represents exactly the
same info as ciscoFlashFileSize
object in ciscoFlashFileTable.
''',
'ciscoflashfilebytypesize',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashFileByTypeStatus', REFERENCE_ENUM_CLASS, 'CiscoflashfilebytypestatusEnum' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashfilebytypetable.Ciscoflashfilebytypeentry.CiscoflashfilebytypestatusEnum',
[], [],
''' This object represents exactly the
same info as ciscoFlashFileStatus
object in ciscoFlashFileTable.
''',
'ciscoflashfilebytypestatus',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashFileByTypeEntry',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashfilebytypetable' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashfilebytypetable',
False,
[
_MetaInfoClassMember('ciscoFlashFileByTypeEntry', REFERENCE_LIST, 'Ciscoflashfilebytypeentry' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashfilebytypetable.Ciscoflashfilebytypeentry',
[], [],
''' An entry in the table of Flash file properties
for each initialized Flash partition. Each entry
represents a file sorted by file type.
This table contains exactly the same set of rows
as are contained in the ciscoFlashFileTable but
in a different order, i.e., ordered by
the type of file, given by ciscoFlashFileType;
the device number, given by ciscoFlashDeviceIndex;
the partition number within the device, given by
ciscoFlashPartitionIndex;
the file number within the partition, given by
ciscoFlashFileIndex.
''',
'ciscoflashfilebytypeentry',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashFileByTypeTable',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashcopytable.Ciscoflashcopyentry.CiscoflashcopycommandEnum' : _MetaInfoEnum('CiscoflashcopycommandEnum', 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB',
{
'copyToFlashWithErase':'copyToFlashWithErase',
'copyToFlashWithoutErase':'copyToFlashWithoutErase',
'copyFromFlash':'copyFromFlash',
'copyFromFlhLog':'copyFromFlhLog',
}, 'CISCO-FLASH-MIB', _yang_ns._namespaces['CISCO-FLASH-MIB']),
'CiscoFlashMib.Ciscoflashcopytable.Ciscoflashcopyentry.CiscoflashcopyprotocolEnum' : _MetaInfoEnum('CiscoflashcopyprotocolEnum', 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB',
{
'tftp':'tftp',
'rcp':'rcp',
'lex':'lex',
'ftp':'ftp',
'scp':'scp',
'sftp':'sftp',
}, 'CISCO-FLASH-MIB', _yang_ns._namespaces['CISCO-FLASH-MIB']),
'CiscoFlashMib.Ciscoflashcopytable.Ciscoflashcopyentry.CiscoflashcopystatusEnum' : _MetaInfoEnum('CiscoflashcopystatusEnum', 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB',
{
'copyOperationPending':'copyOperationPending',
'copyInProgress':'copyInProgress',
'copyOperationSuccess':'copyOperationSuccess',
'copyInvalidOperation':'copyInvalidOperation',
'copyInvalidProtocol':'copyInvalidProtocol',
'copyInvalidSourceName':'copyInvalidSourceName',
'copyInvalidDestName':'copyInvalidDestName',
'copyInvalidServerAddress':'copyInvalidServerAddress',
'copyDeviceBusy':'copyDeviceBusy',
'copyDeviceOpenError':'copyDeviceOpenError',
'copyDeviceError':'copyDeviceError',
'copyDeviceNotProgrammable':'copyDeviceNotProgrammable',
'copyDeviceFull':'copyDeviceFull',
'copyFileOpenError':'copyFileOpenError',
'copyFileTransferError':'copyFileTransferError',
'copyFileChecksumError':'copyFileChecksumError',
'copyNoMemory':'copyNoMemory',
'copyUnknownFailure':'copyUnknownFailure',
'copyInvalidSignature':'copyInvalidSignature',
'copyProhibited':'copyProhibited',
}, 'CISCO-FLASH-MIB', _yang_ns._namespaces['CISCO-FLASH-MIB']),
'CiscoFlashMib.Ciscoflashcopytable.Ciscoflashcopyentry' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashcopytable.Ciscoflashcopyentry',
False,
[
_MetaInfoClassMember('ciscoFlashCopySerialNumber', ATTRIBUTE, 'int' , None, None,
[('0', '2147483647')], [],
''' Object which specifies a unique entry in the
table. A management station wishing to initiate a
copy operation should use a pseudo-random value for
this object when creating or modifying an instance of
a ciscoFlashCopyEntry.
''',
'ciscoflashcopyserialnumber',
'CISCO-FLASH-MIB', True),
_MetaInfoClassMember('ciscoFlashCopyCommand', REFERENCE_ENUM_CLASS, 'CiscoflashcopycommandEnum' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashcopytable.Ciscoflashcopyentry.CiscoflashcopycommandEnum',
[], [],
''' The copy command to be executed. Mandatory.
Note that it is possible for a system to support
multiple file systems (different file systems on
different Flash devices, or different file systems
on different partitions within a device). Each such
file system may support only a subset of these commands.
If a command is unsupported, the invalidOperation(3)
error will be reported in the operation status.
Command Remarks
copyToFlashWithErase Copy a file to flash; erase
flash before copy.
Use the TFTP or rcp protocol.
copyToFlashWithoutErase Copy a file to flash; do not
erase.
Note that this command will fail
if the PartitionNeedErasure
object specifies that the
partition being copied to needs
erasure.
Use the TFTP or rcp protocol.
copyFromFlash Copy a file from flash using
the TFTP, rcp or lex protocol.
Note that the lex protocol
can only be used to copy to a
lex device.
copyFromFlhLog Copy contents of FLH log to
server using TFTP protocol.
Command table Parameters
copyToFlashWithErase CopyProtocol
CopyServerAddress
CopySourceName
CopyDestinationName (opt)
CopyRemoteUserName (opt)
CopyNotifyOnCompletion (opt)
copyToFlashWithoutErase CopyProtocol
CopyServerAddress
CopySourceName
CopyDestinationName (opt)
CopyRemoteUserName (opt)
CopyNotifyOnCompletion (opt)
copyFromFlash CopyProtocol
CopyServerAddress
CopySourceName
CopyDestinationName (opt)
CopyRemoteUserName (opt)
CopyNotifyOnCompletion (opt)
copyFromFlhLog CopyProtocol
CopyServerAddress
CopyDestinationName
CopyNotifyOnCompletion (opt)
''',
'ciscoflashcopycommand',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashCopyDestinationName', ATTRIBUTE, 'str' , None, None,
[(0, 255)], [],
''' Destination file name.
For a copy to Flash:
File name must be of the form
                [<device>:][<partition>:]<file>
where <device> is a value obtained from FlashDeviceName,
<partition> is obtained from FlashPartitionName
and <file> is any character string that does not have
embedded colon characters.
A management station could derive its own partition name
as per the description for the ciscoFlashPartitionName
object.
If <device> is not specified, the default Flash device
will be assumed.
If <partition> is not specified, the default partition
will be assumed. If a device is not partitioned into 2
or more partitions, this value may be left out.
If <file> is not specified, it will default to <file>
specified in ciscoFlashCopySourceName.
For a copy from Flash via tftp or rcp, the file name will be
as per the file naming conventions and destination sub-directory
on the server. If not specified, <file> from the source
file name will be used.
For a copy from Flash via lex, this string will consist
of numeric characters specifying the interface on the
lex box that will receive the source flash image.
''',
'ciscoflashcopydestinationname',
'CISCO-FLASH-MIB', False),
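            # Illustrative values only: a destination for a copy to Flash
            # might look like 'slot0:2:c7200-js-mz' (device 'slot0',
            # partition 2, file 'c7200-js-mz'); the device and partition
            # prefixes may be omitted to use the defaults described above.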
_MetaInfoClassMember('ciscoFlashCopyEntryStatus', REFERENCE_ENUM_CLASS, 'RowstatusEnum' , 'ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowstatusEnum',
[], [],
''' The status of this table entry.
''',
'ciscoflashcopyentrystatus',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashCopyNotifyOnCompletion', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Specifies whether or not a notification should be
generated on the completion of the copy operation.
If specified, ciscoFlashCopyCompletionTrap
will be generated. It is the responsibility of the
management entity to ensure that the SNMP administrative
model is configured in such a way as to allow the
notification to be delivered.
''',
'ciscoflashcopynotifyoncompletion',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashCopyProtocol', REFERENCE_ENUM_CLASS, 'CiscoflashcopyprotocolEnum' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashcopytable.Ciscoflashcopyentry.CiscoflashcopyprotocolEnum',
[], [],
''' The protocol to be used for any copy. Optional.
Will default to tftp if not specified.
Since feature support depends on a software release,
version number within the release, platform, and
maybe the image type (subset type), a management
station would be expected to somehow determine
the protocol support for a command.
''',
'ciscoflashcopyprotocol',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashCopyRemotePassword', ATTRIBUTE, 'str' , None, None,
[(1, 40)], [],
''' Password used by ftp, sftp or scp for copying a file
to/from an ftp/sftp/scp server. This object must be
created when the ciscoFlashCopyProtocol is ftp, sftp or
scp. Reading it returns a zero-length string for
security reasons.
''',
'ciscoflashcopyremotepassword',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashCopyRemoteUserName', ATTRIBUTE, 'str' , None, None,
[(1, 255)], [],
''' Remote user name for copy via rcp protocol. Optional.
This object will be ignored for protocols other than
rcp.
If specified, it will override the remote user-name
configured through the
rcmd remote-username
configuration command.
The remote user-name is sent as the server user-name
in an rcp command request sent by the system to a
remote rcp server.
''',
'ciscoflashcopyremoteusername',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashCopyServerAddress', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' The server address to be used for any copy. Optional.
Will default to 'FFFFFFFF'H (or 255.255.255.255).
                Since this object can hold only an IPv4 transport
                type address, it is deprecated and replaced by
ciscoFlashCopyServerAddrRev1.
''',
'ciscoflashcopyserveraddress',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashCopyServerAddrRev1', ATTRIBUTE, 'str' , None, None,
[(0, 255)], [],
''' The server address to be used for any copy. Optional.
Will default to 'FFFFFFFF'H (or 255.255.255.255).
                The format of this address depends on the value of the
ciscoFlashCopyServerAddrType.
This object deprecates the old
ciscoFlashCopyServerAddress object.
''',
'ciscoflashcopyserveraddrrev1',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashCopyServerAddrType', REFERENCE_ENUM_CLASS, 'InetaddresstypeEnum' , 'ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetaddresstypeEnum',
[], [],
''' This object indicates the transport type of the
address contained in
ciscoFlashCopyServerAddrRev1. Optional.
Will default to '1' (IPv4 address type).
''',
'ciscoflashcopyserveraddrtype',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashCopySourceName', ATTRIBUTE, 'str' , None, None,
[(1, 255)], [],
''' Source file name, either in Flash or on a server,
depending on the type of copy command. Mandatory.
For a copy from Flash:
                File name must be of the form
                [<device>:][<partition>:]<file>
                where <device> is a value obtained from FlashDeviceName,
                <partition> is obtained from FlashPartitionName
                and <file> is the name of a file in Flash.
A management station could derive its own partition name
as per the description for the ciscoFlashPartitionName
object.
If <device> is not specified, the default Flash device
will be assumed.
If <partition> is not specified, the default partition
will be assumed. If a device is not partitioned into 2
or more partitions, this value may be left out.
For a copy to Flash, the file name will be as per
the file naming conventions and path to the file on
the server.
''',
'ciscoflashcopysourcename',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashCopyStatus', REFERENCE_ENUM_CLASS, 'CiscoflashcopystatusEnum' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashcopytable.Ciscoflashcopyentry.CiscoflashcopystatusEnum',
[], [],
''' The status of the specified copy operation.
copyOperationPending :
operation request is received and
pending for validation and process
copyInProgress :
specified operation is active
copyOperationSuccess :
specified operation is supported and
completed successfully
copyInvalidOperation :
command invalid or command-protocol-device
combination unsupported
copyInvalidProtocol :
invalid protocol specified
copyInvalidSourceName :
invalid source file name specified
For the copy from flash to lex operation, this
error code will be returned when the source file
is not a valid lex image.
copyInvalidDestName :
invalid target name (file or partition or
device name) specified
For the copy from flash to lex operation, this
error code will be returned when no lex devices
are connected to the router or when an invalid
lex interface number has been specified in
the destination string.
copyInvalidServerAddress :
invalid server address specified
copyDeviceBusy :
specified device is in use and locked by
another process
copyDeviceOpenError :
invalid device name
copyDeviceError :
device read, write or erase error
copyDeviceNotProgrammable :
device is read-only but a write or erase
operation was specified
copyDeviceFull :
device is filled to capacity
copyFileOpenError :
invalid file name; file not found in partition
copyFileTransferError :
                file transfer was unsuccessful; network failure
copyFileChecksumError :
file checksum in Flash failed
copyNoMemory :
system running low on memory
copyUnknownFailure :
failure unknown
copyProhibited:
stop user from overwriting current boot image file.
''',
'ciscoflashcopystatus',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashCopyTime', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Time taken for the copy operation. This object will
be like a stopwatch, starting when the operation
starts, stopping when the operation completes.
If a management entity keeps a database of completion
times for various operations, it can then use the
stopwatch capability to display percentage completion
time.
''',
'ciscoflashcopytime',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashCopyVerify', ATTRIBUTE, 'bool' , None, None,
[], [],
                ''' Specifies whether the file being copied needs to
                be verified for integrity / authenticity after the
                copy succeeds. If it is set to true, and the copied
                file either lacks an integrity / authenticity
                attachment or fails the integrity / authenticity
                check, then the command will be aborted and the
                copied file will be deleted from the destination
                file system.
''',
'ciscoflashcopyverify',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashCopyEntry',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashcopytable' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashcopytable',
False,
[
_MetaInfoClassMember('ciscoFlashCopyEntry', REFERENCE_LIST, 'Ciscoflashcopyentry' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashcopytable.Ciscoflashcopyentry',
[], [],
''' A Flash copy operation entry. Each entry consists
of a command, a source, and optional parameters such
as protocol to be used, a destination, a server address,
etc.
A management station wishing to create an entry should
first generate a pseudo-random serial number to be used
as the index to this sparse table. The station should
then create the associated instance of the row status
object. It must also, either in the same or in successive
PDUs, create the associated instance of the command and
parameter objects. It should also modify the default values
for any of the parameter objects if the defaults are not
appropriate.
Once the appropriate instances of all the command
objects have been created, either by an explicit SNMP
set request or by default, the row status should be set
to active to initiate the operation. Note that this entire
procedure may be initiated via a single set request which
specifies a row status of createAndGo as well as specifies
valid values for the non-defaulted parameter objects.
Once an operation has been activated, it cannot be
stopped.
Once the operation completes, the management station should
retrieve the value of the status object (and time if
desired), and delete the entry. In order to prevent old
entries from clogging the table, entries will be aged out,
but an entry will never be deleted within 5 minutes of
completing.
''',
'ciscoflashcopyentry',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashCopyTable',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashpartitioningtable.Ciscoflashpartitioningentry.CiscoflashpartitioningcommandEnum' : _MetaInfoEnum('CiscoflashpartitioningcommandEnum', 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB',
{
'partition':'partition',
}, 'CISCO-FLASH-MIB', _yang_ns._namespaces['CISCO-FLASH-MIB']),
'CiscoFlashMib.Ciscoflashpartitioningtable.Ciscoflashpartitioningentry.CiscoflashpartitioningstatusEnum' : _MetaInfoEnum('CiscoflashpartitioningstatusEnum', 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB',
{
'partitioningInProgress':'partitioningInProgress',
'partitioningOperationSuccess':'partitioningOperationSuccess',
'partitioningInvalidOperation':'partitioningInvalidOperation',
'partitioningInvalidDestName':'partitioningInvalidDestName',
'partitioningInvalidPartitionCount':'partitioningInvalidPartitionCount',
'partitioningInvalidPartitionSizes':'partitioningInvalidPartitionSizes',
'partitioningDeviceBusy':'partitioningDeviceBusy',
'partitioningDeviceOpenError':'partitioningDeviceOpenError',
'partitioningDeviceError':'partitioningDeviceError',
'partitioningNoMemory':'partitioningNoMemory',
'partitioningUnknownFailure':'partitioningUnknownFailure',
}, 'CISCO-FLASH-MIB', _yang_ns._namespaces['CISCO-FLASH-MIB']),
'CiscoFlashMib.Ciscoflashpartitioningtable.Ciscoflashpartitioningentry' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashpartitioningtable.Ciscoflashpartitioningentry',
False,
[
_MetaInfoClassMember('ciscoFlashPartitioningSerialNumber', ATTRIBUTE, 'int' , None, None,
[('0', '2147483647')], [],
''' Object which specifies a unique entry in the partitioning
operations table. A management station wishing to initiate
a partitioning operation should use a pseudo-random value
for this object when creating or modifying an instance of
a ciscoFlashPartitioningEntry.
''',
'ciscoflashpartitioningserialnumber',
'CISCO-FLASH-MIB', True),
_MetaInfoClassMember('ciscoFlashPartitioningCommand', REFERENCE_ENUM_CLASS, 'CiscoflashpartitioningcommandEnum' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashpartitioningtable.Ciscoflashpartitioningentry.CiscoflashpartitioningcommandEnum',
[], [],
''' The partitioning command to be executed. Mandatory.
If the command is unsupported, the
partitioningInvalidOperation
error will be reported in the operation status.
Command Remarks
partition Partition a Flash device.
All the prerequisites for
partitioning must be met for
this command to succeed.
Command table Parameters
1) partition PartitioningDestinationName
PartitioningPartitionCount
PartitioningPartitionSizes (opt)
PartitioningNotifyOnCompletion (opt)
''',
'ciscoflashpartitioningcommand',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitioningDestinationName', ATTRIBUTE, 'str' , None, None,
[(0, 255)], [],
''' Destination device name. This name will be the value
obtained from FlashDeviceName.
If the name is not specified, the default Flash device
will be assumed.
''',
'ciscoflashpartitioningdestinationname',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitioningEntryStatus', REFERENCE_ENUM_CLASS, 'RowstatusEnum' , 'ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowstatusEnum',
[], [],
''' The status of this table entry.
''',
'ciscoflashpartitioningentrystatus',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitioningNotifyOnCompletion', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Specifies whether or not a notification should be
generated on the completion of the partitioning operation.
If specified, ciscoFlashPartitioningCompletionTrap
will be generated. It is the responsibility of the
management entity to ensure that the SNMP administrative
model is configured in such a way as to allow the
notification to be delivered.
''',
'ciscoflashpartitioningnotifyoncompletion',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitioningPartitionCount', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' This object is used to specify the number of
partitions to be created. Its value cannot exceed
the value of ciscoFlashDeviceMaxPartitions.
To undo partitioning (revert to a single partition),
this object must have the value 1.
''',
'ciscoflashpartitioningpartitioncount',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitioningPartitionSizes', ATTRIBUTE, 'str' , None, None,
[], [],
''' This object is used to explicitly specify the size
of each partition to be created.
The size of each partition will be in units of
ciscoFlashDeviceMinPartitionSize.
The value of this object will be in the form:
<part1>:<part2>...:<partn>
If partition sizes are not specified, the system
will calculate default sizes based on the partition
count, the minimum partition size, and the device
size. Partition size need not be specified when
undoing partitioning (partition count is 1).
If partition sizes are specified, the number of
sizes specified must exactly match the partition
count. If not, the partitioning command will be
                rejected with the invalidPartitionSizes error.
''',
'ciscoflashpartitioningpartitionsizes',
'CISCO-FLASH-MIB', False),
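            # Illustrative values only: splitting a device into two
            # partitions of 4 units each (in units of
            # ciscoFlashDeviceMinPartitionSize) would use the sizes
            # string '4:4' together with a partition count of 2.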
_MetaInfoClassMember('ciscoFlashPartitioningStatus', REFERENCE_ENUM_CLASS, 'CiscoflashpartitioningstatusEnum' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashpartitioningtable.Ciscoflashpartitioningentry.CiscoflashpartitioningstatusEnum',
[], [],
''' The status of the specified partitioning operation.
partitioningInProgress :
specified operation is active
partitioningOperationSuccess :
specified operation is supported and completed
successfully
partitioningInvalidOperation :
command invalid or command-protocol-device
combination unsupported
partitioningInvalidDestName :
invalid target name (file or partition or
device name) specified
partitioningInvalidPartitionCount :
invalid partition count specified for the
partitioning command
partitioningInvalidPartitionSizes :
invalid partition size, or invalid count of
partition sizes
partitioningDeviceBusy :
specified device is in use and locked by
another process
partitioningDeviceOpenError :
invalid device name
partitioningDeviceError :
device read, write or erase error
partitioningNoMemory :
system running low on memory
partitioningUnknownFailure :
failure unknown
''',
'ciscoflashpartitioningstatus',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitioningTime', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Time taken for the operation. This object will
be like a stopwatch, starting when the operation
starts, stopping when the operation completes.
If a management entity keeps a database of completion
times for various operations, it can then use the
stopwatch capability to display percentage completion
time.
''',
'ciscoflashpartitioningtime',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashPartitioningEntry',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashpartitioningtable' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashpartitioningtable',
False,
[
_MetaInfoClassMember('ciscoFlashPartitioningEntry', REFERENCE_LIST, 'Ciscoflashpartitioningentry' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashpartitioningtable.Ciscoflashpartitioningentry',
[], [],
''' A Flash partitioning operation entry. Each entry
consists of the command, the target device, the
partition count, and optionally the partition sizes.
A management station wishing to create an entry should
first generate a pseudo-random serial number to be used
as the index to this sparse table. The station should
then create the associated instance of the row status
object. It must also, either in the same or in successive
PDUs, create the associated instance of the command and
parameter objects. It should also modify the default values
for any of the parameter objects if the defaults are not
appropriate.
Once the appropriate instances of all the command
objects have been created, either by an explicit SNMP
set request or by default, the row status should be set
to active to initiate the operation. Note that this entire
procedure may be initiated via a single set request which
specifies a row status of createAndGo as well as specifies
valid values for the non-defaulted parameter objects.
Once an operation has been activated, it cannot be
stopped.
Once the operation completes, the management station should
retrieve the value of the status object (and time if
desired), and delete the entry. In order to prevent old
entries from clogging the table, entries will be aged out,
but an entry will never be deleted within 5 minutes of
completing.
''',
'ciscoflashpartitioningentry',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashPartitioningTable',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashmiscoptable.Ciscoflashmiscopentry.CiscoflashmiscopcommandEnum' : _MetaInfoEnum('CiscoflashmiscopcommandEnum', 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB',
{
'erase':'erase',
'verify':'verify',
'delete':'delete',
'undelete':'undelete',
'squeeze':'squeeze',
'format':'format',
}, 'CISCO-FLASH-MIB', _yang_ns._namespaces['CISCO-FLASH-MIB']),
'CiscoFlashMib.Ciscoflashmiscoptable.Ciscoflashmiscopentry.CiscoflashmiscopstatusEnum' : _MetaInfoEnum('CiscoflashmiscopstatusEnum', 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB',
{
'miscOpInProgress':'miscOpInProgress',
'miscOpOperationSuccess':'miscOpOperationSuccess',
'miscOpInvalidOperation':'miscOpInvalidOperation',
'miscOpInvalidDestName':'miscOpInvalidDestName',
'miscOpDeviceBusy':'miscOpDeviceBusy',
'miscOpDeviceOpenError':'miscOpDeviceOpenError',
'miscOpDeviceError':'miscOpDeviceError',
'miscOpDeviceNotProgrammable':'miscOpDeviceNotProgrammable',
'miscOpFileOpenError':'miscOpFileOpenError',
'miscOpFileDeleteFailure':'miscOpFileDeleteFailure',
'miscOpFileUndeleteFailure':'miscOpFileUndeleteFailure',
'miscOpFileChecksumError':'miscOpFileChecksumError',
'miscOpNoMemory':'miscOpNoMemory',
'miscOpUnknownFailure':'miscOpUnknownFailure',
'miscOpSqueezeFailure':'miscOpSqueezeFailure',
'miscOpNoSuchFile':'miscOpNoSuchFile',
'miscOpFormatFailure':'miscOpFormatFailure',
}, 'CISCO-FLASH-MIB', _yang_ns._namespaces['CISCO-FLASH-MIB']),
'CiscoFlashMib.Ciscoflashmiscoptable.Ciscoflashmiscopentry' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashmiscoptable.Ciscoflashmiscopentry',
False,
[
_MetaInfoClassMember('ciscoFlashMiscOpSerialNumber', ATTRIBUTE, 'int' , None, None,
[('0', '2147483647')], [],
''' Object which specifies a unique entry in the
table. A management station wishing to initiate a
flash operation should use a pseudo-random value for
this object when creating or modifying an instance of
a ciscoFlashMiscOpEntry.
''',
'ciscoflashmiscopserialnumber',
'CISCO-FLASH-MIB', True),
_MetaInfoClassMember('ciscoFlashMiscOpCommand', REFERENCE_ENUM_CLASS, 'CiscoflashmiscopcommandEnum' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashmiscoptable.Ciscoflashmiscopentry.CiscoflashmiscopcommandEnum',
[], [],
''' The command to be executed. Mandatory.
Note that it is possible for a system to support
multiple file systems (different file systems on
different Flash devices, or different file systems
on different partitions within a device). Each such
file system may support only a subset of these commands.
If a command is unsupported, the miscOpInvalidOperation(3)
error will be reported in the operation status.
Command Remarks
erase Erase flash.
verify Verify flash file checksum.
delete Delete a file.
                undelete Revive a deleted file.
Note that there are limits on
the number of times a file can
be deleted and undeleted. When
this limit is exceeded, the
system will return the appropriate
error.
squeeze Recover space occupied by
deleted files. This command
preserves the good files, erases
out the file system, then restores
the preserved good files.
format Format a flash device.
Command table Parameters
erase MiscOpDestinationName
MiscOpNotifyOnCompletion (opt)
verify MiscOpDestinationName
MiscOpNotifyOnCompletion (opt)
delete MiscOpDestinationName
MiscOpNotifyOnCompletion (opt)
undelete MiscOpDestinationName
MiscOpNotifyOnCompletion (opt)
squeeze MiscOpDestinationName
MiscOpNotifyOnCompletion (opt)
format MiscOpDestinationName
MiscOpNotifyOnCompletion (opt)
''',
'ciscoflashmiscopcommand',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashMiscOpDestinationName', ATTRIBUTE, 'str' , None, None,
[(0, 255)], [],
''' Destination file, or partition name.
File name must be of the form
                [<device>:][<partition>:]<file>
where <device> is a value obtained from FlashDeviceName,
<partition> is obtained from FlashPartitionName
and <file> is the name of a file in Flash.
While leading and/or trailing whitespaces are acceptable,
no whitespaces are allowed within the path itself.
A management station could derive its own partition name
as per the description for the ciscoFlashPartitionName
object.
If <device> is not specified, the default Flash device
will be assumed.
If <partition> is not specified, the default partition
will be assumed. If a device is not partitioned into 2
or more partitions, this value may be left out.
                For an operation on a partition, e.g., the erase
                command, this object would specify the partition name
                in the form:
                [<device>:][<partition>:]
''',
'ciscoflashmiscopdestinationname',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashMiscOpEntryStatus', REFERENCE_ENUM_CLASS, 'RowstatusEnum' , 'ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowstatusEnum',
[], [],
''' The status of this table entry.
''',
'ciscoflashmiscopentrystatus',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashMiscOpNotifyOnCompletion', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Specifies whether or not a notification should be
generated on the completion of an operation.
If specified, ciscoFlashMiscOpCompletionTrap
will be generated. It is the responsibility of the
management entity to ensure that the SNMP administrative
model is configured in such a way as to allow the
notification to be delivered.
''',
'ciscoflashmiscopnotifyoncompletion',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashMiscOpStatus', REFERENCE_ENUM_CLASS, 'CiscoflashmiscopstatusEnum' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashmiscoptable.Ciscoflashmiscopentry.CiscoflashmiscopstatusEnum',
[], [],
''' The status of the specified operation.
miscOpInProgress :
specified operation is active
miscOpOperationSuccess :
specified operation is supported and completed
successfully
miscOpInvalidOperation :
command invalid or command-protocol-device
combination unsupported
miscOpInvalidDestName :
invalid target name (file or partition or
device name) specified
miscOpDeviceBusy :
specified device is in use and locked by another
process
miscOpDeviceOpenError :
invalid device name
miscOpDeviceError :
device read, write or erase error
miscOpDeviceNotProgrammable :
device is read-only but a write or erase
operation was specified
miscOpFileOpenError :
invalid file name; file not found in partition
miscOpFileDeleteFailure :
file could not be deleted; delete count exceeded
miscOpFileUndeleteFailure :
file could not be undeleted; undelete count
exceeded
miscOpFileChecksumError :
file has a bad checksum
miscOpNoMemory :
system running low on memory
miscOpUnknownFailure :
failure unknown
miscOpSqueezeFailure :
the squeeze operation failed
miscOpNoSuchFile :
a valid but nonexistent file name was specified
miscOpFormatFailure :
the format operation failed
''',
'ciscoflashmiscopstatus',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashMiscOpTime', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Time taken for the operation. This object will
be like a stopwatch, starting when the operation
starts, stopping when the operation completes.
If a management entity keeps a database of completion
times for various operations, it can then use the
stopwatch capability to display percentage completion
time.
''',
'ciscoflashmiscoptime',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashMiscOpEntry',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib.Ciscoflashmiscoptable' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib.Ciscoflashmiscoptable',
False,
[
_MetaInfoClassMember('ciscoFlashMiscOpEntry', REFERENCE_LIST, 'Ciscoflashmiscopentry' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashmiscoptable.Ciscoflashmiscopentry',
[], [],
''' A Flash operation entry. Each entry consists of a
command, a target, and any optional parameters.
A management station wishing to create an entry should
first generate a pseudo-random serial number to be used
as the index to this sparse table. The station should
then create the associated instance of the row status
object. It must also, either in the same or in successive
PDUs, create the associated instance of the command and
parameter objects. It should also modify the default values
for any of the parameter objects if the defaults are not
appropriate.
Once the appropriate instances of all the command
objects have been created, either by an explicit SNMP
set request or by default, the row status should be set
to active to initiate the operation. Note that this entire
procedure may be initiated via a single set request which
specifies a row status of createAndGo as well as specifies
valid values for the non-defaulted parameter objects.
Once an operation has been activated, it cannot be
stopped.
Once the operation completes, the management station should
retrieve the value of the status object (and time if
desired), and delete the entry. In order to prevent old
entries from clogging the table, entries will be aged out,
but an entry will never be deleted within 5 minutes of
completing.
''',
'ciscoflashmiscopentry',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'ciscoFlashMiscOpTable',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
'CiscoFlashMib' : {
'meta_info' : _MetaInfoClass('CiscoFlashMib',
False,
[
_MetaInfoClassMember('ciscoFlashCfg', REFERENCE_CLASS, 'Ciscoflashcfg' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashcfg',
[], [],
''' ''',
'ciscoflashcfg',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashChipTable', REFERENCE_CLASS, 'Ciscoflashchiptable' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashchiptable',
[], [],
''' Table of Flash device chip properties for each
initialized Flash device.
This table is meant primarily for aiding error
diagnosis.
''',
'ciscoflashchiptable',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashCopyTable', REFERENCE_CLASS, 'Ciscoflashcopytable' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashcopytable',
[], [],
''' A table of Flash copy operation entries. Each
entry represents a Flash copy operation (to or
from Flash) that has been initiated.
''',
'ciscoflashcopytable',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDevice', REFERENCE_CLASS, 'Ciscoflashdevice' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashdevice',
[], [],
''' ''',
'ciscoflashdevice',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashDeviceTable', REFERENCE_CLASS, 'Ciscoflashdevicetable' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashdevicetable',
[], [],
''' Table of Flash device properties for each initialized
Flash device. Each Flash device installed in a system
is detected, sized, and initialized when the system
image boots up.
For removable Flash devices, the device properties
will be dynamically deleted and recreated as the
device is removed and inserted. Note that in this
case, the newly inserted device may not be the same as
the earlier removed one. The ciscoFlashDeviceInitTime
object is available for a management station to determine
the time at which a device was initialized, and thereby
detect the change of a removable device.
A removable device that has not been installed will
also have an entry in this table. This is to let a
management station know about a removable device that
has been removed. Since a removed device obviously
cannot be sized and initialized, the table entry for
such a device will have
ciscoFlashDeviceSize equal to zero,
and the following objects will have
an indeterminate value:
ciscoFlashDeviceMinPartitionSize,
ciscoFlashDeviceMaxPartitions,
ciscoFlashDevicePartitions, and
ciscoFlashDeviceChipCount.
ciscoFlashDeviceRemovable will be
true to indicate it is removable.
''',
'ciscoflashdevicetable',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashFileByTypeTable', REFERENCE_CLASS, 'Ciscoflashfilebytypetable' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashfilebytypetable',
[], [],
''' Table of information for files on the manageable
flash devices sorted by File Types.
''',
'ciscoflashfilebytypetable',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashFileTable', REFERENCE_CLASS, 'Ciscoflashfiletable' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashfiletable',
[], [],
''' Table of information for files in a Flash partition.
''',
'ciscoflashfiletable',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashMiscOpTable', REFERENCE_CLASS, 'Ciscoflashmiscoptable' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashmiscoptable',
[], [],
''' A table of misc Flash operation entries. Each
entry represents a Flash operation that
has been initiated.
''',
'ciscoflashmiscoptable',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitioningTable', REFERENCE_CLASS, 'Ciscoflashpartitioningtable' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashpartitioningtable',
[], [],
''' A table of Flash partitioning operation entries. Each
entry represents a Flash partitioning operation that
has been initiated.
''',
'ciscoflashpartitioningtable',
'CISCO-FLASH-MIB', False),
_MetaInfoClassMember('ciscoFlashPartitionTable', REFERENCE_CLASS, 'Ciscoflashpartitiontable' , 'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB', 'CiscoFlashMib.Ciscoflashpartitiontable',
[], [],
''' Table of flash device partition properties for each
initialized flash partition. Whenever there is no
explicit partitioning done, a single partition spanning
the entire device will be assumed to exist. There will
                therefore always be at least one partition on a device.
''',
'ciscoflashpartitiontable',
'CISCO-FLASH-MIB', False),
],
'CISCO-FLASH-MIB',
'CISCO-FLASH-MIB',
_yang_ns._namespaces['CISCO-FLASH-MIB'],
'ydk.models.cisco_ios_xe.CISCO_FLASH_MIB'
),
},
}
_meta_table['CiscoFlashMib.Ciscoflashdevicetable.Ciscoflashdeviceentry']['meta_info'].parent =_meta_table['CiscoFlashMib.Ciscoflashdevicetable']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashchiptable.Ciscoflashchipentry']['meta_info'].parent =_meta_table['CiscoFlashMib.Ciscoflashchiptable']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashpartitiontable.Ciscoflashpartitionentry']['meta_info'].parent =_meta_table['CiscoFlashMib.Ciscoflashpartitiontable']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashfiletable.Ciscoflashfileentry']['meta_info'].parent =_meta_table['CiscoFlashMib.Ciscoflashfiletable']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashfilebytypetable.Ciscoflashfilebytypeentry']['meta_info'].parent =_meta_table['CiscoFlashMib.Ciscoflashfilebytypetable']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashcopytable.Ciscoflashcopyentry']['meta_info'].parent =_meta_table['CiscoFlashMib.Ciscoflashcopytable']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashpartitioningtable.Ciscoflashpartitioningentry']['meta_info'].parent =_meta_table['CiscoFlashMib.Ciscoflashpartitioningtable']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashmiscoptable.Ciscoflashmiscopentry']['meta_info'].parent =_meta_table['CiscoFlashMib.Ciscoflashmiscoptable']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashdevice']['meta_info'].parent =_meta_table['CiscoFlashMib']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashcfg']['meta_info'].parent =_meta_table['CiscoFlashMib']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashdevicetable']['meta_info'].parent =_meta_table['CiscoFlashMib']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashchiptable']['meta_info'].parent =_meta_table['CiscoFlashMib']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashpartitiontable']['meta_info'].parent =_meta_table['CiscoFlashMib']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashfiletable']['meta_info'].parent =_meta_table['CiscoFlashMib']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashfilebytypetable']['meta_info'].parent =_meta_table['CiscoFlashMib']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashcopytable']['meta_info'].parent =_meta_table['CiscoFlashMib']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashpartitioningtable']['meta_info'].parent =_meta_table['CiscoFlashMib']['meta_info']
_meta_table['CiscoFlashMib.Ciscoflashmiscoptable']['meta_info'].parent =_meta_table['CiscoFlashMib']['meta_info']
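# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated YDK bindings). The
# ciscoFlashCopyTable description above outlines the row-creation protocol:
# pick a pseudo-random serial number, create and activate the row with
# RowStatus createAndGo(4) alongside the command parameters, then poll
# ciscoFlashCopyStatus and delete the row. The function below sketches the
# initial set request with pysnmp, assuming the CISCO-FLASH-MIB sources have
# been compiled and are resolvable by pysnmp; the host, community string, and
# file name are placeholder values. It is defined but never invoked here, and
# imports pysnmp lazily so the module has no hard dependency on it.
def _example_initiate_flash_copy(host='192.0.2.1', community='private'):
    import random
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity,
                              Integer32, OctetString, setCmd)
    serial = random.randint(0, 2147483647)  # pseudo-random index into the sparse table
    error_indication, error_status, _, _ = next(setCmd(
        SnmpEngine(),
        CommunityData(community),
        UdpTransportTarget((host, 161)),
        ContextData(),
        # RowStatus createAndGo(4) creates and activates the row in one set.
        ObjectType(ObjectIdentity('CISCO-FLASH-MIB',
                                  'ciscoFlashCopyEntryStatus', serial),
                   Integer32(4)),
        # copyToFlashWithErase is the first enumerated command value (1).
        ObjectType(ObjectIdentity('CISCO-FLASH-MIB',
                                  'ciscoFlashCopyCommand', serial),
                   Integer32(1)),
        ObjectType(ObjectIdentity('CISCO-FLASH-MIB',
                                  'ciscoFlashCopySourceName', serial),
                   OctetString('c7200-js-mz')),
        # The server address and any optional parameters would be supplied
        # the same way, before or together with row activation.
    ))
    return error_indication or error_status
# ---------------------------------------------------------------------------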
|
111pontes/ydk-py
|
cisco-ios-xe/ydk/models/cisco_ios_xe/_meta/_CISCO_FLASH_MIB.py
|
Python
|
apache-2.0
| 120,346
|
[
"VisIt"
] |
1476d424703121827cee9b48a92c97f719fd462b18ecfbb0d1b26079391b03cf
|
"""ovf2vtk -- data conversion from OOMMF's ovf/ovf file format to VTK.
This module contains submodules
omfread -- reading of ovf files
analysis -- some post processing.
There is an executable (usually named ovf2vtk) which imports these modules
and can be used to convert ovf files to vtk from the command prompt.
hans.fangohr@physics.org (fangohr 02/05/2005 14:33)
"""
from ovf2vtk import __version__
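# Illustrative command-line usage only (file names are placeholders; consult
# the installed ovf2vtk executable's --help output for the actual options):
#
#     ovf2vtk data.omf data.vtk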
|
fangohr/ovf2vtk
|
ovf2vtk/Lib/__init__.py
|
Python
|
bsd-2-clause
| 413
|
[
"VTK"
] |
6bb9b711fa1cf55294eb5c192cf2d0b602a02ebe05d6efe4a04416f5816e215d
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from phoenix.dashboard.views import DashboardView
from django.views.generic import RedirectView
from phoenix.users.views import LoginView
urlpatterns = [
    # url(r'^$', DashboardView.as_view(), name='dashboard'),
    url(r'^$', RedirectView.as_view(pattern_name='animals.animal_list'), name='dashboard'),
url(r'^records/', include('phoenix.records.urls')),
url(r'^health/', include('phoenix.health.urls')),
url(r'^animals/', include('phoenix.animals.urls')),
url(r'^groups/', include('phoenix.groups.urls')),
# Django Admin
url(r'^grappelli/', include('grappelli.urls')),
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^accounts/', include('allauth.urls')),
url(r'^users/', include('phoenix.users.urls', namespace="users")),
url(r'^login/$', LoginView.as_view(), name='user_login'),
# Third party URLs
url(r'^select2/', include('django_select2.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
|
savioabuga/phoenix
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,821
|
[
"VisIt"
] |
328e09aa6132204bcd0589809a544609df6c3b7af6ac7023cb9ec9de770dbf65
|
# -*- coding: utf-8 -*-
import numpy as np
from dipy.reconst.cache import Cache
from dipy.core.geometry import cart2sphere
from dipy.reconst.multi_voxel import multi_voxel_fit
from scipy.special import genlaguerre, gamma
from dipy.core.gradients import gradient_table_from_gradient_strength_bvecs
from scipy import special
from warnings import warn
from dipy.reconst import mapmri
try: # preferred scipy >= 0.14, required scipy >= 1.0
from scipy.special import factorial, factorial2
except ImportError:
from scipy.misc import factorial, factorial2
from scipy.optimize import fmin_l_bfgs_b
from dipy.reconst.shm import real_sph_harm
import dipy.reconst.dti as dti
from dipy.utils.optpkg import optional_package
import random
cvxpy, have_cvxpy, _ = optional_package("cvxpy")
plt, have_plt, _ = optional_package("matplotlib.pyplot")
class QtdmriModel(Cache):
r"""The q$\tau$-dMRI model [1] to analytically and continuously represent
the q$\tau$ diffusion signal attenuation over diffusion sensitization
q and diffusion time $\tau$. The model can be seen as an extension of
the MAP-MRI basis [2] towards different diffusion times.
The main idea is to model the diffusion signal over time and space as
a linear combination of continuous functions,
    .. math::
:nowrap:
\begin{equation}
\hat{E}(\textbf{q},\tau;\textbf{c}) =
\sum_i^{N_{\textbf{q}}}\sum_k^{N_\tau} \textbf{c}_{ik}
\,\Phi_i(\textbf{q})\,T_k(\tau),
\end{equation}
    where $\Phi$ and $T$ are the spatial and temporal basis functions,
$N_{\textbf{q}}$ and $N_\tau$ are the maximum spatial and temporal
order, and $i,k$ are basis order iterators.
The estimation of the coefficients $c_i$ can be regularized using
either analytic Laplacian regularization, sparsity regularization using
the l1-norm, or both to do a type of elastic net regularization.
From the coefficients, there exists an analytical formula to estimate
the ODF, RTOP, RTAP, RTPP, QIV and MSD, for any diffusion time.
Parameters
----------
gtab : GradientTable,
        gradient directions and bvalues container class. The bvalues
        should be given in the usual s/mm^2 units. big_delta and
        small_delta need to be given in seconds.
radial_order : unsigned int,
an even integer representing the spatial/radial order of the basis.
time_order : unsigned int,
        an integer greater than or equal to zero representing the time order
of the basis.
laplacian_regularization : bool,
Regularize using the Laplacian of the qt-dMRI basis.
    laplacian_weighting : string or scalar,
The string 'GCV' makes it use generalized cross-validation to find
the regularization weight [3]. A scalar sets the regularization
weight to that value.
l1_regularization : bool,
Regularize by imposing sparsity in the coefficients using the
l1-norm.
l1_weighting : 'CV' or scalar,
The string 'CV' makes it use five-fold cross-validation to find
the regularization weight. A scalar sets the regularization weight
to that value.
cartesian : bool
Whether to use the Cartesian or spherical implementation of the
qt-dMRI basis, which we first explored in [4].
anisotropic_scaling : bool
Whether to use anisotropic scaling or isotropic scaling. This
option can be used to test if the Cartesian implementation is
equivalent with the spherical one when using the same scaling.
normalization : bool
Whether to normalize the basis functions such that their inner
product is equal to one. Normalization is only necessary when
imposing sparsity in the spherical basis if cartesian=False.
constrain_q0 : bool
whether to constrain the q0 point to unity along the tau-space.
This is necessary to ensure that $E(0,\tau)=1$.
bval_threshold : float
the threshold b-value to be used, such that only data points below
that threshold are used when estimating the scale factors.
eigenvalue_threshold : float,
Sets the minimum of the tensor eigenvalues in order to avoid
        stability problems.
cvxpy_solver : str, optional
cvxpy solver name. Optionally optimize the positivity constraint
        with a particular cvxpy solver. See http://www.cvxpy.org/ for
details. Default: ECOS.
References
----------
.. [1] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
.. [2] Ozarslan E. et al., "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. [3] Craven et al. "Smoothing Noisy Data with Spline Functions."
NUMER MATH 31.4 (1978): 377-403.
.. [4] Fick, Rutger HJ, et al. "A unifying framework for spatial and
temporal diffusion in diffusion mri." International Conference on
Information Processing in Medical Imaging. Springer, Cham, 2015.
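    Examples
    --------
    A minimal usage sketch, assuming ``gtab`` is a GradientTable with
    big_delta and small_delta set and ``data`` holds the measured
    signal attenuation:
    >>> qtdmri_model = QtdmriModel(gtab, radial_order=6, time_order=2)  # doctest: +SKIP
    >>> qtdmri_fit = qtdmri_model.fit(data)  # doctest: +SKIP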
"""
def __init__(self,
gtab,
radial_order=6,
time_order=2,
laplacian_regularization=False,
laplacian_weighting=0.2,
l1_regularization=False,
l1_weighting=0.1,
cartesian=True,
anisotropic_scaling=True,
normalization=False,
constrain_q0=True,
bval_threshold=1e10,
eigenvalue_threshold=1e-04,
cvxpy_solver="ECOS"
):
if radial_order % 2 or radial_order < 0:
msg = "radial_order must be zero or an even positive integer."
msg += " radial_order %s was given." % radial_order
raise ValueError(msg)
if time_order < 0:
msg = "time_order must be larger or equal than zero integer."
msg += " time_order %s was given." % time_order
raise ValueError(msg)
if not isinstance(laplacian_regularization, bool):
msg = "laplacian_regularization must be True or False."
msg += " Input value was %s." % laplacian_regularization
raise ValueError(msg)
if laplacian_regularization:
msg = "laplacian_regularization weighting must be 'GCV' "
msg += "or a float larger or equal than zero."
msg += " Input value was %s." % laplacian_weighting
if isinstance(laplacian_weighting, str):
                if laplacian_weighting != 'GCV':
raise ValueError(msg)
elif isinstance(laplacian_weighting, float):
if laplacian_weighting < 0:
raise ValueError(msg)
else:
raise ValueError(msg)
if not isinstance(l1_regularization, bool):
msg = "l1_regularization must be True or False."
msg += " Input value was %s." % l1_regularization
raise ValueError(msg)
if l1_regularization:
msg = "l1_weighting weighting must be 'CV' "
msg += "or a float larger or equal than zero."
msg += " Input value was %s." % l1_weighting
if isinstance(l1_weighting, str):
                if l1_weighting != 'CV':
raise ValueError(msg)
elif isinstance(l1_weighting, float):
if l1_weighting < 0:
raise ValueError(msg)
else:
raise ValueError(msg)
if not isinstance(cartesian, bool):
msg = "cartesian must be True or False."
msg += " Input value was %s." % cartesian
raise ValueError(msg)
if not isinstance(anisotropic_scaling, bool):
msg = "anisotropic_scaling must be True or False."
msg += " Input value was %s." % anisotropic_scaling
raise ValueError(msg)
if not isinstance(constrain_q0, bool):
msg = "constrain_q0 must be True or False."
msg += " Input value was %s." % constrain_q0
raise ValueError(msg)
if (not isinstance(bval_threshold, float) or
bval_threshold < 0):
msg = "bval_threshold must be a positive float."
msg += " Input value was %s." % bval_threshold
raise ValueError(msg)
if (not isinstance(eigenvalue_threshold, float) or
eigenvalue_threshold < 0):
msg = "eigenvalue_threshold must be a positive float."
msg += " Input value was %s." % eigenvalue_threshold
raise ValueError(msg)
if laplacian_regularization or l1_regularization:
if not have_cvxpy:
msg = "cvxpy must be installed for Laplacian or l1 "
msg += "regularization."
raise ValueError(msg)
if cvxpy_solver is not None:
if cvxpy_solver not in cvxpy.installed_solvers():
msg = "Input `cvxpy_solver` was set to %s." % cvxpy_solver
msg += " One of %s" % ', '.join(cvxpy.installed_solvers())
msg += " was expected."
raise ValueError(msg)
if l1_regularization and not cartesian and not normalization:
msg = "The non-Cartesian implementation must be normalized for the"
msg += " l1-norm sparsity regularization to work. Set "
msg += "normalization=True to proceed."
raise ValueError(msg)
self.gtab = gtab
self.radial_order = radial_order
self.time_order = time_order
self.laplacian_regularization = laplacian_regularization
self.laplacian_weighting = laplacian_weighting
self.l1_regularization = l1_regularization
self.l1_weighting = l1_weighting
self.cartesian = cartesian
self.anisotropic_scaling = anisotropic_scaling
self.normalization = normalization
self.constrain_q0 = constrain_q0
self.bval_threshold = bval_threshold
self.eigenvalue_threshold = eigenvalue_threshold
self.cvxpy_solver = cvxpy_solver
if self.cartesian:
self.ind_mat = qtdmri_index_matrix(radial_order, time_order)
else:
self.ind_mat = qtdmri_isotropic_index_matrix(radial_order,
time_order)
# precompute parts of laplacian regularization matrices
self.part4_reg_mat_tau = part4_reg_matrix_tau(self.ind_mat, 1.)
self.part23_reg_mat_tau = part23_reg_matrix_tau(self.ind_mat, 1.)
self.part1_reg_mat_tau = part1_reg_matrix_tau(self.ind_mat, 1.)
if self.cartesian:
self.S_mat, self.T_mat, self.U_mat = (
mapmri.mapmri_STU_reg_matrices(radial_order)
)
else:
self.part1_uq_iso_precomp = (
mapmri.mapmri_isotropic_laplacian_reg_matrix_from_index_matrix(
self.ind_mat[:, :3], 1.
)
)
self.tenmodel = dti.TensorModel(gtab)
@multi_voxel_fit
def fit(self, data):
bval_mask = self.gtab.bvals < self.bval_threshold
data_norm = data / data[self.gtab.b0s_mask].mean()
tau = self.gtab.tau
bvecs = self.gtab.bvecs
qvals = self.gtab.qvals
b0s_mask = self.gtab.b0s_mask
if self.cartesian:
if self.anisotropic_scaling:
us, ut, R = qtdmri_anisotropic_scaling(data_norm[bval_mask],
qvals[bval_mask],
bvecs[bval_mask],
tau[bval_mask])
tau_scaling = ut / us.mean()
tau_scaled = tau * tau_scaling
ut /= tau_scaling
us = np.clip(us, self.eigenvalue_threshold, np.inf)
q = np.dot(bvecs, R) * qvals[:, None]
M = qtdmri_signal_matrix_(
self.radial_order, self.time_order, us, ut, q, tau_scaled,
self.normalization
)
else:
us, ut = qtdmri_isotropic_scaling(data_norm, qvals, tau)
tau_scaling = ut / us
tau_scaled = tau * tau_scaling
ut /= tau_scaling
R = np.eye(3)
us = np.tile(us, 3)
q = bvecs * qvals[:, None]
M = qtdmri_signal_matrix_(
self.radial_order, self.time_order, us, ut, q, tau_scaled,
self.normalization
)
else:
us, ut = qtdmri_isotropic_scaling(data_norm, qvals, tau)
tau_scaling = ut / us
tau_scaled = tau * tau_scaling
ut /= tau_scaling
R = np.eye(3)
us = np.tile(us, 3)
q = bvecs * qvals[:, None]
M = qtdmri_isotropic_signal_matrix_(
self.radial_order, self.time_order, us[0], ut, q, tau_scaled,
normalization=self.normalization
)
b0_indices = np.arange(self.gtab.tau.shape[0])[self.gtab.b0s_mask]
tau0_ordered = self.gtab.tau[b0_indices]
unique_taus = np.unique(self.gtab.tau)
first_tau_pos = []
for unique_tau in unique_taus:
first_tau_pos.append(np.where(tau0_ordered == unique_tau)[0][0])
M0 = M[b0_indices[first_tau_pos]]
lopt = 0.
alpha = 0.
if self.laplacian_regularization and not self.l1_regularization:
if self.cartesian:
laplacian_matrix = qtdmri_laplacian_reg_matrix(
self.ind_mat, us, ut, self.S_mat, self.T_mat, self.U_mat,
self.part1_reg_mat_tau,
self.part23_reg_mat_tau,
self.part4_reg_mat_tau,
normalization=self.normalization
)
else:
laplacian_matrix = qtdmri_isotropic_laplacian_reg_matrix(
self.ind_mat, us, ut, self.part1_uq_iso_precomp,
self.part1_reg_mat_tau, self.part23_reg_mat_tau,
self.part4_reg_mat_tau,
normalization=self.normalization
)
if self.laplacian_weighting == 'GCV':
try:
lopt = generalized_crossvalidation(data_norm, M,
laplacian_matrix)
except BaseException:
msg = "Laplacian GCV failed. lopt defaulted to 2e-4."
warn(msg)
lopt = 2e-4
elif np.isscalar(self.laplacian_weighting):
lopt = self.laplacian_weighting
c = cvxpy.Variable(M.shape[1])
design_matrix = cvxpy.Constant(M)
objective = cvxpy.Minimize(
cvxpy.sum_squares(design_matrix * c - data_norm) +
lopt * cvxpy.quad_form(c, laplacian_matrix)
)
if self.constrain_q0:
                # just constrain the first and last, otherwise the solver fails
constraints = [M0[0] * c == 1,
M0[-1] * c == 1]
else:
constraints = []
prob = cvxpy.Problem(objective, constraints)
try:
prob.solve(solver=self.cvxpy_solver, verbose=False)
cvxpy_solution_optimal = prob.status == 'optimal'
qtdmri_coef = np.asarray(c.value).squeeze()
except BaseException:
qtdmri_coef = np.zeros(M.shape[1])
cvxpy_solution_optimal = False
elif self.l1_regularization and not self.laplacian_regularization:
if self.l1_weighting == 'CV':
alpha = l1_crossvalidation(b0s_mask, data_norm, M)
elif np.isscalar(self.l1_weighting):
alpha = self.l1_weighting
c = cvxpy.Variable(M.shape[1])
design_matrix = cvxpy.Constant(M)
objective = cvxpy.Minimize(
cvxpy.sum_squares(design_matrix * c - data_norm) +
alpha * cvxpy.norm1(c)
)
if self.constrain_q0:
                # just constrain the first and last, otherwise the solver fails
constraints = [M0[0] * c == 1,
M0[-1] * c == 1]
else:
constraints = []
prob = cvxpy.Problem(objective, constraints)
try:
prob.solve(solver=self.cvxpy_solver, verbose=False)
cvxpy_solution_optimal = prob.status == 'optimal'
qtdmri_coef = np.asarray(c.value).squeeze()
except BaseException:
qtdmri_coef = np.zeros(M.shape[1])
cvxpy_solution_optimal = False
elif self.l1_regularization and self.laplacian_regularization:
if self.cartesian:
laplacian_matrix = qtdmri_laplacian_reg_matrix(
self.ind_mat, us, ut, self.S_mat, self.T_mat, self.U_mat,
self.part1_reg_mat_tau,
self.part23_reg_mat_tau,
self.part4_reg_mat_tau,
normalization=self.normalization
)
else:
laplacian_matrix = qtdmri_isotropic_laplacian_reg_matrix(
self.ind_mat, us, ut, self.part1_uq_iso_precomp,
self.part1_reg_mat_tau, self.part23_reg_mat_tau,
self.part4_reg_mat_tau,
normalization=self.normalization
)
if self.laplacian_weighting == 'GCV':
lopt = generalized_crossvalidation(data_norm, M,
laplacian_matrix)
elif np.isscalar(self.laplacian_weighting):
lopt = self.laplacian_weighting
if self.l1_weighting == 'CV':
alpha = elastic_crossvalidation(b0s_mask, data_norm, M,
laplacian_matrix, lopt)
elif np.isscalar(self.l1_weighting):
alpha = self.l1_weighting
c = cvxpy.Variable(M.shape[1])
design_matrix = cvxpy.Constant(M)
objective = cvxpy.Minimize(
cvxpy.sum_squares(design_matrix * c - data_norm) +
alpha * cvxpy.norm1(c) +
lopt * cvxpy.quad_form(c, laplacian_matrix)
)
if self.constrain_q0:
                # just constrain the first and last, otherwise the solver fails
constraints = [M0[0] * c == 1,
M0[-1] * c == 1]
else:
constraints = []
prob = cvxpy.Problem(objective, constraints)
try:
prob.solve(solver=self.cvxpy_solver, verbose=False)
cvxpy_solution_optimal = prob.status == 'optimal'
qtdmri_coef = np.asarray(c.value).squeeze()
except BaseException:
qtdmri_coef = np.zeros(M.shape[1])
cvxpy_solution_optimal = False
elif not self.l1_regularization and not self.laplacian_regularization:
# just use least squares with the observation matrix
pseudoInv = np.linalg.pinv(M)
qtdmri_coef = np.dot(pseudoInv, data_norm)
            # if cvxpy is used to constrain q0 without regularization the
            # solver often fails, so only the first tau-position is manually
            # normalized.
qtdmri_coef /= np.dot(M0[0], qtdmri_coef)
cvxpy_solution_optimal = None
if cvxpy_solution_optimal is False:
msg = "cvxpy optimization resulted in non-optimal solution. Check "
msg += "cvxpy_solution_optimal attribute in fitted object to see "
msg += "which voxels are affected."
warn(msg)
return QtdmriFit(
self, qtdmri_coef, us, ut, tau_scaling, R, lopt, alpha,
cvxpy_solution_optimal)
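def _example_qtdmri_model_usage(gtab, data):
    # Illustrative usage sketch, not part of the original module. It assumes
    # the enclosing model class is named QtdmriModel (as in DIPY), that
    # `gtab` is a GradientTable with big_delta/small_delta set, and that
    # `data` holds the q-tau diffusion measurements.
    qtdmri_model = QtdmriModel(gtab, radial_order=6, time_order=2,
                               laplacian_regularization=True,
                               laplacian_weighting='GCV')
    qtdmri_fit = qtdmri_model.fit(data)
    # scalar q-space indices for a diffusion time of 25 ms
    return qtdmri_fit.rtop(tau=0.025), qtdmri_fit.msd(tau=0.025)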
class QtdmriFit():
def __init__(self, model, qtdmri_coef, us, ut, tau_scaling, R, lopt,
alpha, cvxpy_solution_optimal):
""" Calculates diffusion properties for a single voxel
Parameters
----------
model : object,
AnalyticalModel
qtdmri_coef : 1d ndarray,
qtdmri coefficients
us : array, 3 x 1
spatial scaling factors
ut : float
temporal scaling factor
        tau_scaling : float,
            the temporal scaling used to scale tau to the size of us
R : 3x3 numpy array,
tensor eigenvectors
lopt : float,
laplacian regularization weight
alpha : float,
the l1 regularization weight
        cvxpy_solution_optimal: bool,
            indicates whether the cvxpy coefficient estimation reached an
            optimal solution
"""
self.model = model
self._qtdmri_coef = qtdmri_coef
self.us = us
self.ut = ut
self.tau_scaling = tau_scaling
self.R = R
self.lopt = lopt
self.alpha = alpha
self.cvxpy_solution_optimal = cvxpy_solution_optimal
def qtdmri_to_mapmri_coef(self, tau):
"""This function converts the qtdmri coefficients to mapmri
coefficients for a given tau [1]_. The conversion is performed by a
        matrix multiplication that evaluates the time-dependent part of the
basis and multiplies it with the coefficients, after which coefficients
with the same spatial orders are summed up, resulting in mapmri
coefficients.
Parameters
----------
tau : float
diffusion time (big_delta - small_delta / 3.) in seconds
References
----------
.. [1] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
if self.model.cartesian:
II = self.model.cache_get('qtdmri_to_mapmri_matrix',
key=(tau))
if II is None:
II = qtdmri_to_mapmri_matrix(self.model.radial_order,
self.model.time_order, self.ut,
self.tau_scaling * tau)
self.model.cache_set('qtdmri_to_mapmri_matrix',
(tau), II)
else:
II = self.model.cache_get('qtdmri_isotropic_to_mapmri_matrix',
key=(tau))
if II is None:
II = qtdmri_isotropic_to_mapmri_matrix(self.model.radial_order,
self.model.time_order,
self.ut,
self.tau_scaling * tau)
self.model.cache_set('qtdmri_isotropic_to_mapmri_matrix',
(tau), II)
mapmri_coef = np.dot(II, self._qtdmri_coef)
return mapmri_coef
    def sparsity_abs(self, threshold=0.99):
        """As a measure of sparsity, calculates how many of the largest
        coefficients are needed for their absolute values to sum up to the
        given threshold (default 0.99) of the total absolute sum of all
        coefficients."""
if not 0. < threshold < 1.:
msg = "sparsity threshold must be between zero and one"
raise ValueError(msg)
total_weight = np.sum(abs(self._qtdmri_coef))
absolute_normalized_coef_array = (
np.sort(abs(self._qtdmri_coef))[::-1] / total_weight)
current_weight = 0.
counter = 0
while current_weight < threshold:
current_weight += absolute_normalized_coef_array[counter]
counter += 1
return counter
    def sparsity_density(self, threshold=0.99):
        """As a measure of sparsity, calculates how many of the largest
        coefficients are needed for their squared values to sum up to the
        given threshold (default 0.99) of the total squared sum of all
        coefficients."""
if not 0. < threshold < 1.:
msg = "sparsity threshold must be between zero and one"
raise ValueError(msg)
total_weight = np.sum(self._qtdmri_coef ** 2)
squared_normalized_coef_array = (
np.sort(self._qtdmri_coef ** 2)[::-1] / total_weight)
current_weight = 0.
counter = 0
while current_weight < threshold:
current_weight += squared_normalized_coef_array[counter]
counter += 1
return counter
def odf(self, sphere, tau, s=2):
r""" Calculates the analytical Orientation Distribution Function (ODF)
for a given diffusion time tau from the signal, [1]_ Eq. (32). The
qtdmri coefficients are first converted to mapmri coefficients
        following [2]_.
Parameters
----------
sphere : dipy sphere object
sphere object with vertice orientations to compute the ODF on.
tau : float
diffusion time (big_delta - small_delta / 3.) in seconds
s : unsigned int
radial moment of the ODF
References
----------
        .. [1] Ozarslan E. et al., "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. [2] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
mapmri_coef = self.qtdmri_to_mapmri_coef(tau)
if self.model.cartesian:
v_ = sphere.vertices
v = np.dot(v_, self.R)
I_s = mapmri.mapmri_odf_matrix(self.model.radial_order, self.us,
s, v)
odf = np.dot(I_s, mapmri_coef)
else:
II = self.model.cache_get('ODF_matrix', key=(sphere, s))
if II is None:
II = mapmri.mapmri_isotropic_odf_matrix(
self.model.radial_order, 1, s, sphere.vertices)
self.model.cache_set('ODF_matrix', (sphere, s), II)
odf = self.us[0] ** s * np.dot(II, mapmri_coef)
return odf
def odf_sh(self, tau, s=2):
r""" Calculates the real analytical odf for a given discrete sphere.
Computes the design matrix of the ODF for the given sphere vertices
and radial moment [1]_ eq. (32). The radial moment s acts as a
sharpening method. The analytical equation for the spherical ODF basis
is given in [2]_ eq. (C8). The qtdmri coefficients are first converted
        to mapmri coefficients following [3]_.
Parameters
----------
tau : float
diffusion time (big_delta - small_delta / 3.) in seconds
s : unsigned int
radial moment of the ODF
References
----------
        .. [1] Ozarslan E. et al., "Mean apparent propagator (MAP) MRI: A novel
               diffusion imaging method for mapping tissue microstructure",
               NeuroImage, 2013.
        .. [2] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP
data." NeuroImage (2016).
.. [3] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
mapmri_coef = self.qtdmri_to_mapmri_coef(tau)
if self.model.cartesian:
msg = 'odf in spherical harmonics not yet implemented for '
msg += 'cartesian implementation'
raise ValueError(msg)
II = self.model.cache_get('ODF_sh_matrix',
key=(self.model.radial_order, s))
if II is None:
II = mapmri.mapmri_isotropic_odf_sh_matrix(self.model.radial_order,
1, s)
self.model.cache_set('ODF_sh_matrix', (self.model.radial_order, s),
II)
odf = self.us[0] ** s * np.dot(II, mapmri_coef)
return odf
def rtpp(self, tau):
r""" Calculates the analytical return to the plane probability (RTPP)
for a given diffusion time tau, [1]_ eq. (42). The analytical formula
for the isotropic MAP-MRI basis was derived in [2]_ eq. (C11). The
qtdmri coefficients are first converted to mapmri coefficients
        following [3]_.
Parameters
----------
tau : float
diffusion time (big_delta - small_delta / 3.) in seconds
References
----------
        .. [1] Ozarslan E. et al., "Mean apparent propagator (MAP) MRI: A novel
               diffusion imaging method for mapping tissue microstructure",
               NeuroImage, 2013.
        .. [2] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP
data." NeuroImage (2016).
.. [3] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
mapmri_coef = self.qtdmri_to_mapmri_coef(tau)
if self.model.cartesian:
ind_mat = mapmri.mapmri_index_matrix(self.model.radial_order)
Bm = mapmri.b_mat(ind_mat)
sel = Bm > 0. # select only relevant coefficients
const = 1 / (np.sqrt(2 * np.pi) * self.us[0])
ind_sum = (-1.0) ** (ind_mat[sel, 0] / 2.0)
rtpp_vec = const * Bm[sel] * ind_sum * mapmri_coef[sel]
rtpp = rtpp_vec.sum()
return rtpp
else:
ind_mat = mapmri.mapmri_isotropic_index_matrix(
self.model.radial_order
)
rtpp_vec = np.zeros(int(ind_mat.shape[0]))
count = 0
for n in range(0, self.model.radial_order + 1, 2):
for j in range(1, 2 + n // 2):
ll = n + 2 - 2 * j
const = (-1 / 2.0) ** (ll / 2) / np.sqrt(np.pi)
matsum = 0
for k in range(0, j):
matsum += (
(-1) ** k *
mapmri.binomialfloat(j + ll - 0.5, j - k - 1) *
gamma(ll / 2 + k + 1 / 2.0) /
(factorial(k) * 0.5 ** (ll / 2 + 1 / 2.0 + k)))
for m in range(-ll, ll + 1):
rtpp_vec[count] = const * matsum
count += 1
direction = np.array(self.R[:, 0], ndmin=2)
r, theta, phi = cart2sphere(direction[:, 0], direction[:, 1],
direction[:, 2])
rtpp = mapmri_coef * (1 / self.us[0]) *\
rtpp_vec * real_sph_harm(ind_mat[:, 2], ind_mat[:, 1],
theta, phi)
return rtpp.sum()
def rtap(self, tau):
r""" Calculates the analytical return to the axis probability (RTAP)
for a given diffusion time tau, [1]_ eq. (40, 44a). The analytical
formula for the isotropic MAP-MRI basis was derived in [2]_ eq. (C11).
The qtdmri coefficients are first converted to mapmri coefficients
        following [3]_.
Parameters
----------
tau : float
diffusion time (big_delta - small_delta / 3.) in seconds
References
----------
        .. [1] Ozarslan E. et al., "Mean apparent propagator (MAP) MRI: A novel
               diffusion imaging method for mapping tissue microstructure",
               NeuroImage, 2013.
        .. [2] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP
data." NeuroImage (2016).
.. [3] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
mapmri_coef = self.qtdmri_to_mapmri_coef(tau)
if self.model.cartesian:
ind_mat = mapmri.mapmri_index_matrix(self.model.radial_order)
Bm = mapmri.b_mat(ind_mat)
sel = Bm > 0. # select only relevant coefficients
const = 1 / (2 * np.pi * np.prod(self.us[1:]))
ind_sum = (-1.0) ** ((np.sum(ind_mat[sel, 1:], axis=1) / 2.0))
rtap_vec = const * Bm[sel] * ind_sum * mapmri_coef[sel]
rtap = np.sum(rtap_vec)
else:
ind_mat = mapmri.mapmri_isotropic_index_matrix(
self.model.radial_order
)
rtap_vec = np.zeros(int(ind_mat.shape[0]))
count = 0
for n in range(0, self.model.radial_order + 1, 2):
for j in range(1, 2 + n // 2):
ll = n + 2 - 2 * j
kappa = ((-1) ** (j - 1) * 2. ** (-(ll + 3) / 2.0)) / np.pi
matsum = 0
for k in range(0, j):
matsum += ((-1) ** k *
mapmri.binomialfloat(j + ll - 0.5,
j - k - 1) *
gamma((ll + 1) / 2.0 + k)) /\
(factorial(k) * 0.5 ** ((ll + 1) / 2.0 + k))
for m in range(-ll, ll + 1):
rtap_vec[count] = kappa * matsum
count += 1
rtap_vec *= 2
direction = np.array(self.R[:, 0], ndmin=2)
r, theta, phi = cart2sphere(direction[:, 0],
direction[:, 1], direction[:, 2])
rtap_vec = mapmri_coef * (1 / self.us[0] ** 2) *\
rtap_vec * real_sph_harm(ind_mat[:, 2], ind_mat[:, 1],
theta, phi)
rtap = rtap_vec.sum()
return rtap
def rtop(self, tau):
r""" Calculates the analytical return to the origin probability (RTOP)
for a given diffusion time tau [1]_ eq. (36, 43). The analytical
formula for the isotropic MAP-MRI basis was derived in [2]_ eq. (C11).
The qtdmri coefficients are first converted to mapmri coefficients
        following [3]_.
Parameters
----------
tau : float
diffusion time (big_delta - small_delta / 3.) in seconds
References
----------
        .. [1] Ozarslan E. et al., "Mean apparent propagator (MAP) MRI: A novel
               diffusion imaging method for mapping tissue microstructure",
               NeuroImage, 2013.
        .. [2] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP
data." NeuroImage (2016).
.. [3] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
mapmri_coef = self.qtdmri_to_mapmri_coef(tau)
if self.model.cartesian:
ind_mat = mapmri.mapmri_index_matrix(self.model.radial_order)
Bm = mapmri.b_mat(ind_mat)
const = 1 / (np.sqrt(8 * np.pi ** 3) * np.prod(self.us))
ind_sum = (-1.0) ** (np.sum(ind_mat, axis=1) / 2)
rtop_vec = const * ind_sum * Bm * mapmri_coef
rtop = rtop_vec.sum()
else:
ind_mat = mapmri.mapmri_isotropic_index_matrix(
self.model.radial_order
)
Bm = mapmri.b_mat_isotropic(ind_mat)
const = 1 / (2 * np.sqrt(2.0) * np.pi ** (3 / 2.0))
rtop_vec = const * (-1.0) ** (ind_mat[:, 0] - 1) * Bm
rtop = (1 / self.us[0] ** 3) * rtop_vec * mapmri_coef
rtop = rtop.sum()
return rtop
def msd(self, tau):
r""" Calculates the analytical Mean Squared Displacement (MSD) for a
given diffusion time tau. It is defined as the Laplacian of the origin
of the estimated signal [1]_. The analytical formula for the MAP-MRI
basis was derived in [2]_ eq. (C13, D1). The qtdmri coefficients are
        first converted to mapmri coefficients following [3]_.
Parameters
----------
tau : float
diffusion time (big_delta - small_delta / 3.) in seconds
References
----------
.. [1] Cheng, J., 2014. Estimation and Processing of Ensemble Average
Propagator and Its Features in Diffusion MRI. Ph.D. Thesis.
        .. [2] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP
data." NeuroImage (2016).
.. [3] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
mapmri_coef = self.qtdmri_to_mapmri_coef(tau)
mu = self.us
if self.model.cartesian:
ind_mat = mapmri.mapmri_index_matrix(self.model.radial_order)
Bm = mapmri.b_mat(ind_mat)
sel = Bm > 0. # select only relevant coefficients
ind_sum = np.sum(ind_mat[sel], axis=1)
nx, ny, nz = ind_mat[sel].T
numerator = (-1) ** (0.5 * (-ind_sum)) * np.pi ** (3 / 2.0) *\
((1 + 2 * nx) * mu[0] ** 2 + (1 + 2 * ny) *
mu[1] ** 2 + (1 + 2 * nz) * mu[2] ** 2)
denominator = np.sqrt(2. ** (-ind_sum) * factorial(nx) *
factorial(ny) * factorial(nz)) *\
gamma(0.5 - 0.5 * nx) * gamma(0.5 - 0.5 * ny) *\
gamma(0.5 - 0.5 * nz)
msd_vec = mapmri_coef[sel] * (numerator / denominator)
msd = msd_vec.sum()
else:
ind_mat = mapmri.mapmri_isotropic_index_matrix(
self.model.radial_order
)
Bm = mapmri.b_mat_isotropic(ind_mat)
sel = Bm > 0. # select only relevant coefficients
msd_vec = (4 * ind_mat[sel, 0] - 1) * Bm[sel]
msd = self.us[0] ** 2 * msd_vec * mapmri_coef[sel]
msd = msd.sum()
return msd
def qiv(self, tau):
r""" Calculates the analytical Q-space Inverse Variance (QIV) for given
diffusion time tau.
It is defined as the inverse of the Laplacian of the origin of the
estimated propagator [1]_ eq. (22). The analytical formula for the
MAP-MRI basis was derived in [2]_ eq. (C14, D2). The qtdmri
        coefficients are first converted to mapmri coefficients following [3]_.
Parameters
----------
tau : float
diffusion time (big_delta - small_delta / 3.) in seconds
References
----------
.. [1] Hosseinbor et al. "Bessel fourier orientation reconstruction
(bfor): An analytical diffusion propagator reconstruction for
hybrid diffusion imaging and computation of q-space indices.
NeuroImage 64, 2013, 650–670.
        .. [2] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP
data." NeuroImage (2016).
.. [3] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
mapmri_coef = self.qtdmri_to_mapmri_coef(tau)
ux, uy, uz = self.us
if self.model.cartesian:
ind_mat = mapmri.mapmri_index_matrix(self.model.radial_order)
Bm = mapmri.b_mat(ind_mat)
sel = Bm > 0 # select only relevant coefficients
nx, ny, nz = ind_mat[sel].T
numerator = 8 * np.pi ** 2 * (ux * uy * uz) ** 3 *\
np.sqrt(factorial(nx) * factorial(ny) * factorial(nz)) *\
gamma(0.5 - 0.5 * nx) * gamma(0.5 - 0.5 * ny) * \
gamma(0.5 - 0.5 * nz)
denominator = np.sqrt(2. ** (-1 + nx + ny + nz)) *\
((1 + 2 * nx) * uy ** 2 * uz ** 2 + ux ** 2 *
((1 + 2 * nz) * uy ** 2 + (1 + 2 * ny) * uz ** 2))
qiv_vec = mapmri_coef[sel] * (numerator / denominator)
qiv = qiv_vec.sum()
else:
ind_mat = mapmri.mapmri_isotropic_index_matrix(
self.model.radial_order
)
Bm = mapmri.b_mat_isotropic(ind_mat)
sel = Bm > 0. # select only relevant coefficients
j = ind_mat[sel, 0]
qiv_vec = ((8 * (-1.0) ** (1 - j) *
np.sqrt(2) * np.pi ** (7 / 2.)) / ((4.0 * j - 1) *
Bm[sel]))
qiv = ux ** 5 * qiv_vec * mapmri_coef[sel]
qiv = qiv.sum()
return qiv
def fitted_signal(self, gtab=None):
"""
Recovers the fitted signal for the given gradient table. If no gradient
table is given it recovers the signal for the gtab of the model object.
"""
if gtab is None:
E = self.predict(self.model.gtab)
else:
E = self.predict(gtab)
return E
def predict(self, qvals_or_gtab, S0=1.):
r"""Recovers the reconstructed signal for any qvalue array or
gradient table.
"""
tau_scaling = self.tau_scaling
if isinstance(qvals_or_gtab, np.ndarray):
q = qvals_or_gtab[:, :3]
tau = qvals_or_gtab[:, 3] * tau_scaling
else:
gtab = qvals_or_gtab
qvals = gtab.qvals
tau = gtab.tau * tau_scaling
q = qvals[:, None] * gtab.bvecs
if self.model.cartesian:
if self.model.anisotropic_scaling:
q_rot = np.dot(q, self.R)
M = qtdmri_signal_matrix_(self.model.radial_order,
self.model.time_order,
self.us, self.ut, q_rot, tau,
self.model.normalization)
else:
M = qtdmri_signal_matrix_(self.model.radial_order,
self.model.time_order,
self.us, self.ut, q, tau,
self.model.normalization)
else:
M = qtdmri_isotropic_signal_matrix_(
self.model.radial_order, self.model.time_order,
self.us[0], self.ut, q, tau,
normalization=self.model.normalization)
E = S0 * np.dot(M, self._qtdmri_coef)
return E
def norm_of_laplacian_signal(self):
""" Calculates the norm of the laplacian of the fitted signal [1]_.
This information could be useful to assess if the extrapolation of the
fitted signal contains spurious oscillations. A high laplacian norm may
indicate that these are present, and any q-space indices that
use integrals of the signal may be corrupted (e.g. RTOP, RTAP, RTPP,
        QIV). In contrast to [1]_, the Laplacian now describes oscillations in
        the 4-dimensional qt-signal [2]_.
        References
        ----------
        .. [1] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP
data." NeuroImage (2016).
.. [2] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
if self.model.cartesian:
lap_matrix = qtdmri_laplacian_reg_matrix(
self.model.ind_mat, self.us, self.ut,
self.model.S_mat, self.model.T_mat, self.model.U_mat,
self.model.part1_reg_mat_tau,
self.model.part23_reg_mat_tau,
self.model.part4_reg_mat_tau,
normalization=self.model.normalization
)
else:
lap_matrix = qtdmri_isotropic_laplacian_reg_matrix(
self.model.ind_mat, self.us, self.ut,
self.model.part1_uq_iso_precomp,
self.model.part1_reg_mat_tau,
self.model.part23_reg_mat_tau,
self.model.part4_reg_mat_tau,
normalization=self.model.normalization
)
norm_laplacian = np.dot(self._qtdmri_coef,
np.dot(self._qtdmri_coef, lap_matrix))
return norm_laplacian
def pdf(self, rt_points):
""" Diffusion propagator on a given set of real points.
if the array r_points is non writeable, then intermediate
results are cached for faster recalculation
"""
tau_scaling = self.tau_scaling
rt_points_ = rt_points * np.r_[1, 1, 1, tau_scaling]
if self.model.cartesian:
K = qtdmri_eap_matrix_(self.model.radial_order,
self.model.time_order,
self.us, self.ut, rt_points_,
self.model.normalization)
else:
K = qtdmri_isotropic_eap_matrix_(
self.model.radial_order, self.model.time_order,
self.us[0], self.ut, rt_points_,
normalization=self.model.normalization
)
eap = np.dot(K, self._qtdmri_coef)
return eap
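def _example_qtdmri_to_mapmri_shapes(qtdmri_fit, tau):
    # Illustrative sketch, not part of the original module: the conversion
    # matrix only collapses the temporal dimension of the basis, so the
    # MAP-MRI coefficient vector is (time_order + 1) times shorter than the
    # qt-dMRI coefficient vector it is computed from.
    mapmri_coef = qtdmri_fit.qtdmri_to_mapmri_coef(tau)
    n_time = qtdmri_fit.model.time_order + 1
    assert mapmri_coef.shape[0] * n_time == qtdmri_fit._qtdmri_coef.shape[0]
    return mapmri_coef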
def qtdmri_to_mapmri_matrix(radial_order, time_order, ut, tau):
"""Generates the matrix that maps the qtdmri coefficients to MAP-MRI
coefficients. The conversion is done by only evaluating the time basis for
a diffusion time tau and summing up coefficients with the same spatial
    basis orders [1]_.
Parameters
----------
radial_order : unsigned int,
an even integer representing the spatial/radial order of the basis.
time_order : unsigned int,
an integer larger or equal than zero representing the time order
of the basis.
ut : float
temporal scaling factor
tau : float
diffusion time (big_delta - small_delta / 3.) in seconds
References
----------
.. [1] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
mapmri_ind_mat = mapmri.mapmri_index_matrix(radial_order)
n_elem_mapmri = int(mapmri_ind_mat.shape[0])
qtdmri_ind_mat = qtdmri_index_matrix(radial_order, time_order)
n_elem_qtdmri = int(qtdmri_ind_mat.shape[0])
temporal_storage = np.zeros(time_order + 1)
for o in range(time_order + 1):
temporal_storage[o] = temporal_basis(o, ut, tau)
counter = 0
mapmri_mat = np.zeros((n_elem_mapmri, n_elem_qtdmri))
for nxt, nyt, nzt, o in qtdmri_ind_mat:
index_overlap = np.all([nxt == mapmri_ind_mat[:, 0],
nyt == mapmri_ind_mat[:, 1],
nzt == mapmri_ind_mat[:, 2]], 0)
mapmri_mat[:, counter] = temporal_storage[o] * index_overlap
counter += 1
return mapmri_mat
def qtdmri_isotropic_to_mapmri_matrix(radial_order, time_order, ut, tau):
"""Generates the matrix that maps the spherical qtdmri coefficients to
MAP-MRI coefficients. The conversion is done by only evaluating the time
basis for a diffusion time tau and summing up coefficients with the same
    spatial basis orders [1]_.
Parameters
----------
radial_order : unsigned int,
an even integer representing the spatial/radial order of the basis.
time_order : unsigned int,
an integer larger or equal than zero representing the time order
of the basis.
ut : float
temporal scaling factor
tau : float
diffusion time (big_delta - small_delta / 3.) in seconds
References
----------
.. [1] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
mapmri_ind_mat = mapmri.mapmri_isotropic_index_matrix(radial_order)
n_elem_mapmri = int(mapmri_ind_mat.shape[0])
qtdmri_ind_mat = qtdmri_isotropic_index_matrix(radial_order, time_order)
n_elem_qtdmri = int(qtdmri_ind_mat.shape[0])
temporal_storage = np.zeros(time_order + 1)
for o in range(time_order + 1):
temporal_storage[o] = temporal_basis(o, ut, tau)
counter = 0
mapmri_isotropic_mat = np.zeros((n_elem_mapmri, n_elem_qtdmri))
for j, ll, m, o in qtdmri_ind_mat:
index_overlap = np.all([j == mapmri_ind_mat[:, 0],
ll == mapmri_ind_mat[:, 1],
m == mapmri_ind_mat[:, 2]], 0)
mapmri_isotropic_mat[:, counter] = temporal_storage[o] * index_overlap
counter += 1
return mapmri_isotropic_mat
def qtdmri_temporal_normalization(ut):
"""Normalization factor for the temporal basis"""
return np.sqrt(ut)
def qtdmri_mapmri_normalization(mu):
"""Normalization factor for Cartesian MAP-MRI basis. The scaling is the
same for every basis function depending only on the spatial scaling
mu.
"""
sqrtC = np.sqrt(8 * np.prod(mu)) * np.pi ** (3. / 4.)
return sqrtC
def qtdmri_mapmri_isotropic_normalization(j, l, u0):
"""Normalization factor for Spherical MAP-MRI basis. The normalization
for a basis function with orders [j,l,m] depends only on orders j,l and
the isotropic scale factor.
"""
sqrtC = ((2 * np.pi) ** (3. / 2.) *
np.sqrt(2 ** l * u0 ** 3 * gamma(j) / gamma(j + l + 1. / 2.)))
return sqrtC
def qtdmri_signal_matrix_(radial_order, time_order, us, ut, q, tau,
normalization=False):
"""Function to generate the qtdmri signal basis."""
M = qtdmri_signal_matrix(radial_order, time_order, us, ut, q, tau)
if normalization:
sqrtC = qtdmri_mapmri_normalization(us)
sqrtut = qtdmri_temporal_normalization(ut)
sqrtCut = sqrtC * sqrtut
M *= sqrtCut
return M
def qtdmri_signal_matrix(radial_order, time_order, us, ut, q, tau):
r"""Constructs the design matrix as a product of 3 separated radial,
angular and temporal design matrices. It precomputes the relevant basis
orders for each one and finally puts them together according to the index
matrix
"""
ind_mat = qtdmri_index_matrix(radial_order, time_order)
n_dat = int(q.shape[0])
n_elem = int(ind_mat.shape[0])
qx, qy, qz = q.T
mux, muy, muz = us
temporal_storage = np.zeros((n_dat, time_order + 1))
for o in range(time_order + 1):
temporal_storage[:, o] = temporal_basis(o, ut, tau)
Qx_storage = np.array(np.zeros((n_dat, radial_order + 1 + 4)),
dtype=complex)
Qy_storage = np.array(np.zeros((n_dat, radial_order + 1 + 4)),
dtype=complex)
Qz_storage = np.array(np.zeros((n_dat, radial_order + 1 + 4)),
dtype=complex)
for n in range(radial_order + 1 + 4):
Qx_storage[:, n] = mapmri.mapmri_phi_1d(n, qx, mux)
Qy_storage[:, n] = mapmri.mapmri_phi_1d(n, qy, muy)
Qz_storage[:, n] = mapmri.mapmri_phi_1d(n, qz, muz)
counter = 0
Q = np.zeros((n_dat, n_elem))
for nx, ny, nz, o in ind_mat:
Q[:, counter] = (np.real(
Qx_storage[:, nx] * Qy_storage[:, ny] * Qz_storage[:, nz]) *
temporal_storage[:, o]
)
counter += 1
return Q
def qtdmri_eap_matrix(radial_order, time_order, us, ut, grid):
r"""Constructs the design matrix as a product of 3 separated radial,
angular and temporal design matrices. It precomputes the relevant basis
orders for each one and finally puts them together according to the index
matrix
"""
ind_mat = qtdmri_index_matrix(radial_order, time_order)
rx, ry, rz, tau = grid.T
n_dat = int(rx.shape[0])
n_elem = int(ind_mat.shape[0])
mux, muy, muz = us
temporal_storage = np.zeros((n_dat, time_order + 1))
for o in range(time_order + 1):
temporal_storage[:, o] = temporal_basis(o, ut, tau)
Kx_storage = np.zeros((n_dat, radial_order + 1))
Ky_storage = np.zeros((n_dat, radial_order + 1))
Kz_storage = np.zeros((n_dat, radial_order + 1))
for n in range(radial_order + 1):
Kx_storage[:, n] = mapmri.mapmri_psi_1d(n, rx, mux)
Ky_storage[:, n] = mapmri.mapmri_psi_1d(n, ry, muy)
Kz_storage[:, n] = mapmri.mapmri_psi_1d(n, rz, muz)
counter = 0
K = np.zeros((n_dat, n_elem))
for nx, ny, nz, o in ind_mat:
K[:, counter] = (
Kx_storage[:, nx] * Ky_storage[:, ny] * Kz_storage[:, nz] *
temporal_storage[:, o]
)
counter += 1
return K
def qtdmri_isotropic_signal_matrix_(radial_order, time_order, us, ut, q, tau,
normalization=False):
M = qtdmri_isotropic_signal_matrix(
radial_order, time_order, us, ut, q, tau
)
if normalization:
ind_mat = qtdmri_isotropic_index_matrix(radial_order, time_order)
j, ll = ind_mat[:, :2].T
sqrtut = qtdmri_temporal_normalization(ut)
sqrtC = qtdmri_mapmri_isotropic_normalization(j, ll, us)
sqrtCut = sqrtC * sqrtut
M = M * sqrtCut[None, :]
return M
def qtdmri_isotropic_signal_matrix(radial_order, time_order, us, ut, q, tau):
ind_mat = qtdmri_isotropic_index_matrix(radial_order, time_order)
qvals, theta, phi = cart2sphere(q[:, 0], q[:, 1], q[:, 2])
n_dat = int(qvals.shape[0])
n_elem = int(ind_mat.shape[0])
num_j = int(np.max(ind_mat[:, 0]))
num_o = int(time_order + 1)
num_l = int(radial_order // 2 + 1)
num_m = int(radial_order * 2 + 1)
# Radial Basis
radial_storage = np.zeros([num_j, num_l, n_dat])
for j in range(1, num_j + 1):
for ll in range(0, radial_order + 1, 2):
radial_storage[j - 1, ll // 2, :] = radial_basis_opt(
j, ll, us, qvals)
# Angular Basis
angular_storage = np.zeros([num_l, num_m, n_dat])
for ll in range(0, radial_order + 1, 2):
for m in range(-ll, ll + 1):
angular_storage[ll // 2, m + ll, :] = (
angular_basis_opt(ll, m, qvals, theta, phi)
)
# Temporal Basis
temporal_storage = np.zeros([num_o + 1, n_dat])
for o in range(0, num_o + 1):
temporal_storage[o, :] = temporal_basis(o, ut, tau)
# Construct full design matrix
M = np.zeros((n_dat, n_elem))
counter = 0
for j, ll, m, o in ind_mat:
M[:, counter] = (radial_storage[j - 1, ll // 2, :] *
angular_storage[ll // 2, m + ll, :] *
temporal_storage[o, :])
counter += 1
return M
def qtdmri_eap_matrix_(radial_order, time_order, us, ut, grid,
normalization=False):
sqrtC = 1.
sqrtut = 1.
sqrtCut = 1.
if normalization:
sqrtC = qtdmri_mapmri_normalization(us)
sqrtut = qtdmri_temporal_normalization(ut)
sqrtCut = sqrtC * sqrtut
K_tau = (
qtdmri_eap_matrix(radial_order, time_order, us, ut, grid) * sqrtCut
)
return K_tau
def qtdmri_isotropic_eap_matrix_(radial_order, time_order, us, ut, grid,
normalization=False):
K = qtdmri_isotropic_eap_matrix(
radial_order, time_order, us, ut, grid
)
if normalization:
ind_mat = qtdmri_isotropic_index_matrix(radial_order, time_order)
j, ll = ind_mat[:, :2].T
sqrtut = qtdmri_temporal_normalization(ut)
sqrtC = qtdmri_mapmri_isotropic_normalization(j, ll, us)
sqrtCut = sqrtC * sqrtut
K = K * sqrtCut[None, :]
return K
def qtdmri_isotropic_eap_matrix(radial_order, time_order, us, ut, grid):
r"""Constructs the design matrix as a product of 3 separated radial,
angular and temporal design matrices. It precomputes the relevant basis
orders for each one and finally puts them together according to the index
matrix
"""
rx, ry, rz, tau = grid.T
R, theta, phi = cart2sphere(rx, ry, rz)
theta[np.isnan(theta)] = 0
ind_mat = qtdmri_isotropic_index_matrix(radial_order, time_order)
n_dat = int(R.shape[0])
n_elem = int(ind_mat.shape[0])
num_j = int(np.max(ind_mat[:, 0]))
num_o = int(time_order + 1)
num_l = int(radial_order / 2 + 1)
num_m = int(radial_order * 2 + 1)
# Radial Basis
radial_storage = np.zeros([num_j, num_l, n_dat])
for j in range(1, num_j + 1):
for ll in range(0, radial_order + 1, 2):
radial_storage[j - 1, ll // 2, :] = radial_basis_EAP_opt(
j, ll, us, R)
# Angular Basis
angular_storage = np.zeros([num_j, num_l, num_m, n_dat])
for j in range(1, num_j + 1):
for ll in range(0, radial_order + 1, 2):
for m in range(-ll, ll + 1):
angular_storage[j - 1, ll // 2, m + ll, :] = (
angular_basis_EAP_opt(j, ll, m, R, theta, phi)
)
# Temporal Basis
temporal_storage = np.zeros([num_o + 1, n_dat])
for o in range(0, num_o + 1):
temporal_storage[o, :] = temporal_basis(o, ut, tau)
# Construct full design matrix
M = np.zeros((n_dat, n_elem))
counter = 0
for j, ll, m, o in ind_mat:
M[:, counter] = (radial_storage[j - 1, ll // 2, :] *
angular_storage[j - 1, ll // 2, m + ll, :] *
temporal_storage[o, :])
counter += 1
return M
def radial_basis_opt(j, l, us, q):
""" Spatial basis dependent on spatial scaling factor us
"""
const = (
us ** l * np.exp(-2 * np.pi ** 2 * us ** 2 * q ** 2) *
genlaguerre(j - 1, l + 0.5)(4 * np.pi ** 2 * us ** 2 * q ** 2)
)
return const
def angular_basis_opt(l, m, q, theta, phi):
""" Angular basis independent of spatial scaling factor us. Though it
includes q, it is independent of the data and can be precomputed.
"""
const = (
(-1) ** (l / 2) * np.sqrt(4.0 * np.pi) *
(2 * np.pi ** 2 * q ** 2) ** (l / 2) *
real_sph_harm(m, l, theta, phi)
)
return const
def radial_basis_EAP_opt(j, l, us, r):
radial_part = (
(us ** 3) ** (-1) / (us ** 2) ** (l / 2) *
np.exp(- r ** 2 / (2 * us ** 2)) *
genlaguerre(j - 1, l + 0.5)(r ** 2 / us ** 2)
)
return radial_part
def angular_basis_EAP_opt(j, l, m, r, theta, phi):
angular_part = (
(-1) ** (j - 1) * (np.sqrt(2) * np.pi) ** (-1) *
(r ** 2 / 2) ** (l / 2) * real_sph_harm(m, l, theta, phi)
)
return angular_part
def temporal_basis(o, ut, tau):
""" Temporal basis dependent on temporal scaling factor ut
"""
const = np.exp(-ut * tau / 2.0) * special.laguerre(o)(ut * tau)
return const
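def _example_temporal_basis():
    # Illustrative sketch, not part of the original module: the zeroth-order
    # Laguerre polynomial is identically 1, so the o=0 temporal basis
    # function reduces to a plain exponential decay in ut * tau.
    ut, tau = 10., np.r_[0.01, 0.02, 0.05]
    assert np.allclose(temporal_basis(0, ut, tau), np.exp(-ut * tau / 2.0))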
def qtdmri_index_matrix(radial_order, time_order):
"""Computes the SHORE basis order indices according to [1].
"""
index_matrix = []
for n in range(0, radial_order + 1, 2):
for i in range(0, n + 1):
for j in range(0, n - i + 1):
for o in range(0, time_order + 1):
index_matrix.append([n - i - j, j, i, o])
return np.array(index_matrix)
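def _example_qtdmri_index_matrix():
    # Illustrative sketch, not part of the original module: for
    # radial_order=2 and time_order=1 the Cartesian index matrix enumerates
    # every (nx, ny, nz, o) with nx + ny + nz in {0, 2} and o in {0, 1},
    # i.e. (1 + 6) * 2 = 14 basis functions.
    ind_mat = qtdmri_index_matrix(radial_order=2, time_order=1)
    assert ind_mat.shape == (14, 4)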
def qtdmri_isotropic_index_matrix(radial_order, time_order):
"""Computes the SHORE basis order indices according to [1].
"""
index_matrix = []
for n in range(0, radial_order + 1, 2):
for j in range(1, 2 + n // 2):
ll = n + 2 - 2 * j
for m in range(-ll, ll + 1):
for o in range(0, time_order + 1):
index_matrix.append([j, ll, m, o])
return np.array(index_matrix)
def qtdmri_laplacian_reg_matrix(ind_mat, us, ut,
S_mat=None, T_mat=None, U_mat=None,
part1_ut_precomp=None,
part23_ut_precomp=None,
part4_ut_precomp=None,
normalization=False):
"""Computes the cartesian qt-dMRI Laplacian regularization matrix. If
given, uses precomputed matrices for temporal and spatial regularization
matrices to speed up computation. Follows the the formulation of Appendix B
in [1].
References
----------
.. [1] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
if S_mat is None or T_mat is None or U_mat is None:
radial_order = ind_mat[:, :3].max()
S_mat, T_mat, U_mat = mapmri.mapmri_STU_reg_matrices(radial_order)
part1_us = mapmri.mapmri_laplacian_reg_matrix(ind_mat[:, :3], us,
S_mat, T_mat, U_mat)
part23_us = part23_reg_matrix_q(ind_mat, U_mat, T_mat, us)
part4_us = part4_reg_matrix_q(ind_mat, U_mat, us)
if part1_ut_precomp is None:
part1_ut = part1_reg_matrix_tau(ind_mat, ut)
else:
part1_ut = part1_ut_precomp / ut
if part23_ut_precomp is None:
part23_ut = part23_reg_matrix_tau(ind_mat, ut)
else:
part23_ut = part23_ut_precomp * ut
if part4_ut_precomp is None:
part4_ut = part4_reg_matrix_tau(ind_mat, ut)
else:
part4_ut = part4_ut_precomp * ut ** 3
regularization_matrix = (
part1_us * part1_ut + part23_us * part23_ut + part4_us * part4_ut
)
if normalization:
temporal_normalization = qtdmri_temporal_normalization(ut) ** 2
spatial_normalization = qtdmri_mapmri_normalization(us) ** 2
regularization_matrix *= temporal_normalization * spatial_normalization
return regularization_matrix
def qtdmri_isotropic_laplacian_reg_matrix(ind_mat, us, ut,
part1_uq_iso_precomp=None,
part1_ut_precomp=None,
part23_ut_precomp=None,
part4_ut_precomp=None,
normalization=False):
"""Computes the spherical qt-dMRI Laplacian regularization matrix. If
given, uses precomputed matrices for temporal and spatial regularization
matrices to speed up computation. Follows the the formulation of Appendix C
in [1].
References
----------
.. [1] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
if part1_uq_iso_precomp is None:
part1_us = (
mapmri.mapmri_isotropic_laplacian_reg_matrix_from_index_matrix(
ind_mat[:, :3], us[0]
)
)
else:
part1_us = part1_uq_iso_precomp * us[0]
if part1_ut_precomp is None:
part1_ut = part1_reg_matrix_tau(ind_mat, ut)
else:
part1_ut = part1_ut_precomp / ut
if part23_ut_precomp is None:
part23_ut = part23_reg_matrix_tau(ind_mat, ut)
else:
part23_ut = part23_ut_precomp * ut
if part4_ut_precomp is None:
part4_ut = part4_reg_matrix_tau(ind_mat, ut)
else:
part4_ut = part4_ut_precomp * ut ** 3
part23_us = part23_iso_reg_matrix_q(ind_mat, us[0])
part4_us = part4_iso_reg_matrix_q(ind_mat, us[0])
regularization_matrix = (
part1_us * part1_ut + part23_us * part23_ut + part4_us * part4_ut
)
if normalization:
temporal_normalization = qtdmri_temporal_normalization(ut) ** 2
spatial_normalization = np.zeros_like(regularization_matrix)
j, ll = ind_mat[:, :2].T
pre_spatial_norm = qtdmri_mapmri_isotropic_normalization(j, ll, us[0])
spatial_normalization = np.outer(pre_spatial_norm, pre_spatial_norm)
regularization_matrix *= temporal_normalization * spatial_normalization
return regularization_matrix
def part23_reg_matrix_q(ind_mat, U_mat, T_mat, us):
"""Partial cartesian spatial Laplacian regularization matrix following
second line of Eq. (B2) in [1].
References
----------
.. [1] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
ux, uy, uz = us
x, y, z, _ = ind_mat.T
n_elem = int(ind_mat.shape[0])
LR = np.zeros((n_elem, n_elem))
for i in range(n_elem):
for k in range(i, n_elem):
val = 0
if x[i] == x[k] and y[i] == y[k]:
val += (
(uz / (ux * uy)) *
U_mat[x[i], x[k]] * U_mat[y[i], y[k]] * T_mat[z[i], z[k]]
)
if x[i] == x[k] and z[i] == z[k]:
val += (
(uy / (ux * uz)) *
U_mat[x[i], x[k]] * T_mat[y[i], y[k]] * U_mat[z[i], z[k]]
)
if y[i] == y[k] and z[i] == z[k]:
val += (
(ux / (uy * uz)) *
T_mat[x[i], x[k]] * U_mat[y[i], y[k]] * U_mat[z[i], z[k]]
)
LR[i, k] = LR[k, i] = val
return LR
def part23_iso_reg_matrix_q(ind_mat, us):
"""Partial spherical spatial Laplacian regularization matrix following the
equation below Eq. (C4) in [1].
References
----------
.. [1] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
n_elem = int(ind_mat.shape[0])
LR = np.zeros((n_elem, n_elem))
for i in range(n_elem):
for k in range(i, n_elem):
if ind_mat[i, 1] == ind_mat[k, 1] and \
ind_mat[i, 2] == ind_mat[k, 2]:
ji = ind_mat[i, 0]
jk = ind_mat[k, 0]
ll = ind_mat[i, 1]
if ji == (jk + 1):
LR[i, k] = LR[k, i] = (
2. ** (-ll) * -gamma(3 / 2.0 + jk + ll) / gamma(jk)
)
elif ji == jk:
LR[i, k] = LR[k, i] = 2. ** (-(ll + 1)) *\
(1 - 4 * ji - 2 * ll) *\
gamma(1 / 2.0 + ji + ll) / gamma(ji)
elif ji == (jk - 1):
LR[i, k] = LR[k, i] = 2. ** (-ll) *\
-gamma(3 / 2.0 + ji + ll) / gamma(ji)
return LR / us
def part4_reg_matrix_q(ind_mat, U_mat, us):
"""Partial cartesian spatial Laplacian regularization matrix following
equation Eq. (B2) in [1].
References
----------
.. [1] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
ux, uy, uz = us
x, y, z, _ = ind_mat.T
n_elem = int(ind_mat.shape[0])
LR = np.zeros((n_elem, n_elem))
for i in range(n_elem):
for k in range(i, n_elem):
if x[i] == x[k] and y[i] == y[k] and z[i] == z[k]:
LR[i, k] = LR[k, i] = (
(1. / (ux * uy * uz)) * U_mat[x[i], x[k]] *
U_mat[y[i], y[k]] * U_mat[z[i], z[k]]
)
return LR
def part4_iso_reg_matrix_q(ind_mat, us):
"""Partial spherical spatial Laplacian regularization matrix following the
equation below Eq. (C4) in [1].
References
----------
.. [1] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
n_elem = int(ind_mat.shape[0])
LR = np.zeros((n_elem, n_elem))
for i in range(n_elem):
for k in range(i, n_elem):
if ind_mat[i, 0] == ind_mat[k, 0] and \
ind_mat[i, 1] == ind_mat[k, 1] and \
ind_mat[i, 2] == ind_mat[k, 2]:
ji = ind_mat[i, 0]
ll = ind_mat[i, 1]
LR[i, k] = LR[k, i] = (
2. ** (-(ll + 2)) * gamma(1 / 2.0 + ji + ll) /
(np.pi ** 2 * gamma(ji))
)
return LR / us ** 3
def part1_reg_matrix_tau(ind_mat, ut):
"""Partial temporal Laplacian regularization matrix following
Appendix B in [1].
References
----------
.. [1] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
n_elem = int(ind_mat.shape[0])
LD = np.zeros((n_elem, n_elem))
for i in range(n_elem):
for k in range(i, n_elem):
oi = ind_mat[i, 3]
ok = ind_mat[k, 3]
if oi == ok:
LD[i, k] = LD[k, i] = 1. / ut
return LD
def part23_reg_matrix_tau(ind_mat, ut):
"""Partial temporal Laplacian regularization matrix following
Appendix B in [1].
References
----------
.. [1] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
n_elem = int(ind_mat.shape[0])
LD = np.zeros((n_elem, n_elem))
for i in range(n_elem):
for k in range(i, n_elem):
oi = ind_mat[i, 3]
ok = ind_mat[k, 3]
if oi == ok:
LD[i, k] = LD[k, i] = 1 / 2.
else:
LD[i, k] = LD[k, i] = np.abs(oi - ok)
return ut * LD
def part4_reg_matrix_tau(ind_mat, ut):
"""Partial temporal Laplacian regularization matrix following
Appendix B in [1].
References
----------
.. [1] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
n_elem = int(ind_mat.shape[0])
LD = np.zeros((n_elem, n_elem))
for i in range(n_elem):
for k in range(i, n_elem):
oi = ind_mat[i, 3]
ok = ind_mat[k, 3]
sum1 = 0
for p in range(1, min([ok, oi]) + 1 + 1):
sum1 += (oi - p) * (ok - p) * H(min([oi, ok]) - p)
sum2 = 0
for p in range(0, min(ok - 2, oi - 1) + 1):
sum2 += p
sum3 = 0
for p in range(0, min(ok - 1, oi - 2) + 1):
sum3 += p
LD[i, k] = LD[k, i] = (
0.25 * np.abs(oi - ok) + (1 / 16.) * mapmri.delta(oi, ok) +
min([oi, ok]) + sum1 + H(oi - 1) * H(ok - 1) *
(oi + ok - 2 + sum2 + sum3 + H(abs(oi - ok) - 1) *
(abs(oi - ok) - 1) * min([ok - 1, oi - 1]))
)
return LD * ut ** 3
def H(value):
"""Step function of H(x)=1 if x>=0 and zero otherwise. Used for the
temporal laplacian matrix."""
if value >= 0:
return 1
return 0
def generalized_crossvalidation(data, M, LR, startpoint=1e-4):
    r"""Generalized Cross Validation Function [1]_.
References
----------
.. [1] Craven et al. "Smoothing Noisy Data with Spline Functions."
NUMER MATH 31.4 (1978): 377-403.
"""
MMt = np.dot(M.T, M)
K = len(data)
input_stuff = (data, M, MMt, K, LR)
bounds = ((1e-5, 1),)
res = fmin_l_bfgs_b(lambda x,
input_stuff: GCV_cost_function(x, input_stuff),
(startpoint), args=(input_stuff,), approx_grad=True,
bounds=bounds, disp=False, pgtol=1e-10, factr=10.)
return res[0][0]
def GCV_cost_function(weight, arguments):
r"""Generalized Cross Validation Function that is iterated [1].
References
----------
.. [1] Craven et al. "Smoothing Noisy Data with Spline Functions."
NUMER MATH 31.4 (1978): 377-403.
"""
data, M, MMt, K, LR = arguments
S = np.dot(np.dot(M, np.linalg.pinv(MMt + weight * LR)), M.T)
trS = np.trace(S)
normyytilde = np.linalg.norm(data - np.dot(S, data), 2)
gcv_value = normyytilde / (K - trS)
return gcv_value
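def _example_generalized_crossvalidation():
    # Illustrative sketch, not part of the original module: on a small
    # synthetic least-squares problem with an identity regularizer, the
    # returned weight lies within the (1e-5, 1) bounds used above.
    rng = np.random.RandomState(0)
    M = rng.randn(30, 10)
    data = np.dot(M, rng.randn(10)) + 0.1 * rng.randn(30)
    lopt = generalized_crossvalidation(data, M, np.eye(10))
    assert 1e-5 <= lopt <= 1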
def qtdmri_isotropic_scaling(data, q, tau):
    """ Estimates the isotropic spatial scale factor us and the temporal
    scale factor ut from log-linear fits of the signal attenuation to
    q and tau.
    """
dataclip = np.clip(data, 1e-05, 1.)
logE = -np.log(dataclip)
logE_q = logE / (2 * np.pi ** 2)
logE_tau = logE * 2
B_q = np.array([q * q])
inv_B_q = np.linalg.pinv(B_q)
B_tau = np.array([tau])
inv_B_tau = np.linalg.pinv(B_tau)
us = np.sqrt(np.dot(logE_q, inv_B_q))
ut = np.dot(logE_tau, inv_B_tau)
return us, ut
def qtdmri_anisotropic_scaling(data, q, bvecs, tau):
    """ Estimates the anisotropic spatial scale factors us and eigenvector
    frame R from a tensor fit of the signal attenuation, together with the
    temporal scale factor ut from a log-linear fit to tau.
    """
dataclip = np.clip(data, 1e-05, 10e10)
logE = -np.log(dataclip)
logE_q = logE / (2 * np.pi ** 2)
logE_tau = logE * 2
B_q = design_matrix_spatial(bvecs, q)
inv_B_q = np.linalg.pinv(B_q)
A = np.dot(inv_B_q, logE_q)
evals, R = dti.decompose_tensor(dti.from_lower_triangular(A))
us = np.sqrt(evals)
B_tau = np.array([tau])
inv_B_tau = np.linalg.pinv(B_tau)
ut = np.dot(logE_tau, inv_B_tau)
return us, ut, R
def design_matrix_spatial(bvecs, qvals, dtype=None):
""" Constructs design matrix for DTI weighted least squares or
least squares fitting. (Basser et al., 1994a)
Parameters
----------
bvecs : array (N x 3)
unit b-vectors of the acquisition.
qvals : array (N,)
corresponding q-values in 1/mm
Returns
-------
    design_matrix : array (N, 6)
        Design matrix or B matrix for the Gaussian tensor model, with
        design_matrix[j, :] = (Bxx, Bxy, Byy, Bxz, Byz, Bzz)
"""
B = np.zeros((bvecs.shape[0], 6))
B[:, 0] = bvecs[:, 0] * bvecs[:, 0] * 1. * qvals ** 2 # Bxx
B[:, 1] = bvecs[:, 0] * bvecs[:, 1] * 2. * qvals ** 2 # Bxy
B[:, 2] = bvecs[:, 1] * bvecs[:, 1] * 1. * qvals ** 2 # Byy
B[:, 3] = bvecs[:, 0] * bvecs[:, 2] * 2. * qvals ** 2 # Bxz
B[:, 4] = bvecs[:, 1] * bvecs[:, 2] * 2. * qvals ** 2 # Byz
B[:, 5] = bvecs[:, 2] * bvecs[:, 2] * 1. * qvals ** 2 # Bzz
return B
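def _example_design_matrix_spatial():
    # Illustrative sketch, not part of the original module: a single
    # gradient along x with q = 2 (1/mm) only populates the Bxx entry
    # with q ** 2 = 4.
    B = design_matrix_spatial(np.array([[1., 0., 0.]]), np.array([2.]))
    assert np.allclose(B, [[4., 0., 0., 0., 0., 0.]])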
def create_rt_space_grid(grid_size_r, max_radius_r, grid_size_tau,
min_radius_tau, max_radius_tau):
""" Generates EAP grid (for potential positivity constraint)."""
tau_list = np.linspace(min_radius_tau, max_radius_tau, grid_size_tau)
constraint_grid_tau = np.c_[0., 0., 0., 0.]
for tau in tau_list:
constraint_grid = mapmri.create_rspace(grid_size_r, max_radius_r)
constraint_grid_tau = np.vstack(
[constraint_grid_tau,
np.c_[constraint_grid, np.zeros(constraint_grid.shape[0]) + tau]]
)
return constraint_grid_tau[1:]
def qtdmri_number_of_coefficients(radial_order, time_order):
"""Computes the total number of coefficients of the qtdmri basis given a
radial and temporal order. Equation given below Eq (9) in [1].
References
----------
.. [1] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
"""
F = np.floor(radial_order / 2.)
Msym = (F + 1) * (F + 2) * (4 * F + 3) / 6
M_total = Msym * (time_order + 1)
return M_total
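def _example_qtdmri_number_of_coefficients():
    # Illustrative sketch, not part of the original module: the closed-form
    # count matches the number of rows of the explicit index matrix, e.g.
    # 50 * 3 = 150 coefficients for radial_order=6 and time_order=2.
    n_coef = qtdmri_number_of_coefficients(radial_order=6, time_order=2)
    assert n_coef == qtdmri_index_matrix(6, 2).shape[0] == 150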
def l1_crossvalidation(b0s_mask, E, M, weight_array=np.linspace(0, .4, 21)):
"""cross-validation function to find the optimal weight of alpha for
sparsity regularization"""
dwi_mask = ~b0s_mask
b0_mask = b0s_mask
dwi_indices = np.arange(E.shape[0])[dwi_mask]
b0_indices = np.arange(E.shape[0])[b0_mask]
random.shuffle(dwi_indices)
sub0 = dwi_indices[0::5]
sub1 = dwi_indices[1::5]
sub2 = dwi_indices[2::5]
sub3 = dwi_indices[3::5]
sub4 = dwi_indices[4::5]
test0 = np.hstack((b0_indices, sub1, sub2, sub3, sub4))
test1 = np.hstack((b0_indices, sub0, sub2, sub3, sub4))
test2 = np.hstack((b0_indices, sub0, sub1, sub3, sub4))
test3 = np.hstack((b0_indices, sub0, sub1, sub2, sub4))
test4 = np.hstack((b0_indices, sub0, sub1, sub2, sub3))
cv_list = (
(sub0, test0),
(sub1, test1),
(sub2, test2),
(sub3, test3),
(sub4, test4)
)
errorlist = np.zeros((5, 21))
errorlist[:, 0] = 100.
optimal_alpha_sub = np.zeros(5)
for i, (sub, test) in enumerate(cv_list):
counter = 1
cv_old = errorlist[i, 0]
cv_new = errorlist[i, 0]
while cv_old >= cv_new and counter < weight_array.shape[0]:
alpha = weight_array[counter]
c = cvxpy.Variable(M.shape[1])
design_matrix = cvxpy.Constant(M[test])
design_matrix_to_recover = cvxpy.Constant(M[sub])
data = cvxpy.Constant(E[test])
objective = cvxpy.Minimize(
cvxpy.sum_squares(design_matrix * c - data) +
alpha * cvxpy.norm1(c)
)
constraints = []
prob = cvxpy.Problem(objective, constraints)
prob.solve(solver="ECOS", verbose=False)
recovered_signal = design_matrix_to_recover * c
errorlist[i, counter] = np.mean(
(E[sub] - np.asarray(recovered_signal.value).squeeze()) ** 2)
cv_old = errorlist[i, counter - 1]
cv_new = errorlist[i, counter]
counter += 1
optimal_alpha_sub[i] = weight_array[counter - 1]
optimal_alpha = optimal_alpha_sub.mean()
return optimal_alpha
def elastic_crossvalidation(b0s_mask, E, M, L, lopt,
weight_array=np.linspace(0, .2, 21)):
"""cross-validation function to find the optimal weight of alpha for
sparsity regularization when also Laplacian regularization is used."""
dwi_mask = ~b0s_mask
b0_mask = b0s_mask
dwi_indices = np.arange(E.shape[0])[dwi_mask]
b0_indices = np.arange(E.shape[0])[b0_mask]
random.shuffle(dwi_indices)
sub0 = dwi_indices[0::5]
sub1 = dwi_indices[1::5]
sub2 = dwi_indices[2::5]
sub3 = dwi_indices[3::5]
sub4 = dwi_indices[4::5]
test0 = np.hstack((b0_indices, sub1, sub2, sub3, sub4))
test1 = np.hstack((b0_indices, sub0, sub2, sub3, sub4))
test2 = np.hstack((b0_indices, sub0, sub1, sub3, sub4))
test3 = np.hstack((b0_indices, sub0, sub1, sub2, sub4))
test4 = np.hstack((b0_indices, sub0, sub1, sub2, sub3))
cv_list = (
(sub0, test0),
(sub1, test1),
(sub2, test2),
(sub3, test3),
(sub4, test4)
)
errorlist = np.zeros((5, 21))
errorlist[:, 0] = 100.
optimal_alpha_sub = np.zeros(5)
for i, (sub, test) in enumerate(cv_list):
counter = 1
cv_old = errorlist[i, 0]
cv_new = errorlist[i, 0]
c = cvxpy.Variable(M.shape[1])
design_matrix = cvxpy.Constant(M[test])
design_matrix_to_recover = cvxpy.Constant(M[sub])
data = cvxpy.Constant(E[test])
constraints = []
while cv_old >= cv_new and counter < weight_array.shape[0]:
alpha = weight_array[counter]
objective = cvxpy.Minimize(
cvxpy.sum_squares(design_matrix * c - data) +
alpha * cvxpy.norm1(c) +
lopt * cvxpy.quad_form(c, L)
)
prob = cvxpy.Problem(objective, constraints)
prob.solve(solver="ECOS", verbose=False)
recovered_signal = design_matrix_to_recover * c
errorlist[i, counter] = np.mean(
(E[sub] - np.asarray(recovered_signal.value).squeeze()) ** 2)
cv_old = errorlist[i, counter - 1]
cv_new = errorlist[i, counter]
counter += 1
optimal_alpha_sub[i] = weight_array[counter - 1]
optimal_alpha = optimal_alpha_sub.mean()
return optimal_alpha
def visualise_gradient_table_G_Delta_rainbow(
gtab,
big_delta_start=None, big_delta_end=None, G_start=None, G_end=None,
bval_isolines=np.r_[0, 250, 1000, 2500, 5000, 7500, 10000, 14000],
alpha_shading=0.6):
"""This function visualizes a q-tau acquisition scheme as a function of
    gradient strength and pulse separation (big_delta). It represents every
    measurement at its G and big_delta position regardless of b-vector, with a
background of b-value isolines for reference. It assumes there is only one
unique pulse length (small_delta) in the acquisition scheme.
Parameters
----------
gtab : GradientTable object
constructed gradient table with big_delta and small_delta given as
inputs.
big_delta_start : float,
optional minimum big_delta that is plotted in seconds
big_delta_end : float,
optional maximum big_delta that is plotted in seconds
G_start : float,
optional minimum gradient strength that is plotted in T/m
G_end : float,
        optional maximum gradient strength that is plotted in T/m
bval_isolines : array,
optional array of bvalue isolines that are plotted in the background
alpha_shading : float between [0-1]
optional shading of the bvalue colors in the background
"""
Delta = gtab.big_delta # in seconds
delta = gtab.small_delta # in seconds
G = gtab.gradient_strength * 1e3 # in SI units T/m
if len(np.unique(delta)) > 1:
msg = "This acquisition has multiple small_delta values. "
msg += "This visualization assumes there is only one small_delta."
raise ValueError(msg)
if big_delta_start is None:
big_delta_start = 0.005
if big_delta_end is None:
big_delta_end = Delta.max() + 0.004
if G_start is None:
G_start = 0.
if G_end is None:
G_end = G.max() + .05
Delta_ = np.linspace(big_delta_start, big_delta_end, 50)
G_ = np.linspace(G_start, G_end, 50)
Delta_grid, G_grid = np.meshgrid(Delta_, G_)
dummy_bvecs = np.tile([0, 0, 1], (len(G_grid.ravel()), 1))
gtab_grid = gradient_table_from_gradient_strength_bvecs(
G_grid.ravel() / 1e3, dummy_bvecs, Delta_grid.ravel(), delta[0]
)
bvals_ = gtab_grid.bvals.reshape(G_grid.shape)
plt.contourf(Delta_, G_, bvals_,
levels=bval_isolines,
cmap='rainbow', alpha=alpha_shading)
cb = plt.colorbar(spacing="proportional")
cb.ax.tick_params(labelsize=16)
plt.scatter(Delta, G, c='k', s=25)
plt.xlim(big_delta_start, big_delta_end)
plt.ylim(G_start, G_end)
cb.set_label('b-value ($s$/$mm^2$)', fontsize=18)
plt.xlabel(r'Pulse Separation $\Delta$ [sec]', fontsize=18)
plt.ylabel('Gradient Strength [T/m]', fontsize=18)
return None
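# A hedged usage sketch (acquisition values invented for illustration;
# gradient_table is assumed importable from dipy.core.gradients):
#   import numpy as np
#   from dipy.core.gradients import gradient_table
#   bvals = np.r_[0., 1000., 2500., 5000.]
#   bvecs = np.tile([0., 0., 1.], (4, 1))
#   gtab = gradient_table(bvals, bvecs,
#                         big_delta=np.r_[0.02, 0.03, 0.04, 0.05],
#                         small_delta=0.01)  # one unique small_delta
#   visualise_gradient_table_G_Delta_rainbow(gtab)
#   plt.show()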
|
FrancoisRheaultUS/dipy
|
dipy/reconst/qtdmri.py
|
Python
|
bsd-3-clause
| 84,043
|
[
"Gaussian"
] |
56078a25f3145bef060ab0a24c7784832b2d346fdbb0f736d5ca67aba94ae216
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## GNU GENERAL PUBLIC LICENSE
## Version 2, June 1991
## Copyright (C) 1989, 1991 Free Software Foundation, Inc.
## 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
## Everyone is permitted to copy and distribute verbatim copies
## of this license document, but changing it is not allowed.
## Preamble
## The licenses for most software are designed to take away your
##freedom to share and change it. By contrast, the GNU General Public
##License is intended to guarantee your freedom to share and change free
##software--to make sure the software is free for all its users. This
##General Public License applies to most of the Free Software
##Foundation's software and to any other program whose authors commit to
##using it. (Some other Free Software Foundation software is covered by
##the GNU Library General Public License instead.) You can apply it to
##your programs, too.
## When we speak of free software, we are referring to freedom, not
##price. Our General Public Licenses are designed to make sure that you
##have the freedom to distribute copies of free software (and charge for
##this service if you wish), that you receive source code or can get it
##if you want it, that you can change the software or use pieces of it
##in new free programs; and that you know you can do these things.
## To protect your rights, we need to make restrictions that forbid
##anyone to deny you these rights or to ask you to surrender the rights.
##These restrictions translate to certain responsibilities for you if you
##distribute copies of the software, or if you modify it.
## For example, if you distribute copies of such a program, whether
##gratis or for a fee, you must give the recipients all the rights that
##you have. You must make sure that they, too, receive or can get the
##source code. And you must show them these terms so they know their
##rights.
## We protect your rights with two steps: (1) copyright the software, and
##(2) offer you this license which gives you legal permission to copy,
##distribute and/or modify the software.
## Also, for each author's protection and ours, we want to make certain
##that everyone understands that there is no warranty for this free
##software. If the software is modified by someone else and passed on, we
##want its recipients to know that what they have is not the original, so
##that any problems introduced by others will not reflect on the original
##authors' reputations.
## Finally, any free program is threatened constantly by software
##patents. We wish to avoid the danger that redistributors of a free
##program will individually obtain patent licenses, in effect making the
##program proprietary. To prevent this, we have made it clear that any
##patent must be licensed for everyone's free use or not licensed at all.
## The precise terms and conditions for copying, distribution and
##modification follow.
## GNU GENERAL PUBLIC LICENSE
## TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
## 0. This License applies to any program or other work which contains
##a notice placed by the copyright holder saying it may be distributed
##under the terms of this General Public License. The "Program", below,
##refers to any such program or work, and a "work based on the Program"
##means either the Program or any derivative work under copyright law:
##that is to say, a work containing the Program or a portion of it,
##either verbatim or with modifications and/or translated into another
##language. (Hereinafter, translation is included without limitation in
##the term "modification".) Each licensee is addressed as "you".
##Activities other than copying, distribution and modification are not
##covered by this License; they are outside its scope. The act of
##running the Program is not restricted, and the output from the Program
##is covered only if its contents constitute a work based on the
##Program (independent of having been made by running the Program).
##Whether that is true depends on what the Program does.
## 1. You may copy and distribute verbatim copies of the Program's
##source code as you receive it, in any medium, provided that you
##conspicuously and appropriately publish on each copy an appropriate
##copyright notice and disclaimer of warranty; keep intact all the
##notices that refer to this License and to the absence of any warranty;
##and give any other recipients of the Program a copy of this License
##along with the Program.
##You may charge a fee for the physical act of transferring a copy, and
##you may at your option offer warranty protection in exchange for a fee.
## 2. You may modify your copy or copies of the Program or any portion
##of it, thus forming a work based on the Program, and copy and
##distribute such modifications or work under the terms of Section 1
##above, provided that you also meet all of these conditions:
## a) You must cause the modified files to carry prominent notices
## stating that you changed the files and the date of any change.
## b) You must cause any work that you distribute or publish, that in
## whole or in part contains or is derived from the Program or any
## part thereof, to be licensed as a whole at no charge to all third
## parties under the terms of this License.
## c) If the modified program normally reads commands interactively
## when run, you must cause it, when started running for such
## interactive use in the most ordinary way, to print or display an
## announcement including an appropriate copyright notice and a
## notice that there is no warranty (or else, saying that you provide
## a warranty) and that users may redistribute the program under
## these conditions, and telling the user how to view a copy of this
## License. (Exception: if the Program itself is interactive but
## does not normally print such an announcement, your work based on
## the Program is not required to print an announcement.)
##These requirements apply to the modified work as a whole. If
##identifiable sections of that work are not derived from the Program,
##and can be reasonably considered independent and separate works in
##themselves, then this License, and its terms, do not apply to those
##sections when you distribute them as separate works. But when you
##distribute the same sections as part of a whole which is a work based
##on the Program, the distribution of the whole must be on the terms of
##this License, whose permissions for other licensees extend to the
##entire whole, and thus to each and every part regardless of who wrote it.
##Thus, it is not the intent of this section to claim rights or contest
##your rights to work written entirely by you; rather, the intent is to
##exercise the right to control the distribution of derivative or
##collective works based on the Program.
##In addition, mere aggregation of another work not based on the Program
##with the Program (or with a work based on the Program) on a volume of
##a storage or distribution medium does not bring the other work under
##the scope of this License.
## 3. You may copy and distribute the Program (or a work based on it,
##under Section 2) in object code or executable form under the terms of
##Sections 1 and 2 above provided that you also do one of the following:
## a) Accompany it with the complete corresponding machine-readable
## source code, which must be distributed under the terms of Sections
## 1 and 2 above on a medium customarily used for software interchange; or,
## b) Accompany it with a written offer, valid for at least three
## years, to give any third party, for a charge no more than your
## cost of physically performing source distribution, a complete
## machine-readable copy of the corresponding source code, to be
## distributed under the terms of Sections 1 and 2 above on a medium
## customarily used for software interchange; or,
## c) Accompany it with the information you received as to the offer
## to distribute corresponding source code. (This alternative is
## allowed only for noncommercial distribution and only if you
## received the program in object code or executable form with such
## an offer, in accord with Subsection b above.)
##The source code for a work means the preferred form of the work for
##making modifications to it. For an executable work, complete source
##code means all the source code for all modules it contains, plus any
##associated interface definition files, plus the scripts used to
##control compilation and installation of the executable. However, as a
##special exception, the source code distributed need not include
##anything that is normally distributed (in either source or binary
##form) with the major components (compiler, kernel, and so on) of the
##operating system on which the executable runs, unless that component
##itself accompanies the executable.
##If distribution of executable or object code is made by offering
##access to copy from a designated place, then offering equivalent
##access to copy the source code from the same place counts as
##distribution of the source code, even though third parties are not
##compelled to copy the source along with the object code.
## 4. You may not copy, modify, sublicense, or distribute the Program
##except as expressly provided under this License. Any attempt
##otherwise to copy, modify, sublicense or distribute the Program is
##void, and will automatically terminate your rights under this License.
##However, parties who have received copies, or rights, from you under
##this License will not have their licenses terminated so long as such
##parties remain in full compliance.
## 5. You are not required to accept this License, since you have not
##signed it. However, nothing else grants you permission to modify or
##distribute the Program or its derivative works. These actions are
##prohibited by law if you do not accept this License. Therefore, by
##modifying or distributing the Program (or any work based on the
##Program), you indicate your acceptance of this License to do so, and
##all its terms and conditions for copying, distributing or modifying
##the Program or works based on it.
## 6. Each time you redistribute the Program (or any work based on the
##Program), the recipient automatically receives a license from the
##original licensor to copy, distribute or modify the Program subject to
##these terms and conditions. You may not impose any further
##restrictions on the recipients' exercise of the rights granted herein.
##You are not responsible for enforcing compliance by third parties to
##this License.
## 7. If, as a consequence of a court judgment or allegation of patent
##infringement or for any other reason (not limited to patent issues),
##conditions are imposed on you (whether by court order, agreement or
##otherwise) that contradict the conditions of this License, they do not
##excuse you from the conditions of this License. If you cannot
##distribute so as to satisfy simultaneously your obligations under this
##License and any other pertinent obligations, then as a consequence you
##may not distribute the Program at all. For example, if a patent
##license would not permit royalty-free redistribution of the Program by
##all those who receive copies directly or indirectly through you, then
##the only way you could satisfy both it and this License would be to
##refrain entirely from distribution of the Program.
##If any portion of this section is held invalid or unenforceable under
##any particular circumstance, the balance of the section is intended to
##apply and the section as a whole is intended to apply in other
##circumstances.
##It is not the purpose of this section to induce you to infringe any
##patents or other property right claims or to contest validity of any
##such claims; this section has the sole purpose of protecting the
##integrity of the free software distribution system, which is
##implemented by public license practices. Many people have made
##generous contributions to the wide range of software distributed
##through that system in reliance on consistent application of that
##system; it is up to the author/donor to decide if he or she is willing
##to distribute software through any other system and a licensee cannot
##impose that choice.
##This section is intended to make thoroughly clear what is believed to
##be a consequence of the rest of this License.
## 8. If the distribution and/or use of the Program is restricted in
##certain countries either by patents or by copyrighted interfaces, the
##original copyright holder who places the Program under this License
##may add an explicit geographical distribution limitation excluding
##those countries, so that distribution is permitted only in or among
##countries not thus excluded. In such case, this License incorporates
##the limitation as if written in the body of this License.
## 9. The Free Software Foundation may publish revised and/or new versions
##of the General Public License from time to time. Such new versions will
##be similar in spirit to the present version, but may differ in detail to
##address new problems or concerns.
##Each version is given a distinguishing version number. If the Program
##specifies a version number of this License which applies to it and "any
##later version", you have the option of following the terms and conditions
##either of that version or of any later version published by the Free
##Software Foundation. If the Program does not specify a version number of
##this License, you may choose any version ever published by the Free Software
##Foundation.
## 10. If you wish to incorporate parts of the Program into other free
##programs whose distribution conditions are different, write to the author
##to ask for permission. For software which is copyrighted by the Free
##Software Foundation, write to the Free Software Foundation; we sometimes
##make exceptions for this. Our decision will be guided by the two goals
##of preserving the free status of all derivatives of our free software and
##of promoting the sharing and reuse of software generally.
## NO WARRANTY
## 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
##FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
##OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
##PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
##OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
##MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
##TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
##PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
##REPAIR OR CORRECTION.
## 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
##WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
##REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
##INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
##OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
##TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
##YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
##PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
##POSSIBILITY OF SUCH DAMAGES.
## END OF TERMS AND CONDITIONS
##Copyright (C) [2003] [Jürgen Hamel, D-32584 Löhne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import os
import sys
sys.path.append('/usr/lib/python/')
sys.path.append('/usr/lib64/python2.4/site-packages')
sys.path.append('/usr/lib64/python2.4/site-packages/gtk-2.0')
#sys.path.append(os.environ['CUON_PATH'])
#try:
import pygtk
#except:
# print 'No python-module pygtk found. please install first'
# sys.exit(0)
import os.path
pygtk.require('2.0')
import gtk
import gtk.glade
import cuon.Addresses.addresses
import cuon.Articles.articles
import cuon.Bank.bank
try:
import cuon.Clients.clients
except Exception, params:
print 'import failed'
print Exception, params
import cuon.Biblio.biblio
import cuon.Order.order
import cuon.User
import cuon.Preferences.preferences
import cuon.PrefsFinance.prefsFinance
import cuon.Stock.stock
import cuon.DMS.dms
import cuon.Databases.databases
import cuon.XML.MyXML
from cuon.TypeDefs.typedefs import typedefs
from cuon.Windows.windows import windows
import cuon.Login.login
import cPickle
import cuon.Databases.dumps
from cuon.TypeDefs.typedefs_server import typedefs_server
import cuon.Databases.cyr_load_table
import threading
import cuon.VTK.mainLogo
import cuon.VTK.test
import cuon.WebShop.webshop
import cuon.AI.ai
import cuon.Staff.staff
import cuon.Project.project
import cuon.Finances.cashAccountBook
import cuon.Databases.import_generic1
import cuon.Databases.import_generic2
import commands
import cuon.Help.help
# localisation
import locale, gettext
import time
#http connections
import httplib, urllib
try:
import profile
except:
print "no Profile"
class MainWindow(windows):
"""
@author: Juergen Hamel
@organization: Cyrus-Computer GmbH, D-32584 Loehne
@copyright: by Juergen Hamel
@license: GPL ( GNU GENERAL PUBLIC LICENSE )
@contact: jh@cyrus.de
"""
def __init__(self, sT):
windows.__init__(self)
self.sStartType = sT
self.Version = {'Major': 0, 'Minor': 32, 'Rev': 4, 'Species': 0, 'Maschine': 'Linux,Windows'}
self.sTitle = _("Client PyCuon for C.U.O.N. Version ") + `self.Version['Major']` + '.' + `self.Version['Minor']` + '.' + `self.Version['Rev']`
self.allTables = {}
self.sDebug = 'NO'
self.ModulNumber = self.MN['Mainwindow']
self.extMenucommand = {}
#self.extMenucommand['ext1'] = 'Test'
#set this Functions to None
def loadUserInfo(self):
pass
def checkClient(self):
pass
def on_end1_activate(self,event):
print "exit cuon"
#clean up the tmp-files
try:
os.system( 'rm ' + os.path.normpath(os.environ['CUON_HOME'] + '/cuon__*' ))
except Exception, params:
#print Exception, params
pass
try:
os.system( 'rm ' + os.path.normpath(self.dicUser['prefPath']['tmp'] + '/*__dms*' ))
except Exception, params:
#print Exception, params
pass
try:
os.system( 'rm ' + os.path.normpath( os.environ['CUON_HOME'] + '/*__dms*' ))
except Exception, params:
#print Exception, params
pass
self.gtk_main_quit()
def on_databases1_activate(self,event):
daba = cuon.Databases.databases.databaseswindow()
def on_login1_activate(self,event):
lgi = cuon.Login.login.loginwindow( [self.getWidget('eUserName')])
self.openDB()
self.oUser = self.loadObject('User')
self.closeDB()
if self.oUser.getUserName()== 'EMPTY':
pass
else:
self.getWidget('eServer').set_text(self.td.server)
#choose the client
self.on_clients1_activate(None)
self.checkMenus()
def checkMenus(self):
liModullist = self.rpc.callRP('User.getModulList', self.oUser.getSqlDicUser())
print liModullist
if self.sStartType == 'server':
self.enableMenuItem('serverMode')
self.disableMenuItem('user')
self.enableMenuItem('login')
misc_menu = False
print 'LI_MODULELIST'
print `liModullist`
for iL in liModullist:
print iL
if iL.has_key('all'):
print 'key all found'
#data
self.addEnabledMenuItems('work','mi_addresses1')
self.addEnabledMenuItems('work','mi_articles1')
self.addEnabledMenuItems('work','mi_bibliographic')
self.addEnabledMenuItems('work','mi_clients1')
print 'enableMenuItem staff'
self.addEnabledMenuItems('work','mi_staff1')
print 'enableMenuItem staff end'
#action
self.addEnabledMenuItems('work','mi_order1')
self.addEnabledMenuItems('work','mi_stock1')
self.addEnabledMenuItems('work','mi_dms1')
#accounting
self.addEnabledMenuItems('work','mi_cash_account_book1')
# tools
self.addEnabledMenuItems('work','mi_expert_system1')
self.addEnabledMenuItems('work','mi_project1')
#extras
self.addEnabledMenuItems('work','mi_preferences1')
self.addEnabledMenuItems('work','mi_user1')
self.addEnabledMenuItems('work','mi_finances1')
self.addEnabledMenuItems('work','mi_project1')
self.addEnabledMenuItems('work','mi_import_data1')
self.enableMenuItem('work')
if iL.has_key('addresses'):
self.addEnabledMenuItems('misc','mi_addresses1')
misc_menu = True
if iL.has_key('articles'):
self.addEnabledMenuItems('misc','mi_articles1')
misc_menu = True
if iL.has_key('biblio'):
self.addEnabledMenuItems('misc','mi_bibliographic')
misc_menu = True
if iL.has_key('clients'):
self.addEnabledMenuItems('misc','mi_clients1')
misc_menu = True
if iL.has_key('staff'):
self.addEnabledMenuItems('misc','mi_staff1')
misc_menu = True
if iL.has_key('order'):
self.addEnabledMenuItems('misc','mi_order1')
misc_menu = True
if iL.has_key('stock'):
self.addEnabledMenuItems('misc','mi_stock1')
misc_menu = True
if iL.has_key('dms'):
self.addEnabledMenuItems('misc','mi_dms1')
misc_menu = True
if iL.has_key('account_book'):
self.addEnabledMenuItems('misc','mi_cash_account_book1')
misc_menu = True
if iL.has_key('expert_system'):
self.addEnabledMenuItems('misc','mi_expert_system1')
misc_menu = True
if iL.has_key('project'):
print 'key project found '
self.addEnabledMenuItems('misc','mi_project1')
misc_menu = True
print '-----------------------'
if iL.has_key('experimental'):
print 'key experimental found'
self.addEnabledMenuItems('experimental','mi_mayavi1')
self.addEnabledMenuItems('experimental','mi_test1')
self.enableMenuItem('experimental')
if iL.has_key('extendet_gpl'):
liExtGpl = iL['extendet_gpl']
print 'Ext.GPL =', liExtGpl
for newProgram in liExtGpl:
mi1 = self.addMenuItem(self.getWidget(newProgram['MenuItem']['Main']),newProgram['MenuItem']['Sub'])
print 'new Item = ', `mi1`
if newProgram['MenuItem']['ExternalNumber'] == 'ext1':
mi1.connect("activate", self.on_ext1_activate)
elif newProgram['MenuItem']['ExternalNumber'] == 'ext2':
mi1.connect("activate", self.on_ext2_activate)
elif newProgram['MenuItem']['ExternalNumber'] == 'ext3':
mi1.connect("activate", self.on_ext3_activate)
if newProgram.has_key('Imports'):
newImports = newProgram['Imports']
for nI in newImports:
try:
exec('import ' + nI)
print 'import', nI
except:
pass
if newProgram.has_key('MenuStart'):
print 'MenuStart = ', newProgram['MenuItem']['ExternalNumber']
self.extMenucommand[newProgram['MenuItem']['ExternalNumber']] = newProgram['MenuStart']
if newProgram.has_key('Start'):
exec(newProgram['Start'])
print 'EXEC = ', newProgram['Start']
if misc_menu:
self.enableMenuItem('misc')
def on_logout1_activate(self, event):
print 'Logout'
try:
self.rpc.callRP('Databases.logout', self.oUser.getUserName())
except:
print 'Exception'
self.disableMenuItem('login')
self.enableMenuItem('user')
def on_eUserName_changed(self, event):
if self.getWidget('eUserName').get_text() != 'EMPTY':
print 'User changed 22'
self.openDB()
self.oUser = self.loadObject('User')
print 'sDebug (Cuon) = ' + self.sDebug
self.oUser.setDebug(self.sDebug)
self.saveObject('User', self.oUser)
self.closeDB()
# self.openDB()
#if self.startProgressBar():
if not self.allTables:
self.generateLocalSqlObjects()
# self.stopProgressBar()
#print self.oUser.getDicUser()
def generateSqlObjects(self):
#self.rpc.callRP('src.Databases.py_getInfoOfTable', 'allTables')
at = self.rpc.callRP('Database.getInfo', 'allTables')
#print 'at23 = ', `at`
liAllTables = cPickle.loads(eval(self.doDecode(at)))
#sys.exit(0)
#print 'liAllTables = '
#print liAllTables
iCount = len(liAllTables)
for i in range(iCount):
self.loadSqlDefs(liAllTables, i)
self.setProgressBar(float(i) * 1.0/float(iCount) * 100.0)
#print 'Progress-Value = ' + str(float(i) * 1.0/float(iCount) * 100.0)
#print self.allTables
def generateLocalSqlObjects(self):
at = self.rpc.callRP('Database.getInfo', 'allTables')
print 'at24 = ', `at`
liAllTables = cPickle.loads(eval(self.doDecode(at)))
#liAllTables = cPickle.loads(self.rpc.callRP('src.Databases.py_getInfoOfTable', 'allTables'))
print 'liAllTables = ', liAllTables
#print liAllTables
iCount = len(liAllTables)
print 'iCount = ', iCount
for i in range(iCount):
self.loadLocalSqlDefs(liAllTables, i)
#self.setProgressBar(float(i) * 1.0/float(iCount) * 100.0)
#print 'Progress-Value = ' + str(float(i) * 1.0/float(iCount) * 100.0)
#print self.allTables
def loadSqlDefs(self, liAllTables, i ):
try:
clt = cuon.Databases.cyr_load_table.cyr_load_table()
self.allTables[liAllTables[i]] = clt.loadTable(liAllTables[i])
except:
print 'ERROR'
def loadLocalSqlDefs(self, liAllTables, i ):
#print 'loadLocalSQL1 ', liAllTables
#print 'loadLocalSQL2 ', i
clt = cuon.Databases.cyr_load_table.cyr_load_table()
self.allTables[liAllTables[i]] = clt.loadLocalTable(liAllTables[i])
#print 'loadLocalSQL3 ', `self.allTables`
# Data-Menu
#-->
def on_addresses1_activate(self,event):
adr = cuon.Addresses.addresses.addresswindow(self.allTables)
def on_articles1_activate(self,event):
art = cuon.Articles.articles.articleswindow(self.allTables)
def on_bank1_activate(self,event):
bank = cuon.Bank.bank.bankwindow(self.allTables)
#-->
def on_bibliographic_activate(self, event):
bib = cuon.Biblio.biblio.bibliowindow(self.allTables)
def on_clients1_activate(self, event):
cli = cuon.Clients.clients.clientswindow(self.allTables)
def on_staff1_activate(self, event):
staff = cuon.Staff.staff.staffwindow(self.allTables)
# Action-Menu
def on_order1_activate(self,event):
ord = cuon.Order.order.orderwindow(self.allTables)
def on_stock1_activate(self,event):
ord = cuon.Stock.stock.stockwindow(self.allTables)
def on_dms1_activate(self,event):
dms = cuon.DMS.dms.dmswindow(self.allTables)
# Finances
# Cash Account Book
def on_cash_account_book1_activate(self, event):
cab = cuon.Finances.cashAccountBook.cashAccountBookwindow(self.allTables)
# Extras
def on_expert_system1_activate(self, event):
cai = cuon.AI.ai.aiwindow(self.allTables)
# Extras
def on_project1_activate(self, event):
cpro = cuon.Project.project.projectwindow(self.allTables)
# Tools
def on_update1_activate(self, event):
self.updateVersion()
def on_pref_user1_activate(self,event):
prefs = cuon.Preferences.preferences.preferenceswindow(self.allTables)
def on_prefs_finances_activate(self,event):
prefs = cuon.PrefsFinance.prefsFinance.prefsFinancewindow(self.allTables)
def on_webshop1_activate(self,event):
print 'Webshop'
prefs = cuon.WebShop.webshop.webshopwindow(self.allTables)
def updateVersion(self):
if self.startProgressBar():
self.generateSqlObjects()
self.writeAllGladeFiles()
self.stopProgressBar()
def on_import_data1_activate(self, event):
imp1 = cuon.Databases.import_generic1.import_generic1(self.allTables)
def on_test1_activate(self, event):
te = cuon.VTK.test.test()
te.show()
def on_about1_activate(self, event):
about1 = self.getWidget('aCuon')
about1.show()
def on_onlinehelp_activate(self, event):
he1 = cuon.Help.help.helpwindow()
# hide about-info
def on_okAbout1_clicked(self, event):
about1 = self.getWidget('aCuon')
about1.hide()
# extended menu
# set by Zope user control
def on_ext1_activate(self, event):
print 'ext1 menu activated !!!!!'
ext1 = eval(self.extMenucommand['ext1'])
try:
ext1.start()
except:
print 'No StartModule'
def on_ext2_activate(self, event):
print 'ext2 menu activated !!!!!'
ext2 = eval(self.extMenucommand['ext2'])
try:
ext2.start()
except:
print 'No StartModule'
def on_ext3_activate(self, event):
print 'ext3 menu activated !!!!!'
ext3 = eval(self.extMenucommand['ext3'])
try:
ext3.start()
except:
print 'No StartModule'
def getNewClientSoftware(self, id):
cuonpath = self.td.cuon_path
self.infoMsg('C.U.O.N. will now try to load the new client version.')
shellcommand = 'rm ' + cuonpath + '/newclient'
liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
shellcommand = 'rm -R ' + cuonpath + '/iClient'
liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
sc = cuon.Databases.SingleCuon.SingleCuon(self.allTables)
sc.saveNewVersion(id)
shellcommand = 'cd '+cuonpath+' ; tar -xvjf newclient'
liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
#shellcommand = 'sh ' + cuonpath + '/iClient/iCuon '
#liStatus = commands.getstatusoutput(shellcommand)
#print shellcommand, liStatus
self.infoMsg('Update complete. Please restart C.U.O.N.')
def startMain(self, sStartType, sDebug,sLocal='NO'):
#ML = cuon.VTK.mainLogo.mainLogo()
#ML.startLogo()
if sDebug:
self.sDebug = sDebug
else:
self.sDebug = 'NO'
if sStartType == 'server':
print 'server mode'
td = typedefs_server()
# create widget tree ...
self.gladeName = '/usr/share/cuon/glade/cuon.glade2'
self.loadGladeFile(self.gladeName)
else:
id, version = self.rpc.callRP('Database.getLastVersion')
print 'Version', version
print 'id', id
## self.openDB()
## version = self.loadObject('ProgramVersion')
## self.closeDB()
##
print 'Version:' + str(version)
print self.Version['Major'], version['Major']
print self.Version['Minor'], version['Minor']
print self.Version['Rev'], version['Rev']
print self.Version, version
if not version:
print 'no Version, please inform Cuon-Administrator'
sys.exit(0)
if self.rpc.callRP('Database.checkVersion', self.Version, version) == 'Wrong':
print 'version mismatch'
print 'load new version of pyCuon'
self.getNewClientSoftware(id)
self.openDB()
version = self.saveObject('newClientVersion',True)
self.closeDB()
sys.exit(0)
self.openDB()
newClientExist = self.loadObject('newClientVersion')
self.closeDB()
if newClientExist:
self.updateVersion()
self.openDB()
self.saveObject('ProgramVersion', self.Version)
version = self.saveObject('newClientVersion',False)
self.closeDB()
version = self.rpc.callRP('Database.getLastVersion')
print 'Version', version
if sLocal != 'NO' and self.rpc.callRP('Database.checkVersion', self.Version, version) == 'Wrong':
self.getNewClientSoftware(id)
sys.exit(0)
# create widget tree ...
# self.gladeName = td.main_glade_name
self.loadGlade('main.xml')
# Menu-items
self.initMenuItems()
self.disableAllMenuItems()
self.addEnabledMenuItems('login','logout1')
self.addEnabledMenuItems('login','data')
self.addEnabledMenuItems('login','action1')
self.addEnabledMenuItems('login','accounting1')
self.addEnabledMenuItems('login','extras')
self.addEnabledMenuItems('login','tools')
self.addEnabledMenuItems('serverMode','databases1')
self.addEnabledMenuItems('user','login1')
self.addEnabledMenuItems('user','tools')
self.addEnabledMenuItems('user','update1')
self.disableMenuItem('login')
self.disableMenuItem('serverMode')
self.enableMenuItem('user')
self.setTitle('window1',self.sTitle)
#self.updateVersion()
def gtk_main_quit(self):
gtk.main_quit()
sStartType = 'client'
sDebug = 'NO'
sLocal = 'NO'
print sys.argv
if len(sys.argv) > 4:
if len(sys.argv[4]) > 1:
sLocal = sys.argv[4]
if len(sys.argv) > 3:
if len(sys.argv[3]) > 1:
sDebug = sys.argv[3]
if len(sys.argv) > 2:
if len(sys.argv[2]) > 1:
sStartType = sys.argv[2]
print sStartType
if sStartType == 'server':
td = cuon.TypeDefs.typedefs_server.typedefs_server()
else:
td = cuon.TypeDefs.typedefs.typedefs()
if len(sys.argv) > 1:
if len(sys.argv[1]) > 1:
td.server = sys.argv[1]
print 'td-server =', td.server
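# A hedged sketch of the positional command line implied by the parsing
# above (argument meanings inferred, not documented in the source):
#   python Cuon.py [server] [starttype] [debug] [local]
#   e.g. python Cuon.py cuon.example.com client YES /usr/share/locale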
d = cuon.Databases.dumps.dumps(td)
d.openDB()
d.saveObject('td', td)
d.closeDB()
if sLocal == 'NO':
DIR = '/usr/share/locale'
else:
DIR = sLocal
locale.setlocale (locale.LC_ALL, '')
APP = 'cuon'
gettext.bindtextdomain (APP, DIR)
gettext.textdomain (APP)
gettext.install (APP, DIR, unicode=1)
gtk.glade.bindtextdomain(APP,DIR)
gtk.glade.textdomain(APP)
print _('Debug by C.U.O.N. = ' ), sDebug
m = MainWindow(sStartType)
m.startMain(sStartType, sDebug,sLocal)
#profile.run('m.startMain(sStartType, sDebug,sLocal)','cuonprofile')
### Import Psyco if available
##try:
## import psyco
## psyco.full()
##except ImportError:
## pass
gtk.main()
|
CuonDeveloper/cuon
|
cuon_client/cuon_newclient/bin/Cuon.py
|
Python
|
gpl-3.0
| 37,879
|
[
"VTK"
] |
af475cc88cd3499403aaa4b704d6be3d17a730e5a93a9ea2eef165890a796401
|
'''
Compiler classes for Cheetah:
ModuleCompiler aka 'Compiler'
ClassCompiler
MethodCompiler
If you are trying to grok this code start with ModuleCompiler.__init__,
ModuleCompiler.compile, and ModuleCompiler.__getattr__.
'''
import sys
import os
import os.path
from os.path import getmtime, exists
import re
import types
import time
import random
import warnings
import copy
from Cheetah.Version import Version, VersionTuple
from Cheetah.SettingsManager import SettingsManager
from Cheetah.Utils.Indenter import indentize # an undocumented preprocessor
from Cheetah import ErrorCatchers
from Cheetah import NameMapper
from Cheetah.Parser import Parser, ParseError, specialVarRE, \
STATIC_CACHE, REFRESH_CACHE, SET_LOCAL, SET_GLOBAL, SET_MODULE, \
unicodeDirectiveRE, encodingDirectiveRE, escapedNewlineRE
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
class Error(Exception): pass
# Settings format: (key, default, docstring)
_DEFAULT_COMPILER_SETTINGS = [
('useNameMapper', True, 'Enable NameMapper for dotted notation and searchList support'),
('useSearchList', True, 'Enable the searchList, requires useNameMapper=True, if disabled, first portion of the $variable is a global, builtin, or local variable that doesn\'t need looking up in the searchList'),
('allowSearchListAsMethArg', True, ''),
('useAutocalling', True, 'Detect and call callable objects in searchList, requires useNameMapper=True'),
('useStackFrames', True, 'Used for NameMapper.valueFromFrameOrSearchList rather than NameMapper.valueFromSearchList'),
('useErrorCatcher', False, 'Turn on the #errorCatcher directive for catching NameMapper errors, etc'),
('alwaysFilterNone', True, 'Filter out None prior to calling the #filter'),
('useFilters', True, 'If False, pass output through str()'),
('includeRawExprInFilterArgs', True, ''),
('useLegacyImportMode', True, 'All #import statements are relocated to the top of the generated Python module'),
('prioritizeSearchListOverSelf', False, 'When iterating the searchList, look into the searchList passed into the initializer instead of Template members first'),
('autoAssignDummyTransactionToSelf', False, ''),
('useKWsDictArgForPassingTrans', True, ''),
('commentOffset', 1, ''),
('outputRowColComments', True, ''),
('includeBlockMarkers', False, 'Wrap #block\'s in a comment in the template\'s output'),
('blockMarkerStart', ('\n<!-- START BLOCK: ', ' -->\n'), ''),
('blockMarkerEnd', ('\n<!-- END BLOCK: ', ' -->\n'), ''),
('defDocStrMsg', 'Autogenerated by Cheetah: The Python-Powered Template Engine', ''),
('setup__str__method', False, ''),
('mainMethodName', 'respond', ''),
('mainMethodNameForSubclasses', 'writeBody', ''),
('indentationStep', ' ' * 4, ''),
('initialMethIndentLevel', 2, ''),
('monitorSrcFile', False, ''),
('outputMethodsBeforeAttributes', True, ''),
('addTimestampsToCompilerOutput', True, ''),
## Customizing the #extends directive
('autoImportForExtendsDirective', True, ''),
('handlerForExtendsDirective', None, ''),
('disabledDirectives', [], 'List of directive keys to disable (without starting "#")'),
('enabledDirectives', [], 'List of directive keys to enable (without starting "#")'),
('disabledDirectiveHooks', [], 'callable(parser, directiveKey)'),
('preparseDirectiveHooks', [], 'callable(parser, directiveKey)'),
('postparseDirectiveHooks', [], 'callable(parser, directiveKey)'),
('preparsePlaceholderHooks', [], 'callable(parser)'),
('postparsePlaceholderHooks', [], 'callable(parser)'),
('expressionFilterHooks', [], '''callable(parser, expr, exprType, rawExpr=None, startPos=None), exprType is the name of the directive, "psp" or "placeholder". The filters *must* return the expr or raise an exception; they can modify the expr if needed'''),
('templateMetaclass', None, 'Strictly optional; will only work with new-style baseclasses'),
('i18NFunctionName', 'self.i18n', ''),
('cheetahVarStartToken', '$', ''),
('commentStartToken', '##', ''),
('multiLineCommentStartToken', '#*', ''),
('multiLineCommentEndToken', '*#', ''),
('gobbleWhitespaceAroundMultiLineComments', True, ''),
('directiveStartToken', '#', ''),
('directiveEndToken', '#', ''),
('allowWhitespaceAfterDirectiveStartToken', False, ''),
('PSPStartToken', '<%', ''),
('PSPEndToken', '%>', ''),
('EOLSlurpToken', '#', ''),
('gettextTokens', ["_", "N_", "ngettext"], ''),
('allowExpressionsInExtendsDirective', False, ''),
('allowEmptySingleLineMethods', False, ''),
('allowNestedDefScopes', True, ''),
('allowPlaceholderFilterArgs', True, ''),
]
DEFAULT_COMPILER_SETTINGS = dict([(v[0], v[1]) for v in _DEFAULT_COMPILER_SETTINGS])
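# A hedged usage sketch of overriding these settings (the 'Compiler' alias
# for ModuleCompiler is defined later in this module; the settings kwarg
# and getModuleCode() are assumed from typical Cheetah usage):
#   compiler = Compiler(source='Hello $name',
#                       settings={'useAutocalling': False})
#   compiler.compile()
#   print compiler.getModuleCode()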
class GenUtils(object):
"""An abstract baseclass for the Compiler classes that provides methods that
perform generic utility functions or generate pieces of output code from
information passed in by the Parser baseclass. These methods don't do any
parsing themselves.
"""
def genTimeInterval(self, timeString):
##@@ TR: need to add some error handling here
if timeString[-1] == 's':
interval = float(timeString[:-1])
elif timeString[-1] == 'm':
interval = float(timeString[:-1])*60
elif timeString[-1] == 'h':
interval = float(timeString[:-1])*60*60
elif timeString[-1] == 'd':
interval = float(timeString[:-1])*60*60*24
elif timeString[-1] == 'w':
interval = float(timeString[:-1])*60*60*24*7
else: # default to minutes
interval = float(timeString)*60
return interval
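# Illustrative values (assumed, not from the original source):
#   genTimeInterval('30s') -> 30.0
#   genTimeInterval('5m')  -> 300.0
#   genTimeInterval('2h')  -> 7200.0
#   genTimeInterval('10')  -> 600.0  # a bare number defaults to minutes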
def genCacheInfo(self, cacheTokenParts):
"""Decipher a placeholder cachetoken
"""
cacheInfo = {}
if cacheTokenParts['REFRESH_CACHE']:
cacheInfo['type'] = REFRESH_CACHE
cacheInfo['interval'] = self.genTimeInterval(cacheTokenParts['interval'])
elif cacheTokenParts['STATIC_CACHE']:
cacheInfo['type'] = STATIC_CACHE
return cacheInfo # is empty if no cache
def genCacheInfoFromArgList(self, argList):
cacheInfo = {'type':REFRESH_CACHE}
for key, val in argList:
if val[0] in '"\'':
val = val[1:-1]
if key == 'timer':
key = 'interval'
val = self.genTimeInterval(val)
cacheInfo[key] = val
return cacheInfo
def genCheetahVar(self, nameChunks, plain=False):
if nameChunks[0][0] in self.setting('gettextTokens'):
self.addGetTextVar(nameChunks)
if self.setting('useNameMapper') and not plain:
return self.genNameMapperVar(nameChunks)
else:
return self.genPlainVar(nameChunks)
def addGetTextVar(self, nameChunks):
"""Output something that gettext can recognize.
This is a harmless side effect necessary to make gettext work when it
is scanning compiled templates for strings marked for translation.
@@TR: another marginally more efficient approach would be to put the
output in a dummy method that is never called.
"""
# @@TR: this should be in the compiler not here
self.addChunk("if False:")
self.indent()
self.addChunk(self.genPlainVar(nameChunks[:]))
self.dedent()
def genPlainVar(self, nameChunks):
"""Generate Python code for a Cheetah $var without using NameMapper
(Unified Dotted Notation with the SearchList).
"""
nameChunks.reverse()
chunk = nameChunks.pop()
pythonCode = chunk[0] + chunk[2]
while nameChunks:
chunk = nameChunks.pop()
pythonCode = (pythonCode + '.' + chunk[0] + chunk[2])
return pythonCode
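# A minimal sketch (input borrowed from the genNameMapperVar docstring
# below; note the nameChunks list is consumed in place):
#   genPlainVar([('a.b.c', True, '[1]'), ('d', False, '()'),
#                ('x.y.z', True, '')])
#   -> 'a.b.c[1].d().x.y.z'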
def genNameMapperVar(self, nameChunks):
"""Generate valid Python code for a Cheetah $var, using NameMapper
(Unified Dotted Notation with the SearchList).
nameChunks = list of var subcomponents represented as tuples
[ (name,useAC,remainderOfExpr),
]
where:
name = the dotted name base
useAC = whether NameMapper should use autocalling on namemapperPart
remainderOfExpr = any arglist, index, or slice
If remainderOfExpr contains a call arglist (e.g. '(1234)') then useAC
is False, otherwise it defaults to True. It is overridden by the global
setting 'useAutocalling' if this setting is False.
EXAMPLE
------------------------------------------------------------------------
if the raw Cheetah Var is
$a.b.c[1].d().x.y.z
nameChunks is the list
[ ('a.b.c',True,'[1]'), # A
('d',False,'()'), # B
('x.y.z',True,''), # C
]
When this method is fed the list above it returns
VFN(VFN(VFFSL(SL, 'a.b.c',True)[1], 'd',False)(), 'x.y.z',True)
which can be represented as
VFN(B`, name=C[0], executeCallables=(useAC and C[1]))C[2]
where:
VFN = NameMapper.valueForName
VFFSL = NameMapper.valueFromFrameOrSearchList
VFSL = NameMapper.valueFromSearchList # optionally used instead of VFFSL
SL = self.searchList()
useAC = self.setting('useAutocalling') # True in this example
A = ('a.b.c',True,'[1]')
B = ('d',False,'()')
C = ('x.y.z',True,'')
C` = VFN( VFN( VFFSL(SL, 'a.b.c',True)[1],
'd',False)(),
'x.y.z',True)
= VFN(B`, name='x.y.z', executeCallables=True)
B` = VFN(A`, name=B[0], executeCallables=(useAC and B[1]))B[2]
A` = VFFSL(SL, name=A[0], executeCallables=(useAC and A[1]))A[2]
Note, if the compiler setting useStackFrames=False (default is true)
then
A` = VFSL([locals()]+SL+[globals(), __builtin__], name=A[0], executeCallables=(useAC and A[1]))A[2]
This option allows Cheetah to be used with Psyco, which doesn't support
stack frame introspection.
"""
defaultUseAC = self.setting('useAutocalling')
useSearchList = self.setting('useSearchList')
nameChunks.reverse()
name, useAC, remainder = nameChunks.pop()
if not useSearchList:
firstDotIdx = name.find('.')
if firstDotIdx != -1 and firstDotIdx < len(name):
beforeFirstDot, afterDot = name[:firstDotIdx], name[firstDotIdx+1:]
pythonCode = ('VFN(' + beforeFirstDot +
',"' + afterDot +
'",' + repr(defaultUseAC and useAC) + ')'
+ remainder)
else:
pythonCode = name+remainder
elif self.setting('useStackFrames'):
pythonCode = ('VFFSL(SL,'
'"'+ name + '",'
+ repr(defaultUseAC and useAC) + ')'
+ remainder)
else:
pythonCode = ('VFSL([locals()]+SL+[globals(), builtin],'
'"'+ name + '",'
+ repr(defaultUseAC and useAC) + ')'
+ remainder)
##
while nameChunks:
name, useAC, remainder = nameChunks.pop()
pythonCode = ('VFN(' + pythonCode +
',"' + name +
'",' + repr(defaultUseAC and useAC) + ')'
+ remainder)
return pythonCode
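# A minimal sketch of the default output (useSearchList=True,
# useStackFrames=True), using the worked example from the docstring above:
#   genNameMapperVar([('a.b.c', True, '[1]'), ('d', False, '()'),
#                     ('x.y.z', True, '')])
#   -> VFN(VFN(VFFSL(SL,"a.b.c",True)[1],"d",False)(),"x.y.z",True)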
##################################################
## METHOD COMPILERS
class MethodCompiler(GenUtils):
def __init__(self, methodName, classCompiler,
initialMethodComment=None,
decorators=None):
self._settingsManager = classCompiler
self._classCompiler = classCompiler
self._moduleCompiler = classCompiler._moduleCompiler
self._methodName = methodName
self._initialMethodComment = initialMethodComment
self._setupState()
self._decorators = decorators or []
def setting(self, key):
return self._settingsManager.setting(key)
def _setupState(self):
self._indent = self.setting('indentationStep')
self._indentLev = self.setting('initialMethIndentLevel')
self._pendingStrConstChunks = []
self._methodSignature = None
self._methodDef = None
self._docStringLines = []
self._methodBodyChunks = []
self._cacheRegionsStack = []
self._callRegionsStack = []
self._captureRegionsStack = []
self._filterRegionsStack = []
self._isErrorCatcherOn = False
self._hasReturnStatement = False
self._isGenerator = False
def cleanupState(self):
"""Called by the containing class compiler instance
"""
pass
def methodName(self):
return self._methodName
def setMethodName(self, name):
self._methodName = name
## methods for managing indentation
def indentation(self):
return self._indent * self._indentLev
def indent(self):
self._indentLev +=1
def dedent(self):
if self._indentLev:
self._indentLev -=1
else:
raise Error('Attempt to dedent when the indentLev is 0')
## methods for final code wrapping
def methodDef(self):
if self._methodDef:
return self._methodDef
else:
return self.wrapCode()
__str__ = methodDef
__unicode__ = methodDef
def wrapCode(self):
self.commitStrConst()
methodDefChunks = (
self.methodSignature(),
'\n',
self.docString(),
self.methodBody() )
methodDef = ''.join(methodDefChunks)
self._methodDef = methodDef
return methodDef
def methodSignature(self):
return self._indent + self._methodSignature + ':'
def setMethodSignature(self, signature):
self._methodSignature = signature
def methodBody(self):
return ''.join( self._methodBodyChunks )
def docString(self):
if not self._docStringLines:
return ''
ind = self._indent*2
docStr = (ind + '"""\n' + ind +
('\n' + ind).join([ln.replace('"""', "'''") for ln in self._docStringLines]) +
'\n' + ind + '"""\n')
return docStr
## methods for adding code
def addMethDocString(self, line):
self._docStringLines.append(line.replace('%', '%%'))
def addChunk(self, chunk):
self.commitStrConst()
chunk = "\n" + self.indentation() + chunk
self._methodBodyChunks.append(chunk)
def appendToPrevChunk(self, appendage):
self._methodBodyChunks[-1] = self._methodBodyChunks[-1] + appendage
def addWriteChunk(self, chunk):
self.addChunk('write(' + chunk + ')')
def addFilteredChunk(self, chunk, filterArgs=None, rawExpr=None, lineCol=None):
if filterArgs is None:
filterArgs = ''
if self.setting('includeRawExprInFilterArgs') and rawExpr:
filterArgs += ', rawExpr=%s'%repr(rawExpr)
if self.setting('alwaysFilterNone'):
if rawExpr and rawExpr.find('\n')==-1 and rawExpr.find('\r')==-1:
self.addChunk("_v = %s # %r"%(chunk, rawExpr))
if lineCol:
self.appendToPrevChunk(' on line %s, col %s'%lineCol)
else:
self.addChunk("_v = %s"%chunk)
if self.setting('useFilters'):
self.addChunk("if _v is not None: write(_filter(_v%s))"%filterArgs)
else:
self.addChunk("if _v is not None: write(str(_v))")
else:
if self.setting('useFilters'):
self.addChunk("write(_filter(%s%s))"%(chunk, filterArgs))
else:
self.addChunk("write(str(%s))"%chunk)
def _appendToPrevStrConst(self, strConst):
if self._pendingStrConstChunks:
self._pendingStrConstChunks.append(strConst)
else:
self._pendingStrConstChunks = [strConst]
def commitStrConst(self):
"""Add the code for outputting the pending strConst without chopping off
any whitespace from it.
"""
if not self._pendingStrConstChunks:
return
strConst = ''.join(self._pendingStrConstChunks)
self._pendingStrConstChunks = []
if not strConst:
return
reprstr = repr(strConst)
i = 0
out = []
if reprstr.startswith('u'):
i = 1
out = ['u']
body = escapedNewlineRE.sub('\\1\n', reprstr[i+1:-1])
if reprstr[i]=="'":
out.append("'''")
out.append(body)
out.append("'''")
else:
out.append('"""')
out.append(body)
out.append('"""')
self.addWriteChunk(''.join(out))
def handleWSBeforeDirective(self):
"""Truncate the pending strCont to the beginning of the current line.
"""
if self._pendingStrConstChunks:
src = self._pendingStrConstChunks[-1]
BOL = max(src.rfind('\n')+1, src.rfind('\r')+1, 0)
if BOL < len(src):
self._pendingStrConstChunks[-1] = src[:BOL]
def isErrorCatcherOn(self):
return self._isErrorCatcherOn
def turnErrorCatcherOn(self):
self._isErrorCatcherOn = True
def turnErrorCatcherOff(self):
self._isErrorCatcherOn = False
# @@TR: consider merging the next two methods into one
def addStrConst(self, strConst):
self._appendToPrevStrConst(strConst)
def addRawText(self, text):
self.addStrConst(text)
def addMethComment(self, comm):
offSet = self.setting('commentOffset')
self.addChunk('#' + ' '*offSet + comm)
def addPlaceholder(self, expr, filterArgs, rawPlaceholder,
cacheTokenParts, lineCol,
silentMode=False):
cacheInfo = self.genCacheInfo(cacheTokenParts)
if cacheInfo:
cacheInfo['ID'] = repr(rawPlaceholder)[1:-1]
self.startCacheRegion(cacheInfo, lineCol, rawPlaceholder=rawPlaceholder)
if self.isErrorCatcherOn():
methodName = self._classCompiler.addErrorCatcherCall(
expr, rawCode=rawPlaceholder, lineCol=lineCol)
expr = 'self.' + methodName + '(localsDict=locals())'
if silentMode:
self.addChunk('try:')
self.indent()
self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)
self.dedent()
self.addChunk('except NotFound: pass')
else:
self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)
if self.setting('outputRowColComments'):
self.appendToPrevChunk(' # from line %s, col %s' % lineCol + '.')
if cacheInfo:
self.endCacheRegion()
def addSilent(self, expr):
self.addChunk( expr )
def addEcho(self, expr, rawExpr=None):
self.addFilteredChunk(expr, rawExpr=rawExpr)
def addSet(self, expr, exprComponents, setStyle):
if setStyle is SET_GLOBAL:
(LVALUE, OP, RVALUE) = (exprComponents.LVALUE,
exprComponents.OP,
exprComponents.RVALUE)
# we need to split the LVALUE to deal with globalSetVars
splitPos1 = LVALUE.find('.')
splitPos2 = LVALUE.find('[')
if splitPos1 > 0 and splitPos2==-1:
splitPos = splitPos1
elif splitPos1 > 0 and splitPos1 < max(splitPos2, 0):
splitPos = splitPos1
else:
splitPos = splitPos2
if splitPos >0:
primary = LVALUE[:splitPos]
secondary = LVALUE[splitPos:]
else:
primary = LVALUE
secondary = ''
LVALUE = 'self._CHEETAH__globalSetVars["' + primary + '"]' + secondary
expr = LVALUE + ' ' + OP + ' ' + RVALUE.strip()
if setStyle is SET_MODULE:
self._moduleCompiler.addModuleGlobal(expr)
else:
self.addChunk(expr)
def addInclude(self, sourceExpr, includeFrom, isRaw):
self.addChunk('self._handleCheetahInclude(' + sourceExpr +
', trans=trans, ' +
'includeFrom="' + includeFrom + '", raw=' +
repr(isRaw) + ')')
def addWhile(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addFor(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addRepeat(self, expr, lineCol=None):
#the _repeatCount stuff here allows nesting of #repeat directives
self._repeatCount = getattr(self, "_repeatCount", -1) + 1
self.addFor('for __i%s in range(%s)' % (self._repeatCount, expr), lineCol=lineCol)
def addIndentingDirective(self, expr, lineCol=None):
if expr and not expr[-1] == ':':
expr = expr + ':'
self.addChunk( expr )
if lineCol:
self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
self.indent()
def addReIndentingDirective(self, expr, dedent=True, lineCol=None):
self.commitStrConst()
if dedent:
self.dedent()
if not expr[-1] == ':':
expr = expr + ':'
self.addChunk( expr )
if lineCol:
self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
self.indent()
def addIf(self, expr, lineCol=None):
"""For a full #if ... #end if directive
"""
self.addIndentingDirective(expr, lineCol=lineCol)
def addOneLineIf(self, expr, lineCol=None):
"""For a full #if ... #end if directive
"""
self.addIndentingDirective(expr, lineCol=lineCol)
def addTernaryExpr(self, conditionExpr, trueExpr, falseExpr, lineCol=None):
"""For a single-lie #if ... then .... else ... directive
<condition> then <trueExpr> else <falseExpr>
"""
self.addIndentingDirective(conditionExpr, lineCol=lineCol)
self.addFilteredChunk(trueExpr)
self.dedent()
self.addIndentingDirective('else')
self.addFilteredChunk(falseExpr)
self.dedent()
def addElse(self, expr, dedent=True, lineCol=None):
expr = re.sub(r'else[ \f\t]+if', 'elif', expr)
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addElif(self, expr, dedent=True, lineCol=None):
self.addElse(expr, dedent=dedent, lineCol=lineCol)
def addUnless(self, expr, lineCol=None):
self.addIf('if not (' + expr + ')')
def addClosure(self, functionName, argsList, parserComment):
argStringChunks = []
for arg in argsList:
chunk = arg[0]
if not arg[1] == None:
chunk += '=' + arg[1]
argStringChunks.append(chunk)
signature = "def " + functionName + "(" + ','.join(argStringChunks) + "):"
self.addIndentingDirective(signature)
self.addChunk('#'+parserComment)
def addTry(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addExcept(self, expr, dedent=True, lineCol=None):
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addFinally(self, expr, dedent=True, lineCol=None):
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addReturn(self, expr):
assert not self._isGenerator
self.addChunk(expr)
self._hasReturnStatement = True
def addYield(self, expr):
assert not self._hasReturnStatement
self._isGenerator = True
if expr.replace('yield', '').strip():
self.addChunk(expr)
else:
self.addChunk('if _dummyTrans:')
self.indent()
self.addChunk('yield trans.response().getvalue()')
self.addChunk('trans = DummyTransaction()')
self.addChunk('write = trans.response().write')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk(
'raise TypeError("This method cannot be called with a trans arg")')
self.dedent()
def addPass(self, expr):
self.addChunk(expr)
def addDel(self, expr):
self.addChunk(expr)
def addAssert(self, expr):
self.addChunk(expr)
def addRaise(self, expr):
self.addChunk(expr)
def addBreak(self, expr):
self.addChunk(expr)
def addContinue(self, expr):
self.addChunk(expr)
def addPSP(self, PSP):
self.commitStrConst()
autoIndent = False
if PSP[0] == '=':
PSP = PSP[1:]
if PSP:
self.addWriteChunk('_filter(' + PSP + ')')
return
elif PSP.lower() == 'end':
self.dedent()
return
elif PSP[-1] == '$':
autoIndent = True
PSP = PSP[:-1]
elif PSP[-1] == ':':
autoIndent = True
for line in PSP.splitlines():
self.addChunk(line)
if autoIndent:
self.indent()
def nextCacheID(self):
return ('_'+str(random.randrange(100, 999))
+ str(random.randrange(10000, 99999)))
def startCacheRegion(self, cacheInfo, lineCol, rawPlaceholder=None):
# @@TR: we should add some runtime logging to this
ID = self.nextCacheID()
interval = cacheInfo.get('interval', None)
test = cacheInfo.get('test', None)
customID = cacheInfo.get('id', None)
if customID:
ID = customID
varyBy = cacheInfo.get('varyBy', repr(ID))
self._cacheRegionsStack.append(ID) # attrib of current methodCompiler
# @@TR: add this to a special class var as well
self.addChunk('')
self.addChunk('## START CACHE REGION: ID='+ID+
'. line %s, col %s'%lineCol + ' in the source.')
self.addChunk('_RECACHE_%(ID)s = False'%locals())
self.addChunk('_cacheRegion_%(ID)s = self.getCacheRegion(regionID='%locals()
+ repr(ID)
+ ', cacheInfo=%r'%cacheInfo
+ ')')
self.addChunk('if _cacheRegion_%(ID)s.isNew():'%locals())
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
self.dedent()
self.addChunk('_cacheItem_%(ID)s = _cacheRegion_%(ID)s.getCacheItem('%locals()
+varyBy+')')
self.addChunk('if _cacheItem_%(ID)s.hasExpired():'%locals())
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
self.dedent()
if test:
self.addChunk('if ' + test + ':')
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
self.dedent()
self.addChunk('if (not _RECACHE_%(ID)s) and _cacheItem_%(ID)s.getRefreshTime():'%locals())
self.indent()
#self.addChunk('print "DEBUG"+"-"*50')
self.addChunk('try:')
self.indent()
self.addChunk('_output = _cacheItem_%(ID)s.renderOutput()'%locals())
self.dedent()
self.addChunk('except KeyError:')
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
#self.addChunk('print "DEBUG"+"*"*50')
self.dedent()
self.addChunk('else:')
self.indent()
self.addWriteChunk('_output')
self.addChunk('del _output')
self.dedent()
self.dedent()
self.addChunk('if _RECACHE_%(ID)s or not _cacheItem_%(ID)s.getRefreshTime():'%locals())
self.indent()
self.addChunk('_orig_trans%(ID)s = trans'%locals())
self.addChunk('trans = _cacheCollector_%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _cacheCollector_%(ID)s.response().write'%locals())
if interval:
self.addChunk(("_cacheItem_%(ID)s.setExpiryTime(currentTime() +"%locals())
+ str(interval) + ")")
def endCacheRegion(self):
ID = self._cacheRegionsStack.pop()
self.addChunk('trans = _orig_trans%(ID)s'%locals())
self.addChunk('write = trans.response().write')
self.addChunk('_cacheData = _cacheCollector_%(ID)s.response().getvalue()'%locals())
self.addChunk('_cacheItem_%(ID)s.setData(_cacheData)'%locals())
self.addWriteChunk('_cacheData')
self.addChunk('del _cacheData')
self.addChunk('del _cacheCollector_%(ID)s'%locals())
self.addChunk('del _orig_trans%(ID)s'%locals())
self.dedent()
self.addChunk('## END CACHE REGION: '+ID)
self.addChunk('')
def nextCallRegionID(self):
return self.nextCacheID()
def startCallRegion(self, functionName, args, lineCol, regionTitle='CALL'):
class CallDetails(object):
pass
callDetails = CallDetails()
callDetails.ID = ID = self.nextCallRegionID()
callDetails.functionName = functionName
callDetails.args = args
callDetails.lineCol = lineCol
callDetails.usesKeywordArgs = False
self._callRegionsStack.append((ID, callDetails)) # attrib of current methodCompiler
self.addChunk('## START %(regionTitle)s REGION: '%locals()
+ID
+' of '+functionName
+' at line %s, col %s'%lineCol + ' in the source.')
self.addChunk('_orig_trans%(ID)s = trans'%locals())
self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
self.addChunk('self._CHEETAH__isBuffering = True')
self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _callCollector%(ID)s.response().write'%locals())
def setCallArg(self, argName, lineCol):
ID, callDetails = self._callRegionsStack[-1]
argName = str(argName)
if callDetails.usesKeywordArgs:
self._endCallArg()
else:
callDetails.usesKeywordArgs = True
self.addChunk('_callKws%(ID)s = {}'%locals())
self.addChunk('_currentCallArgname%(ID)s = %(argName)r'%locals())
callDetails.currentArgname = argName
def _endCallArg(self):
ID, callDetails = self._callRegionsStack[-1]
currCallArg = callDetails.currentArgname
self.addChunk(('_callKws%(ID)s[%(currCallArg)r] ='
' _callCollector%(ID)s.response().getvalue()')%locals())
self.addChunk('del _callCollector%(ID)s'%locals())
self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _callCollector%(ID)s.response().write'%locals())
def endCallRegion(self, regionTitle='CALL'):
ID, callDetails = self._callRegionsStack[-1]
functionName, initialKwArgs, lineCol = (
callDetails.functionName, callDetails.args, callDetails.lineCol)
def reset(ID=ID):
self.addChunk('trans = _orig_trans%(ID)s'%locals())
self.addChunk('write = trans.response().write')
self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
self.addChunk('del _wasBuffering%(ID)s'%locals())
self.addChunk('del _orig_trans%(ID)s'%locals())
if not callDetails.usesKeywordArgs:
reset()
self.addChunk('_callArgVal%(ID)s = _callCollector%(ID)s.response().getvalue()'%locals())
self.addChunk('del _callCollector%(ID)s'%locals())
if initialKwArgs:
initialKwArgs = ', '+initialKwArgs
self.addFilteredChunk('%(functionName)s(_callArgVal%(ID)s%(initialKwArgs)s)'%locals())
self.addChunk('del _callArgVal%(ID)s'%locals())
else:
if initialKwArgs:
initialKwArgs = initialKwArgs+', '
self._endCallArg()
reset()
self.addFilteredChunk('%(functionName)s(%(initialKwArgs)s**_callKws%(ID)s)'%locals())
self.addChunk('del _callKws%(ID)s'%locals())
self.addChunk('## END %(regionTitle)s REGION: '%locals()
+ID
+' of '+functionName
+' at line %s, col %s'%lineCol + ' in the source.')
self.addChunk('')
self._callRegionsStack.pop() # attrib of current methodCompiler
def nextCaptureRegionID(self):
return self.nextCacheID()
def startCaptureRegion(self, assignTo, lineCol):
class CaptureDetails: pass
captureDetails = CaptureDetails()
captureDetails.ID = ID = self.nextCaptureRegionID()
captureDetails.assignTo = assignTo
captureDetails.lineCol = lineCol
self._captureRegionsStack.append((ID, captureDetails)) # attrib of current methodCompiler
self.addChunk('## START CAPTURE REGION: '+ID
+' '+assignTo
+' at line %s, col %s'%lineCol + ' in the source.')
self.addChunk('_orig_trans%(ID)s = trans'%locals())
self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
self.addChunk('self._CHEETAH__isBuffering = True')
self.addChunk('trans = _captureCollector%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _captureCollector%(ID)s.response().write'%locals())
def endCaptureRegion(self):
ID, captureDetails = self._captureRegionsStack.pop()
assignTo, lineCol = (captureDetails.assignTo, captureDetails.lineCol)
self.addChunk('trans = _orig_trans%(ID)s'%locals())
self.addChunk('write = trans.response().write')
self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
self.addChunk('%(assignTo)s = _captureCollector%(ID)s.response().getvalue()'%locals())
self.addChunk('del _orig_trans%(ID)s'%locals())
self.addChunk('del _captureCollector%(ID)s'%locals())
self.addChunk('del _wasBuffering%(ID)s'%locals())
def setErrorCatcher(self, errorCatcherName):
self.turnErrorCatcherOn()
self.addChunk('if self._CHEETAH__errorCatchers.has_key("' + errorCatcherName + '"):')
self.indent()
self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["' +
errorCatcherName + '"]')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["'
+ errorCatcherName + '"] = ErrorCatchers.'
+ errorCatcherName + '(self)'
)
self.dedent()
def nextFilterRegionID(self):
return self.nextCacheID()
def setTransform(self, transformer, isKlass):
self.addChunk('trans = TransformerTransaction()')
self.addChunk('trans._response = trans.response()')
self.addChunk('trans._response._filter = %s' % transformer)
self.addChunk('write = trans._response.write')
def setFilter(self, theFilter, isKlass):
class FilterDetails:
pass
filterDetails = FilterDetails()
filterDetails.ID = ID = self.nextFilterRegionID()
filterDetails.theFilter = theFilter
filterDetails.isKlass = isKlass
self._filterRegionsStack.append((ID, filterDetails)) # attrib of current methodCompiler
self.addChunk('_orig_filter%(ID)s = _filter'%locals())
if isKlass:
self.addChunk('_filter = self._CHEETAH__currentFilter = ' + theFilter.strip() +
'(self).filter')
else:
if theFilter.lower() == 'none':
self.addChunk('_filter = self._CHEETAH__initialFilter')
else:
# is string representing the name of a builtin filter
self.addChunk('filterName = ' + repr(theFilter))
self.addChunk('if self._CHEETAH__filters.has_key("' + theFilter + '"):')
self.indent()
self.addChunk('_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk('_filter = self._CHEETAH__currentFilter'
+' = \\\n\t\t\tself._CHEETAH__filters[filterName] = '
+ 'getattr(self._CHEETAH__filtersLib, filterName)(self).filter')
self.dedent()
def closeFilterBlock(self):
ID, filterDetails = self._filterRegionsStack.pop()
#self.addChunk('_filter = self._CHEETAH__initialFilter')
#self.addChunk('_filter = _orig_filter%(ID)s'%locals())
self.addChunk('_filter = self._CHEETAH__currentFilter = _orig_filter%(ID)s'%locals())
class AutoMethodCompiler(MethodCompiler):
def _setupState(self):
MethodCompiler._setupState(self)
self._argStringList = [ ("self", None) ]
self._streamingEnabled = True
self._isClassMethod = None
self._isStaticMethod = None
def _useKWsDictArgForPassingTrans(self):
alreadyHasTransArg = [argname for argname, defval in self._argStringList
if argname=='trans']
return (self.methodName()!='respond'
and not alreadyHasTransArg
and self.setting('useKWsDictArgForPassingTrans'))
def isClassMethod(self):
if self._isClassMethod is None:
self._isClassMethod = '@classmethod' in self._decorators
return self._isClassMethod
def isStaticMethod(self):
if self._isStaticMethod is None:
self._isStaticMethod = '@staticmethod' in self._decorators
return self._isStaticMethod
def cleanupState(self):
MethodCompiler.cleanupState(self)
self.commitStrConst()
if self._cacheRegionsStack:
self.endCacheRegion()
if self._callRegionsStack:
self.endCallRegion()
if self._streamingEnabled:
kwargsName = None
positionalArgsListName = None
for argname, defval in self._argStringList:
if argname.strip().startswith('**'):
kwargsName = argname.strip().replace('**', '')
break
elif argname.strip().startswith('*'):
positionalArgsListName = argname.strip().replace('*', '')
if not kwargsName and self._useKWsDictArgForPassingTrans():
kwargsName = 'KWS'
self.addMethArg('**KWS', None)
self._kwargsName = kwargsName
if not self._useKWsDictArgForPassingTrans():
if not kwargsName and not positionalArgsListName:
self.addMethArg('trans', 'None')
else:
self._streamingEnabled = False
self._indentLev = self.setting('initialMethIndentLevel')
mainBodyChunks = self._methodBodyChunks
self._methodBodyChunks = []
self._addAutoSetupCode()
self._methodBodyChunks.extend(mainBodyChunks)
self._addAutoCleanupCode()
def _addAutoSetupCode(self):
if self._initialMethodComment:
self.addChunk(self._initialMethodComment)
if self._streamingEnabled and not self.isClassMethod() and not self.isStaticMethod():
if self._useKWsDictArgForPassingTrans() and self._kwargsName:
self.addChunk('trans = %s.get("trans")'%self._kwargsName)
self.addChunk('if (not trans and not self._CHEETAH__isBuffering'
' and not callable(self.transaction)):')
self.indent()
self.addChunk('trans = self.transaction'
' # is None unless self.awake() was called')
self.dedent()
self.addChunk('if not trans:')
self.indent()
self.addChunk('trans = DummyTransaction()')
if self.setting('autoAssignDummyTransactionToSelf'):
self.addChunk('self.transaction = trans')
self.addChunk('_dummyTrans = True')
self.dedent()
self.addChunk('else: _dummyTrans = False')
else:
self.addChunk('trans = DummyTransaction()')
self.addChunk('_dummyTrans = True')
self.addChunk('write = trans.response().write')
if self.setting('useNameMapper'):
argNames = [arg[0] for arg in self._argStringList]
allowSearchListAsMethArg = self.setting('allowSearchListAsMethArg')
if allowSearchListAsMethArg and 'SL' in argNames:
pass
elif allowSearchListAsMethArg and 'searchList' in argNames:
self.addChunk('SL = searchList')
elif not self.isClassMethod() and not self.isStaticMethod():
self.addChunk('SL = self._CHEETAH__searchList')
else:
self.addChunk('SL = [KWS]')
if self.setting('useFilters'):
if self.isClassMethod() or self.isStaticMethod():
self.addChunk('_filter = lambda x, **kwargs: unicode(x)')
else:
self.addChunk('_filter = self._CHEETAH__currentFilter')
self.addChunk('')
self.addChunk("#" *40)
self.addChunk('## START - generated method body')
self.addChunk('')
def _addAutoCleanupCode(self):
self.addChunk('')
self.addChunk("#" *40)
self.addChunk('## END - generated method body')
self.addChunk('')
if not self._isGenerator:
self.addStop()
self.addChunk('')
def addStop(self, expr=None):
self.addChunk('return _dummyTrans and trans.response().getvalue() or ""')
def addMethArg(self, name, defVal=None):
self._argStringList.append( (name, defVal) )
def methodSignature(self):
argStringChunks = []
for arg in self._argStringList:
chunk = arg[0]
if chunk == 'self' and self.isClassMethod():
chunk = 'cls'
if chunk == 'self' and self.isStaticMethod():
# Skip the "self" method for @staticmethod decorators
continue
if arg[1] is not None:
chunk += '=' + arg[1]
argStringChunks.append(chunk)
argString = (', ').join(argStringChunks)
output = []
if self._decorators:
output.append(''.join([self._indent + decorator + '\n'
for decorator in self._decorators]))
output.append(self._indent + "def "
+ self.methodName() + "(" +
argString + "):\n\n")
return ''.join(output)
##################################################
## CLASS COMPILERS
_initMethod_initCheetah = """\
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
""".replace('\n', '\n'+' '*8)
class ClassCompiler(GenUtils):
methodCompilerClass = AutoMethodCompiler
methodCompilerClassForInit = MethodCompiler
def __init__(self, className, mainMethodName='respond',
moduleCompiler=None,
fileName=None,
settingsManager=None):
self._settingsManager = settingsManager
self._fileName = fileName
self._className = className
self._moduleCompiler = moduleCompiler
self._mainMethodName = mainMethodName
self._setupState()
methodCompiler = self._spawnMethodCompiler(
mainMethodName,
initialMethodComment='## CHEETAH: main method generated for this template')
self._setActiveMethodCompiler(methodCompiler)
if fileName and self.setting('monitorSrcFile'):
self._addSourceFileMonitoring(fileName)
def setting(self, key):
return self._settingsManager.setting(key)
def __getattr__(self, name):
"""Provide access to the methods and attributes of the MethodCompiler
at the top of the activeMethods stack: one-way namespace sharing
WARNING: Use .setMethods to assign the attributes of the MethodCompiler
from the methods of this class!!! or you will be assigning to attributes
of this object instead."""
if name in self.__dict__:
return self.__dict__[name]
elif hasattr(self.__class__, name):
return getattr(self.__class__, name)
elif self._activeMethodsList and hasattr(self._activeMethodsList[-1], name):
return getattr(self._activeMethodsList[-1], name)
else:
raise AttributeError(name)
def _setupState(self):
self._classDef = None
self._decoratorsForNextMethod = []
self._activeMethodsList = [] # stack while parsing/generating
self._finishedMethodsList = [] # stored in order of completion
self._methodsIndex = {} # indexed by method name
self._baseClass = 'Template'
self._classDocStringLines = []
# printed after methods in the gen class def:
self._generatedAttribs = ['_CHEETAH__instanceInitialized = False']
self._generatedAttribs.append('_CHEETAH_version = __CHEETAH_version__')
self._generatedAttribs.append(
'_CHEETAH_versionTuple = __CHEETAH_versionTuple__')
if self.setting('addTimestampsToCompilerOutput'):
self._generatedAttribs.append('_CHEETAH_genTime = __CHEETAH_genTime__')
self._generatedAttribs.append('_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__')
self._generatedAttribs.append('_CHEETAH_src = __CHEETAH_src__')
self._generatedAttribs.append(
'_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__')
if self.setting('templateMetaclass'):
self._generatedAttribs.append('__metaclass__ = '+self.setting('templateMetaclass'))
self._initMethChunks = []
self._blockMetaData = {}
self._errorCatcherCount = 0
self._placeholderToErrorCatcherMap = {}
def cleanupState(self):
while self._activeMethodsList:
methCompiler = self._popActiveMethodCompiler()
self._swallowMethodCompiler(methCompiler)
self._setupInitMethod()
if self._mainMethodName == 'respond':
if self.setting('setup__str__method'):
self._generatedAttribs.append('def __str__(self): return self.respond()')
self.addAttribute('_mainCheetahMethod_for_' + self._className +
'= ' + repr(self._mainMethodName) )
def _setupInitMethod(self):
__init__ = self._spawnMethodCompiler('__init__',
klass=self.methodCompilerClassForInit)
__init__.setMethodSignature("def __init__(self, *args, **KWs)")
__init__.addChunk('super(%s, self).__init__(*args, **KWs)' % self._className)
__init__.addChunk(_initMethod_initCheetah % {'className' : self._className})
for chunk in self._initMethChunks:
__init__.addChunk(chunk)
__init__.cleanupState()
self._swallowMethodCompiler(__init__, pos=0)
def _addSourceFileMonitoring(self, fileName):
# @@TR: this stuff needs auditing for Cheetah 2.0
# the first bit is added to init
self.addChunkToInit('self._filePath = ' + repr(fileName))
self.addChunkToInit('self._fileMtime = ' + str(getmtime(fileName)) )
# the rest is added to the main output method of the class ('mainMethod')
self.addChunk('if exists(self._filePath) and ' +
'getmtime(self._filePath) > self._fileMtime:')
self.indent()
self.addChunk('self._compile(file=self._filePath, moduleName='+self._className + ')')
self.addChunk(
'write(getattr(self, self._mainCheetahMethod_for_' + self._className +
')(trans=trans))')
self.addStop()
self.dedent()
def setClassName(self, name):
self._className = name
def className(self):
return self._className
def setBaseClass(self, baseClassName):
self._baseClass = baseClassName
def setMainMethodName(self, methodName):
if methodName == self._mainMethodName:
return
## change the name in the methodCompiler and add new reference
mainMethod = self._methodsIndex[self._mainMethodName]
mainMethod.setMethodName(methodName)
self._methodsIndex[methodName] = mainMethod
## make sure that fileUpdate code still works properly:
chunkToChange = ('write(self.' + self._mainMethodName + '(trans=trans))')
chunks = mainMethod._methodBodyChunks
if chunkToChange in chunks:
for i in range(len(chunks)):
if chunks[i] == chunkToChange:
chunks[i] = ('write(self.' + methodName + '(trans=trans))')
## get rid of the old reference and update self._mainMethodName
del self._methodsIndex[self._mainMethodName]
self._mainMethodName = methodName
def setMainMethodArgs(self, argsList):
mainMethodCompiler = self._methodsIndex[self._mainMethodName]
for argName, defVal in argsList:
mainMethodCompiler.addMethArg(argName, defVal)
def _spawnMethodCompiler(self, methodName, klass=None,
initialMethodComment=None):
if klass is None:
klass = self.methodCompilerClass
decorators = self._decoratorsForNextMethod or []
self._decoratorsForNextMethod = []
methodCompiler = klass(methodName, classCompiler=self,
decorators=decorators,
initialMethodComment=initialMethodComment)
self._methodsIndex[methodName] = methodCompiler
return methodCompiler
def _setActiveMethodCompiler(self, methodCompiler):
self._activeMethodsList.append(methodCompiler)
def _getActiveMethodCompiler(self):
return self._activeMethodsList[-1]
def _popActiveMethodCompiler(self):
return self._activeMethodsList.pop()
def _swallowMethodCompiler(self, methodCompiler, pos=None):
methodCompiler.cleanupState()
if pos is None:
self._finishedMethodsList.append( methodCompiler )
else:
self._finishedMethodsList.insert(pos, methodCompiler)
return methodCompiler
def startMethodDef(self, methodName, argsList, parserComment):
methodCompiler = self._spawnMethodCompiler(
methodName, initialMethodComment=parserComment)
self._setActiveMethodCompiler(methodCompiler)
for argName, defVal in argsList:
methodCompiler.addMethArg(argName, defVal)
def _finishedMethods(self):
return self._finishedMethodsList
def addDecorator(self, decoratorExpr):
"""Set the decorator to be used with the next method in the source.
See _spawnMethodCompiler() and MethodCompiler for the details of how
this is used.
"""
self._decoratorsForNextMethod.append(decoratorExpr)
def addClassDocString(self, line):
self._classDocStringLines.append( line.replace('%', '%%'))
def addChunkToInit(self, chunk):
self._initMethChunks.append(chunk)
def addAttribute(self, attribExpr):
## first test to make sure that the user hasn't used any fancy Cheetah syntax
# (placeholders, directives, etc.) inside the expression
if attribExpr.find('VFN(') != -1 or attribExpr.find('VFFSL(') != -1:
raise ParseError(self,
'Invalid #attr directive.' +
' It should only contain simple Python literals.')
## now add the attribute
self._generatedAttribs.append(attribExpr)
def addSuper(self, argsList, parserComment=None):
className = self._className #self._baseClass
methodName = self._getActiveMethodCompiler().methodName()
argStringChunks = []
for arg in argsList:
chunk = arg[0]
if arg[1] is not None:
chunk += '=' + arg[1]
argStringChunks.append(chunk)
argString = ','.join(argStringChunks)
self.addFilteredChunk(
'super(%(className)s, self).%(methodName)s(%(argString)s)'%locals())
def addErrorCatcherCall(self, codeChunk, rawCode='', lineCol=''):
if rawCode in self._placeholderToErrorCatcherMap:
methodName = self._placeholderToErrorCatcherMap[rawCode]
if not self.setting('outputRowColComments'):
self._methodsIndex[methodName].addMethDocString(
'plus at line %s, col %s'%lineCol)
return methodName
self._errorCatcherCount += 1
methodName = '__errorCatcher' + str(self._errorCatcherCount)
self._placeholderToErrorCatcherMap[rawCode] = methodName
catcherMeth = self._spawnMethodCompiler(
methodName,
klass=MethodCompiler,
initialMethodComment=('## CHEETAH: Generated from ' + rawCode +
' at line %s, col %s'%lineCol + '.')
)
catcherMeth.setMethodSignature('def ' + methodName +
'(self, localsDict={})')
# is this use of localsDict right?
catcherMeth.addChunk('try:')
catcherMeth.indent()
catcherMeth.addChunk("return eval('''" + codeChunk +
"''', globals(), localsDict)")
catcherMeth.dedent()
catcherMeth.addChunk('except self._CHEETAH__errorCatcher.exceptions(), e:')
catcherMeth.indent()
catcherMeth.addChunk("return self._CHEETAH__errorCatcher.warn(exc_val=e, code= " +
repr(codeChunk) + " , rawCode= " +
repr(rawCode) + " , lineCol=" + str(lineCol) +")")
catcherMeth.cleanupState()
self._swallowMethodCompiler(catcherMeth)
return methodName
def closeDef(self):
self.commitStrConst()
methCompiler = self._popActiveMethodCompiler()
self._swallowMethodCompiler(methCompiler)
def closeBlock(self):
self.commitStrConst()
methCompiler = self._popActiveMethodCompiler()
methodName = methCompiler.methodName()
if self.setting('includeBlockMarkers'):
endMarker = self.setting('blockMarkerEnd')
methCompiler.addStrConst(endMarker[0] + methodName + endMarker[1])
self._swallowMethodCompiler(methCompiler)
#metaData = self._blockMetaData[methodName]
#rawDirective = metaData['raw']
#lineCol = metaData['lineCol']
## insert the code to call the block, caching if #cache directive is on
codeChunk = 'self.' + methodName + '(trans=trans)'
self.addChunk(codeChunk)
#self.appendToPrevChunk(' # generated from ' + repr(rawDirective) )
#if self.setting('outputRowColComments'):
# self.appendToPrevChunk(' at line %s, col %s' % lineCol + '.')
## code wrapping methods
def classDef(self):
if self._classDef:
return self._classDef
else:
return self.wrapClassDef()
__str__ = classDef
__unicode__ = classDef
def wrapClassDef(self):
ind = self.setting('indentationStep')
classDefChunks = [self.classSignature(),
self.classDocstring(),
]
def addMethods():
classDefChunks.extend([
ind + '#'*50,
ind + '## CHEETAH GENERATED METHODS',
'\n',
self.methodDefs(),
])
def addAttributes():
classDefChunks.extend([
ind + '#'*50,
ind + '## CHEETAH GENERATED ATTRIBUTES',
'\n',
self.attributes(),
])
if self.setting('outputMethodsBeforeAttributes'):
addMethods()
addAttributes()
else:
addAttributes()
addMethods()
classDef = '\n'.join(classDefChunks)
self._classDef = classDef
return classDef
def classSignature(self):
return "class %s(%s):" % (self.className(), self._baseClass)
def classDocstring(self):
if not self._classDocStringLines:
return ''
ind = self.setting('indentationStep')
docStr = ('%(ind)s"""\n%(ind)s' +
'\n%(ind)s'.join(self._classDocStringLines) +
'\n%(ind)s"""\n'
) % {'ind':ind}
return docStr
def methodDefs(self):
methodDefs = [methGen.methodDef() for methGen in self._finishedMethods()]
return '\n\n'.join(methodDefs)
def attributes(self):
attribs = [self.setting('indentationStep') + str(attrib)
for attrib in self._generatedAttribs ]
return '\n\n'.join(attribs)
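# Editor's note -- an illustrative sketch of the class definition that
# wrapClassDef() assembles from the pieces above (whether methods or
# attributes come first depends on 'outputMethodsBeforeAttributes'):
#
#     class MyTemplate(Template):
#         """<class docstring lines>"""
#         ##################################################
#         ## CHEETAH GENERATED METHODS
#
#         def respond(self, trans=None):
#             ...
#         ##################################################
#         ## CHEETAH GENERATED ATTRIBUTES
#
#         _CHEETAH__instanceInitialized = False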
class AutoClassCompiler(ClassCompiler):
pass
##################################################
## MODULE COMPILERS
class ModuleCompiler(SettingsManager, GenUtils):
parserClass = Parser
classCompilerClass = AutoClassCompiler
def __init__(self, source=None, file=None,
moduleName='DynamicallyCompiledCheetahTemplate',
mainClassName=None, # string
mainMethodName=None, # string
baseclassName=None, # string
extraImportStatements=None, # list of strings
settings=None # dict
):
super(ModuleCompiler, self).__init__()
if settings:
self.updateSettings(settings)
# disable useStackFrames if the C version of NameMapper isn't compiled;
# it's painfully slow in the Python version and bites Windows users all
# the time:
if not NameMapper.C_VERSION:
if not sys.platform.startswith('java'):
warnings.warn(
"\nYou don't have the C version of NameMapper installed! "
"I'm disabling Cheetah's useStackFrames option as it is "
"painfully slow with the Python version of NameMapper. "
"You should get a copy of Cheetah with the compiled C version of NameMapper."
)
self.setSetting('useStackFrames', False)
self._compiled = False
self._moduleName = moduleName
if not mainClassName:
self._mainClassName = moduleName
else:
self._mainClassName = mainClassName
self._mainMethodNameArg = mainMethodName
if mainMethodName:
self.setSetting('mainMethodName', mainMethodName)
self._baseclassName = baseclassName
self._filePath = None
self._fileMtime = None
if source and file:
raise TypeError("Cannot compile from a source string AND file.")
elif isinstance(file, basestring): # it's a filename.
f = open(file) # Raises IOError.
source = f.read()
f.close()
self._filePath = file
self._fileMtime = os.path.getmtime(file)
elif hasattr(file, 'read'):
source = file.read() # Can't set filename or mtime--they're not accessible.
elif file:
raise TypeError("'file' argument must be a filename string or file-like object")
if self._filePath:
self._fileDirName, self._fileBaseName = os.path.split(self._filePath)
self._fileBaseNameRoot, self._fileBaseNameExt = os.path.splitext(self._fileBaseName)
if not isinstance(source, basestring):
source = unicode(source)
# by converting to string here we allow objects such as other Templates
# to be passed in
# Handle the #indent directive by converting it to other directives.
# (Over the long term we'll make it a real directive.)
if source == "":
warnings.warn("You supplied an empty string for the source!", )
else:
unicodeMatch = unicodeDirectiveRE.search(source)
encodingMatch = encodingDirectiveRE.search(source)
if unicodeMatch:
if encodingMatch:
raise ParseError(
self, "#encoding and #unicode are mutually exclusive! "
"Use one or the other.")
source = unicodeDirectiveRE.sub('', source)
if isinstance(source, str):
encoding = unicodeMatch.group(1) or 'ascii'
source = unicode(source, encoding)
elif encodingMatch:
encodings = encodingMatch.groups()
if len(encodings):
encoding = encodings[0]
source = source.decode(encoding)
else:
source = unicode(source)
if source.find('#indent') != -1: #@@TR: undocumented hack
source = indentize(source)
self._parser = self.parserClass(source, filename=self._filePath, compiler=self)
self._setupCompilerState()
def __getattr__(self, name):
"""Provide one-way access to the methods and attributes of the
ClassCompiler, and thereby the MethodCompilers as well.
WARNING: use the ClassCompiler's set* methods to assign its attributes
from the methods of this class, or you will be assigning to attributes
of this ModuleCompiler object instead.
"""
if name in self.__dict__:
return self.__dict__[name]
elif hasattr(self.__class__, name):
return getattr(self.__class__, name)
elif self._activeClassesList and hasattr(self._activeClassesList[-1], name):
return getattr(self._activeClassesList[-1], name)
else:
raise AttributeError(name)
def _initializeSettings(self):
self.updateSettings(copy.deepcopy(DEFAULT_COMPILER_SETTINGS))
def _setupCompilerState(self):
self._activeClassesList = []
self._finishedClassesList = [] # listed in order of completion
self._finishedClassIndex = {} # indexed by class name
self._moduleDef = None
self._moduleShBang = '#!/usr/bin/env python'
self._moduleEncoding = 'ascii'
self._moduleEncodingStr = ''
self._moduleHeaderLines = []
self._moduleDocStringLines = []
self._specialVars = {}
self._importStatements = [
"import sys",
"import os",
"import os.path",
'try:',
' import builtins as builtin',
'except ImportError:',
' import __builtin__ as builtin',
"from os.path import getmtime, exists",
"import time",
"import types",
"from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion",
"from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple",
"from Cheetah.Template import Template",
"from Cheetah.DummyTransaction import *",
"from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList",
"from Cheetah.CacheRegion import CacheRegion",
"import Cheetah.Filters as Filters",
"import Cheetah.ErrorCatchers as ErrorCatchers",
]
self._importedVarNames = ['sys',
'os',
'os.path',
'time',
'types',
'Template',
'DummyTransaction',
'NotFound',
'Filters',
'ErrorCatchers',
'CacheRegion',
]
self._moduleConstants = [
"VFFSL=valueFromFrameOrSearchList",
"VFSL=valueFromSearchList",
"VFN=valueForName",
"currentTime=time.time",
]
def compile(self):
classCompiler = self._spawnClassCompiler(self._mainClassName)
if self._baseclassName:
classCompiler.setBaseClass(self._baseclassName)
self._addActiveClassCompiler(classCompiler)
self._parser.parse()
self._swallowClassCompiler(self._popActiveClassCompiler())
self._compiled = True
self._parser.cleanup()
def _spawnClassCompiler(self, className, klass=None):
if klass is None:
klass = self.classCompilerClass
classCompiler = klass(className,
moduleCompiler=self,
mainMethodName=self.setting('mainMethodName'),
fileName=self._filePath,
settingsManager=self,
)
return classCompiler
def _addActiveClassCompiler(self, classCompiler):
self._activeClassesList.append(classCompiler)
def _getActiveClassCompiler(self):
return self._activeClassesList[-1]
def _popActiveClassCompiler(self):
return self._activeClassesList.pop()
def _swallowClassCompiler(self, classCompiler):
classCompiler.cleanupState()
self._finishedClassesList.append( classCompiler )
self._finishedClassIndex[classCompiler.className()] = classCompiler
return classCompiler
def _finishedClasses(self):
return self._finishedClassesList
def importedVarNames(self):
return self._importedVarNames
def addImportedVarNames(self, varNames, raw_statement=None):
settings = self.settings()
if not varNames:
return
if not settings.get('useLegacyImportMode'):
if raw_statement and getattr(self, '_methodBodyChunks'):
self.addChunk(raw_statement)
else:
self._importedVarNames.extend(varNames)
## methods for adding stuff to the module and class definitions
def setBaseClass(self, baseClassName):
if self._mainMethodNameArg:
self.setMainMethodName(self._mainMethodNameArg)
else:
self.setMainMethodName(self.setting('mainMethodNameForSubclasses'))
if self.setting('handlerForExtendsDirective'):
handler = self.setting('handlerForExtendsDirective')
baseClassName = handler(compiler=self, baseClassName=baseClassName)
self._getActiveClassCompiler().setBaseClass(baseClassName)
elif (not self.setting('autoImportForExtendsDirective')
or baseClassName=='object' or baseClassName in self.importedVarNames()):
self._getActiveClassCompiler().setBaseClass(baseClassName)
# no need to import
else:
##################################################
## If the #extends directive contains a classname or modulename that isn't
# in self.importedVarNames() already, we assume that we need to add
# an implied 'from ModName import ClassName' where ModName == ClassName.
# - This is the case in WebKit servlet modules.
# - We also assume that the final . separates the classname from the
# module name. This might break if people do something really fancy
# with their dots and namespaces.
baseclasses = baseClassName.split(',')
for klass in baseclasses:
chunks = klass.split('.')
if len(chunks)==1:
self._getActiveClassCompiler().setBaseClass(klass)
if klass not in self.importedVarNames():
modName = klass
# we assume the class name to be the module name
# and that it's not a builtin:
importStatement = "from %s import %s" % (modName, klass)
self.addImportStatement(importStatement)
self.addImportedVarNames((klass,))
else:
needToAddImport = True
modName = chunks[0]
#print chunks, ':', self.importedVarNames()
for chunk in chunks[1:-1]:
if modName in self.importedVarNames():
needToAddImport = False
finalBaseClassName = klass.replace(modName+'.', '')
self._getActiveClassCompiler().setBaseClass(finalBaseClassName)
break
else:
modName += '.'+chunk
if needToAddImport:
modName, finalClassName = '.'.join(chunks[:-1]), chunks[-1]
#if finalClassName != chunks[:-1][-1]:
if finalClassName != chunks[-2]:
# we assume the class name to be the module name
modName = '.'.join(chunks)
self._getActiveClassCompiler().setBaseClass(finalClassName)
importStatement = "from %s import %s" % (modName, finalClassName)
self.addImportStatement(importStatement)
self.addImportedVarNames( [finalClassName,] )
def setCompilerSetting(self, key, valueExpr):
self.setSetting(key, eval(valueExpr) )
self._parser.configureParser()
def setCompilerSettings(self, keywords, settingsStr):
KWs = keywords
merge = True
if 'nomerge' in KWs:
merge = False
if 'reset' in KWs:
# @@TR: this is actually caught by the parser at the moment.
# subject to change in the future
self._initializeSettings()
self._parser.configureParser()
return
elif 'python' in KWs:
settingsReader = self.updateSettingsFromPySrcStr
# this comes from SettingsManager
else:
# this comes from SettingsManager
settingsReader = self.updateSettingsFromConfigStr
settingsReader(settingsStr)
self._parser.configureParser()
def setShBang(self, shBang):
self._moduleShBang = shBang
def setModuleEncoding(self, encoding):
self._moduleEncoding = encoding
def getModuleEncoding(self):
return self._moduleEncoding
def addModuleHeader(self, line):
"""Adds a header comment to the top of the generated module.
"""
self._moduleHeaderLines.append(line)
def addModuleDocString(self, line):
"""Adds a line to the generated module docstring.
"""
self._moduleDocStringLines.append(line)
def addModuleGlobal(self, line):
"""Adds a line of global module code. It is inserted after the import
statements and Cheetah default module constants.
"""
self._moduleConstants.append(line)
def addSpecialVar(self, basename, contents, includeUnderscores=True):
"""Adds module __specialConstant__ to the module globals.
"""
name = includeUnderscores and '__'+basename+'__' or basename
self._specialVars[name] = contents.strip()
def addImportStatement(self, impStatement):
settings = self.settings()
if not self._methodBodyChunks or settings.get('useLegacyImportMode'):
# In the case where we are importing inline in the middle of a source block
# we don't want to inadvertently import the module at the top of the file either
self._importStatements.append(impStatement)
#@@TR 2005-01-01: there's almost certainly a cleaner way to do this!
importVarNames = impStatement[impStatement.find('import') + len('import'):].split(',')
importVarNames = [var.split()[-1] for var in importVarNames] # handles aliases
importVarNames = [var for var in importVarNames if not var == '*']
self.addImportedVarNames(importVarNames, raw_statement=impStatement) #used by #extend for auto-imports
def addAttribute(self, attribName, expr):
self._getActiveClassCompiler().addAttribute(attribName + ' =' + expr)
def addComment(self, comm):
if re.match(r'#+$', comm): # skip divider comments made up of '#' characters
return
specialVarMatch = specialVarRE.match(comm)
if specialVarMatch:
# @@TR: this is a bit hackish and is being replaced with
# #set module varName = ...
return self.addSpecialVar(specialVarMatch.group(1),
comm[specialVarMatch.end():])
elif comm.startswith('doc:'):
addLine = self.addMethDocString
comm = comm[len('doc:'):].strip()
elif comm.startswith('doc-method:'):
addLine = self.addMethDocString
comm = comm[len('doc-method:'):].strip()
elif comm.startswith('doc-module:'):
addLine = self.addModuleDocString
comm = comm[len('doc-module:'):].strip()
elif comm.startswith('doc-class:'):
addLine = self.addClassDocString
comm = comm[len('doc-class:'):].strip()
elif comm.startswith('header:'):
addLine = self.addModuleHeader
comm = comm[len('header:'):].strip()
else:
addLine = self.addMethComment
for line in comm.splitlines():
addLine(line)
## methods for module code wrapping
def getModuleCode(self):
if not self._compiled:
self.compile()
if self._moduleDef:
return self._moduleDef
else:
return self.wrapModuleDef()
__str__ = getModuleCode
def wrapModuleDef(self):
self.addSpecialVar('CHEETAH_docstring', self.setting('defDocStrMsg'))
self.addModuleGlobal('__CHEETAH_version__ = %r'%Version)
self.addModuleGlobal('__CHEETAH_versionTuple__ = %r'%(VersionTuple,))
if self.setting('addTimestampsToCompilerOutput'):
self.addModuleGlobal('__CHEETAH_genTime__ = %r'%time.time())
self.addModuleGlobal('__CHEETAH_genTimestamp__ = %r'%self.timestamp())
if self._filePath:
timestamp = self.timestamp(self._fileMtime)
self.addModuleGlobal('__CHEETAH_src__ = %r'%self._filePath)
self.addModuleGlobal('__CHEETAH_srcLastModified__ = %r'%timestamp)
else:
self.addModuleGlobal('__CHEETAH_src__ = None')
self.addModuleGlobal('__CHEETAH_srcLastModified__ = None')
moduleDef = """%(header)s
%(docstring)s
##################################################
## DEPENDENCIES
%(imports)s
##################################################
## MODULE CONSTANTS
%(constants)s
%(specialVars)s
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %%s. Templates compiled before version %%s must be recompiled.'%%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
%(classes)s
## END CLASS DEFINITION
if not hasattr(%(mainClassName)s, '_initCheetahAttributes'):
templateAPIClass = getattr(%(mainClassName)s, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(%(mainClassName)s)
%(footer)s
""" % {'header': self.moduleHeader(),
'docstring': self.moduleDocstring(),
'specialVars': self.specialVars(),
'imports': self.importStatements(),
'constants': self.moduleConstants(),
'classes': self.classDefs(),
'footer': self.moduleFooter(),
'mainClassName': self._mainClassName,
}
self._moduleDef = moduleDef
return moduleDef
def timestamp(self, theTime=None):
if not theTime:
theTime = time.time()
return time.asctime(time.localtime(theTime))
def moduleHeader(self):
header = self._moduleShBang + '\n'
header += self._moduleEncodingStr + '\n'
if self._moduleHeaderLines:
offSet = self.setting('commentOffset')
header += (
'#' + ' '*offSet +
('\n#'+ ' '*offSet).join(self._moduleHeaderLines) + '\n')
return header
def moduleDocstring(self):
if not self._moduleDocStringLines:
return ''
return ('"""' +
'\n'.join(self._moduleDocStringLines) +
'\n"""\n')
def specialVars(self):
chunks = []
theVars = self._specialVars
keys = sorted(theVars.keys())
for key in keys:
chunks.append(key + ' = ' + repr(theVars[key]) )
return '\n'.join(chunks)
def importStatements(self):
return '\n'.join(self._importStatements)
def moduleConstants(self):
return '\n'.join(self._moduleConstants)
def classDefs(self):
classDefs = [klass.classDef() for klass in self._finishedClasses()]
return '\n\n'.join(classDefs)
def moduleFooter(self):
return """
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=%(className)s()).run()
""" % {'className':self._mainClassName}
##################################################
## Make Compiler an alias for ModuleCompiler
Compiler = ModuleCompiler
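# --- Editor's illustrative sketch (not part of the original module) -----
# Minimal end-to-end use of the compiler defined above: compile a Cheetah
# template source string and print the generated Python module code.
# Assumes the surrounding Cheetah package is importable; the demo name and
# template string are hypothetical.
if __name__ == '__main__':
    _demoCompiler = ModuleCompiler(source='Hello $name!', moduleName='Demo')
    print(_demoCompiler.getModuleCode())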
|
binhex/moviegrabber
|
lib/site-packages/Cheetah/Compiler.py
|
Python
|
gpl-3.0
| 80,397
|
[
"VisIt"
] |
fd3bf440ae43ce0e13ead4fab3fc194144ad657dbf54270c2055ccf9d933c92c
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
podaac_integration_example.py
Use OCW to download a PO.DAAC dataset, evaluate and plot (contour map).
In this example:
1. Download a remote PO.DAAC (https://podaac.jpl.nasa.gov/) dataset
and read it into an OCW dataset object.
2. Create a temporal STD metric using one of the OCW standard metrics.
3. Evaluate the dataset against the metric and plot a contour map.
OCW modules demonstrated:
1. datasource/podaac_datasource
2. metrics
3. evaluation
4. plotter
"""
from __future__ import print_function
import ocw.data_source.podaac_datasource as podaac
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
datasetId = 'PODAAC-CCF30-01XXX'
variable = 'uwnd'
name = 'PO.DAAC_test_dataset'
OUTPUT_PLOT = "ccmp_temporal_std"
# Step 1: Download remote PO.DAAC Dataset and read it into an OCW Dataset Object.
print("Available Level4 PO.DAAC Granules: %s" % podaac.list_available_extract_granule_dataset_ids())
print("Extracting variable '%s' from Level4 granule '%s' and converting it into a OCW dataset."
% (variable, datasetId))
ccmp_dataset = podaac.extract_l4_granule(
variable=variable, dataset_id=datasetId, name=name)
print("CCMP_Dataset.values shape: (times, lats, lons) - %s \n" %
(ccmp_dataset.values.shape,))
# Accessing the latitudes and longitudes of the netCDF file
lats = ccmp_dataset.lats
lons = ccmp_dataset.lons
# Step 2: Build a Metric to use for Evaluation - Temporal STD for this example.
# You can build your own metrics, but OCW also ships with some common metrics
print("Setting up a Temporal STD metric to use for evaluation")
std = metrics.TemporalStdDev()
# Step 3: Create an Evaluation Object using Datasets and our Metric.
# The Evaluation Class Signature is:
# Evaluation(reference, targets, metrics, subregions=None)
# Evaluation can take in multiple targets and metrics, so we need to convert
# our examples into Python lists. Evaluation will iterate over the lists
print("Making the Evaluation definition")
# The Temporal STD metric takes one target dataset, so the reference
# dataset should be None
std_evaluation = evaluation.Evaluation(None, [ccmp_dataset], [std])
print("Executing the Evaluation using the object's run() method")
std_evaluation.run()
# Step 4: Make a Plot from the Evaluation.results.
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# The Evaluation results docs say:
# The shape of results is (num_metrics, num_target_datasets) if no subregion
# Accessing the actual results when we have used 1 metric and 1 dataset is
# done this way:
print("Accessing the Results of the Evaluation run")
results = std_evaluation.unary_results[0][0]
print("The results are of type: %s" % type(results))
print("Generating a contour map using ocw.plotter.draw_contour_map()")
fname = OUTPUT_PLOT
gridshape = (4, 5)  # 20 years' worth of plots arranged in a 4 x 5 grid
plot_title = "Cross-Calibrated Multi-Platform Temporal Standard Deviation"
sub_titles = range(2002, 2010, 1)
plotter.draw_contour_map(results, lats, lons, fname,
gridshape=gridshape, ptitle=plot_title,
subtitles=sub_titles)
|
apache/climate
|
examples/podaac_integration_example.py
|
Python
|
apache-2.0
| 4,033
|
[
"NetCDF"
] |
f4d3887e409a5d7ce687924e9ce3c74d6070611c074022b6279846607f213b88
|
# Dual Annealing implementation.
# Copyright (c) 2018 Sylvain Gubian <sylvain.gubian@pmi.com>,
# Yang Xiang <yang.xiang@pmi.com>
# Author: Sylvain Gubian, Yang Xiang, PMP S.A.
"""
A Dual Annealing global optimization algorithm
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize import OptimizeResult
from scipy.optimize import minimize
from scipy.special import gammaln
from scipy._lib._util import check_random_state
__all__ = ['dual_annealing']
class VisitingDistribution(object):
"""
Class used to generate new coordinates based on the distorted
Cauchy-Lorentz distribution. Depending on the steps within the strategy
chain, the class implements the strategy for generating new location
changes.
Parameters
----------
lb : array_like
A 1-D NumPy ndarray containing lower bounds of the generated
components. Neither NaN nor inf is allowed.
ub : array_like
A 1-D NumPy ndarray containing upper bounds for the generated
components. Neither NaN nor inf is allowed.
visiting_param : float
Parameter for visiting distribution. Default value is 2.62.
Higher values give the visiting distribution a heavier tail, which
makes the algorithm jump to more distant regions.
The value range is (0, 3]. Its value is fixed for the life of the
object.
rand_gen : {`~numpy.random.RandomState`, `~numpy.random.Generator`}
A `~numpy.random.RandomState` or `~numpy.random.Generator` object
used as the source of random numbers.
"""
TAIL_LIMIT = 1.e8
MIN_VISIT_BOUND = 1.e-10
def __init__(self, lb, ub, visiting_param, rand_gen):
# if you wish to make _visiting_param adjustable during the life of
# the object then _factor2, _factor3, _factor5, _d1, _factor6 will
# have to be dynamically calculated in `visit_fn`. They're factored
# out here so they don't need to be recalculated all the time.
self._visiting_param = visiting_param
self.rand_gen = rand_gen
self.lower = lb
self.upper = ub
self.bound_range = ub - lb
# these are invariant numbers unless visiting_param changes
self._factor2 = np.exp((4.0 - self._visiting_param) * np.log(
self._visiting_param - 1.0))
self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0)
/ (self._visiting_param - 1.0))
self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * (
3.0 - self._visiting_param))
self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5
self._d1 = 2.0 - self._factor5
self._factor6 = np.pi * (1.0 - self._factor5) / np.sin(
np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1))
def visiting(self, x, step, temperature):
""" Based on the step in the strategy chain, new coordinated are
generated by changing all components is the same time or only
one of them, the new values are computed with visit_fn method
"""
dim = x.size
if step < dim:
# Changing all coordinates with a new visiting value
visits = self.visit_fn(temperature, dim)
upper_sample, lower_sample = self.rand_gen.uniform(size=2)
visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample
visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample
x_visit = visits + x
a = x_visit - self.lower
b = np.fmod(a, self.bound_range) + self.bound_range
x_visit = np.fmod(b, self.bound_range) + self.lower
x_visit[np.fabs(
x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10
else:
# Changing only one coordinate at a time based on strategy
# chain step
x_visit = np.copy(x)
visit = self.visit_fn(temperature, 1)
if visit > self.TAIL_LIMIT:
visit = self.TAIL_LIMIT * self.rand_gen.uniform()
elif visit < -self.TAIL_LIMIT:
visit = -self.TAIL_LIMIT * self.rand_gen.uniform()
index = step - dim
x_visit[index] = visit + x[index]
a = x_visit[index] - self.lower[index]
b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]
x_visit[index] = np.fmod(b, self.bound_range[
index]) + self.lower[index]
if np.fabs(x_visit[index] - self.lower[
index]) < self.MIN_VISIT_BOUND:
x_visit[index] += self.MIN_VISIT_BOUND
return x_visit
def visit_fn(self, temperature, dim):
""" Formula Visita from p. 405 of reference [2] """
x, y = self.rand_gen.normal(size=(dim, 2)).T
factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))
factor4 = self._factor4_p * factor1
# sigmax
x *= np.exp(-(self._visiting_param - 1.0) * np.log(
self._factor6 / factor4) / (3.0 - self._visiting_param))
den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) /
(3.0 - self._visiting_param))
return x / den
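# --- Editor's illustrative sketch (hypothetical, not part of SciPy) -----
def _demo_visiting_distribution():
    """Hypothetical smoke test: draw one visiting step for a 2-D point.

    Uses only names defined above (VisitingDistribution,
    check_random_state) plus NumPy; the bounds and seed are made up.
    """
    rng = check_random_state(42)
    lb = np.array([-5.0, -5.0])
    ub = np.array([5.0, 5.0])
    vd = VisitingDistribution(lb, ub, visiting_param=2.62, rand_gen=rng)
    # step < dim here, so all coordinates are perturbed at once
    return vd.visiting(np.zeros(2), step=0, temperature=5230.0)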
class EnergyState(object):
"""
Class used to record the energy state. At any time, it knows the
currently used coordinates and the most recent best location.
Parameters
----------
lower : array_like
A 1-D NumPy ndarray containing lower bounds for generating the initial
random components in the `reset` method.
upper : array_like
A 1-D NumPy ndarray containing upper bounds for generating the initial
random components in the `reset` method.
Neither NaN nor inf is allowed.
callback : callable, ``callback(x, f, context)``, optional
A callback function which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and `context` has value in [0, 1, 2]
"""
# Maximum number of trials for generating a valid starting point
MAX_REINIT_COUNT = 1000
def __init__(self, lower, upper, callback=None):
self.ebest = None
self.current_energy = None
self.current_location = None
self.xbest = None
self.lower = lower
self.upper = upper
self.callback = callback
def reset(self, func_wrapper, rand_gen, x0=None):
"""
Initialize the current location in the search domain. If `x0` is not
provided, a random location within the bounds is generated.
"""
if x0 is None:
self.current_location = rand_gen.uniform(self.lower, self.upper,
size=len(self.lower))
else:
self.current_location = np.copy(x0)
init_error = True
reinit_counter = 0
while init_error:
self.current_energy = func_wrapper.fun(self.current_location)
if self.current_energy is None:
raise ValueError('Objective function is returning None')
if (not np.isfinite(self.current_energy) or np.isnan(
self.current_energy)):
if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
init_error = False
message = (
'Stopping algorithm because the function '
'creates NaN or (+/-) infinity values even when '
'trying new random parameters'
)
raise ValueError(message)
self.current_location = rand_gen.uniform(self.lower,
self.upper,
size=self.lower.size)
reinit_counter += 1
else:
init_error = False
# If first time reset, initialize ebest and xbest
if self.ebest is None and self.xbest is None:
self.ebest = self.current_energy
self.xbest = np.copy(self.current_location)
# Otherwise, we keep them in case of reannealing reset
def update_best(self, e, x, context):
self.ebest = e
self.xbest = np.copy(x)
if self.callback is not None:
val = self.callback(x, e, context)
if val is not None:
if val:
return('Callback function requested to stop early by '
'returning True')
def update_current(self, e, x):
self.current_energy = e
self.current_location = np.copy(x)
class StrategyChain(object):
"""
Class that implements, within a Markov chain, the strategy for location
acceptance and local search decision making.
Parameters
----------
acceptance_param : float
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
visit_dist : VisitingDistribution
Instance of `VisitingDistribution` class.
func_wrapper : ObjectiveFunWrapper
Instance of `ObjectiveFunWrapper` class.
minimizer_wrapper: LocalSearchWrapper
Instance of `LocalSearchWrapper` class.
rand_gen : {`~numpy.random.RandomState`, `~numpy.random.Generator`}
A `~numpy.random.RandomState` or `~numpy.random.Generator`
object used as the source of random numbers.
energy_state: EnergyState
Instance of `EnergyState` class.
"""
def __init__(self, acceptance_param, visit_dist, func_wrapper,
minimizer_wrapper, rand_gen, energy_state):
# Local strategy chain minimum energy and location
self.emin = energy_state.current_energy
self.xmin = np.array(energy_state.current_location)
# Global optimizer state
self.energy_state = energy_state
# Acceptance parameter
self.acceptance_param = acceptance_param
# Visiting distribution instance
self.visit_dist = visit_dist
# Wrapper to objective function
self.func_wrapper = func_wrapper
# Wrapper to the local minimizer
self.minimizer_wrapper = minimizer_wrapper
self.not_improved_idx = 0
self.not_improved_max_idx = 1000
self._rand_gen = rand_gen
self.temperature_step = 0
self.K = 100 * len(energy_state.current_location)
def accept_reject(self, j, e, x_visit):
r = self._rand_gen.uniform()
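# Editor's annotation: below, pqv is effectively
#     pqv = pqv_temp ** (1 / (1 - acceptance_param))
# computed via exp/log, and the uphill move is accepted when the
# uniform draw r <= pqv; downhill moves were already taken
# unconditionally in run().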
pqv_temp = (self.acceptance_param - 1.0) * (
e - self.energy_state.current_energy) / (
self.temperature_step + 1.)
if pqv_temp <= 0.:
pqv = 0.
else:
pqv = np.exp(np.log(pqv_temp) / (
1. - self.acceptance_param))
if r <= pqv:
# We accept the new location and update state
self.energy_state.update_current(e, x_visit)
self.xmin = np.copy(self.energy_state.current_location)
# No improvement for a long time
if self.not_improved_idx >= self.not_improved_max_idx:
if j == 0 or self.energy_state.current_energy < self.emin:
self.emin = self.energy_state.current_energy
self.xmin = np.copy(self.energy_state.current_location)
def run(self, step, temperature):
self.temperature_step = temperature / float(step + 1)
self.not_improved_idx += 1
for j in range(self.energy_state.current_location.size * 2):
if j == 0:
if step == 0:
self.energy_state_improved = True
else:
self.energy_state_improved = False
x_visit = self.visit_dist.visiting(
self.energy_state.current_location, j, temperature)
# Calling the objective function
e = self.func_wrapper.fun(x_visit)
if e < self.energy_state.current_energy:
# We have got a better energy value
self.energy_state.update_current(e, x_visit)
if e < self.energy_state.ebest:
val = self.energy_state.update_best(e, x_visit, 0)
if val is not None:
if val:
return val
self.energy_state_improved = True
self.not_improved_idx = 0
else:
# We have not improved but do we accept the new location?
self.accept_reject(j, e, x_visit)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
return ('Maximum number of function calls reached '
'during annealing')
# End of StrategyChain loop
def local_search(self):
# Decision making for performing a local search
# based on strategy chain results
# If the energy has improved, or there has been no improvement for too
# long, perform a local search from the best strategy chain location
if self.energy_state_improved:
# Global energy has improved, let's see if LS improves further
e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest,
self.energy_state.ebest)
if e < self.energy_state.ebest:
self.not_improved_idx = 0
val = self.energy_state.update_best(e, x, 1)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
return ('Maximum number of function calls reached '
'during local search')
# Check probability of a need to perform a LS even if no improvement
do_ls = False
if self.K < 90 * len(self.energy_state.current_location):
pls = np.exp(self.K * (
self.energy_state.ebest - self.energy_state.current_energy) /
self.temperature_step)
if pls >= self._rand_gen.uniform():
do_ls = True
# Global energy not improved, let's see what LS gives
# on the best strategy chain location
if self.not_improved_idx >= self.not_improved_max_idx:
do_ls = True
if do_ls:
e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin)
self.xmin = np.copy(x)
self.emin = e
self.not_improved_idx = 0
self.not_improved_max_idx = self.energy_state.current_location.size
if e < self.energy_state.ebest:
val = self.energy_state.update_best(
self.emin, self.xmin, 2)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
return ('Maximum number of function calls reached '
'during dual annealing')
class ObjectiveFunWrapper(object):
def __init__(self, func, maxfun=1e7, *args):
self.func = func
self.args = args
# Number of objective function evaluations
self.nfev = 0
# Number of gradient function evaluations, if used
self.ngev = 0
# Number of Hessian evaluations of the objective function, if used
self.nhev = 0
self.maxfun = maxfun
def fun(self, x):
self.nfev += 1
return self.func(x, *self.args)
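# Editor's illustrative sketch (hypothetical, not part of SciPy): the
# wrapper above only counts calls while delegating to the user function,
# e.g.:
#
#     wrapper = ObjectiveFunWrapper(lambda x: np.sum(x ** 2))
#     wrapper.fun(np.ones(3))
#     wrapper.fun(np.zeros(3))
#     assert wrapper.nfev == 2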
class LocalSearchWrapper(object):
"""
Class used to wrap around the minimizer used for local search
Default local minimizer is SciPy minimizer L-BFGS-B
"""
LS_MAXITER_RATIO = 6
LS_MAXITER_MIN = 100
LS_MAXITER_MAX = 1000
def __init__(self, bounds, func_wrapper, **kwargs):
self.func_wrapper = func_wrapper
self.kwargs = kwargs
self.minimizer = minimize
bounds_list = list(zip(*bounds))
self.lower = np.array(bounds_list[0])
self.upper = np.array(bounds_list[1])
# If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method
if not self.kwargs:
n = len(self.lower)
ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,
self.LS_MAXITER_MIN),
self.LS_MAXITER_MAX)
self.kwargs['method'] = 'L-BFGS-B'
self.kwargs['options'] = {
'maxiter': ls_max_iter,
}
self.kwargs['bounds'] = list(zip(self.lower, self.upper))
def local_search(self, x, e):
# Run local search from the given x location where energy value is e
x_tmp = np.copy(x)
mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)
if 'njev' in mres.keys():
self.func_wrapper.ngev += mres.njev
if 'nhev' in mres.keys():
self.func_wrapper.nhev += mres.nhev
# Check if the result is a valid value
is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)
in_bounds = np.all(mres.x >= self.lower) and np.all(
mres.x <= self.upper)
is_valid = is_finite and in_bounds
# Use the new point only if it is valid and returns a better result
if is_valid and mres.fun < e:
return mres.fun, mres.x
else:
return e, x_tmp
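# --- Editor's illustrative sketch (hypothetical, not part of SciPy) -----
def _demo_dual_annealing():
    """Minimize a shifted 3-D sphere with dual_annealing (defined below).

    The global name `dual_annealing` resolves at call time, so defining
    this helper above the function is harmless; the objective, bounds,
    seed and maxiter are made up for illustration.
    """
    def sphere(x):
        return np.sum((x - 0.5) ** 2)
    bounds = [(-10.0, 10.0)] * 3
    return dual_annealing(sphere, bounds, seed=1234, maxiter=100)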
def dual_annealing(func, bounds, args=(), maxiter=1000,
local_search_options={}, initial_temp=5230.,
restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
maxfun=1e7, seed=None, no_local_search=False,
callback=None, x0=None):
"""
Find the global minimum of a function using Dual Annealing.
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
and ``args`` is a tuple of any additional fixed parameters needed to
completely specify the function.
bounds : sequence, shape (n, 2)
Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
defining bounds for the objective function parameter.
args : tuple, optional
Any additional fixed parameters needed to completely specify the
objective function.
maxiter : int, optional
The maximum number of global search iterations. Default value is 1000.
local_search_options : dict, optional
Extra keyword arguments to be passed to the local minimizer
(`minimize`). Some important options could be:
``method`` for the minimizer method to use and ``args`` for
objective function additional arguments.
initial_temp : float, optional
        The initial temperature. Use higher values to facilitate a wider
        search of the energy landscape, allowing dual_annealing to escape
        local minima that it is trapped in. Default value is 5230. Range is
        (0.01, 5.e4].
restart_temp_ratio : float, optional
        During the annealing process, the temperature decreases; when it
        reaches ``initial_temp * restart_temp_ratio``, the reannealing process
        is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
visit : float, optional
        Parameter for the visiting distribution. Default value is 2.62. Higher
        values give the visiting distribution a heavier tail, which makes
        the algorithm jump to more distant regions. The value range is (0, 3].
accept : float, optional
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
maxfun : int, optional
        Soft limit for the number of objective function calls. If the
        algorithm is in the middle of a local search, this number may be
        exceeded; the algorithm will then stop just after the local search is
        done. Default value is 1e7.
seed : {int, `~numpy.random.RandomState`, `~numpy.random.Generator`}, optional
If `seed` is not specified the `~numpy.random.RandomState` singleton is
used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with `seed`.
If `seed` is already a ``RandomState`` or ``Generator`` instance, then
that instance is used.
Specify `seed` for repeatable minimizations. The random numbers
generated with this seed only affect the visiting distribution function
and new coordinates generation.
no_local_search : bool, optional
If `no_local_search` is set to True, a traditional Generalized
Simulated Annealing will be performed with no local search
strategy applied.
callback : callable, optional
A callback function with signature ``callback(x, f, context)``,
which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and ``context`` has value in [0, 1, 2], with the
following meaning:
- 0: minimum detected in the annealing process.
- 1: detection occurred in the local search process.
- 2: detection done in the dual annealing process.
If the callback implementation returns True, the algorithm will stop.
x0 : ndarray, shape(n,), optional
Coordinates of a single N-D starting point.
Returns
-------
res : OptimizeResult
The optimization result represented as a `OptimizeResult` object.
Important attributes are: ``x`` the solution array, ``fun`` the value
of the function at the solution, and ``message`` which describes the
cause of the termination.
See `OptimizeResult` for a description of other attributes.
Notes
-----
This function implements the Dual Annealing optimization. This stochastic
approach derived from [3]_ combines the generalization of CSA (Classical
Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled
to a strategy for applying a local search on accepted locations [4]_.
An alternative implementation of this same algorithm is described in [5]_
and benchmarks are presented in [6]_. This approach introduces an advanced
method to refine the solution found by the generalized annealing
process. This algorithm uses a distorted Cauchy-Lorentz visiting
distribution, with its shape controlled by the parameter :math:`q_{v}`
.. math::
g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
\\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
\\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\
\\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
\\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}
Where :math:`t` is the artificial time. This visiting distribution is used
to generate a trial jump distance :math:`\\Delta x(t)` of variable
:math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.
From the starting point, after calling the visiting distribution
function, the acceptance probability is computed as follows:
.. math::
p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
\\frac{1}{1-q_{a}}}\\}}
    Where :math:`q_{a}` is an acceptance parameter. For :math:`q_{a}<1`, zero
acceptance probability is assigned to the cases where
.. math::
[1-(1-q_{a}) \\beta \\Delta E] < 0
The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to
.. math::
T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
1 + t\\right)^{q_{v}-1}-1}
Where :math:`q_{v}` is the visiting parameter.
.. versionadded:: 1.2.0
References
----------
.. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs
        statistics. Journal of Statistical Physics, 52, 479-487 (1988).
.. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.
Physica A, 233, 395-406 (1996).
.. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated
Annealing Algorithm and Its Application to the Thomson Model.
Physics Letters A, 233, 216-220 (1997).
.. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated
Annealing. Physical Review E, 62, 4473 (2000).
.. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized
Simulated Annealing for Efficient Global Optimization: the GenSA
Package for R. The R Journal, Volume 5/1 (2013).
.. [6] Mullen, K. Continuous Global Optimization in R. Journal of
Statistical Software, 60(6), 1 - 45, (2014). DOI:10.18637/jss.v060.i06
Examples
--------
The following example is a 10-D problem, with many local minima.
The function involved is called Rastrigin
(https://en.wikipedia.org/wiki/Rastrigin_function)
>>> from scipy.optimize import dual_annealing
>>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
>>> lw = [-5.12] * 10
>>> up = [5.12] * 10
>>> ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)
>>> ret.x
array([-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, -3.97165720e-09,
-6.29151648e-09, -6.53145322e-09, -3.93616815e-09, -6.55623025e-09,
-6.05775280e-09, -5.00668935e-09]) # may vary
>>> ret.fun
0.000000
""" # noqa: E501
    if x0 is not None and len(x0) != len(bounds):
        raise ValueError('Bounds size does not match x0')
lu = list(zip(*bounds))
lower = np.array(lu[0])
upper = np.array(lu[1])
# Check that restart temperature ratio is correct
if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.:
raise ValueError('Restart temperature ratio has to be in range (0, 1)')
# Checking bounds are valid
if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(
np.isnan(lower)) or np.any(np.isnan(upper))):
raise ValueError('Some bounds values are inf values or nan values')
# Checking that bounds are consistent
if not np.all(lower < upper):
raise ValueError('Bounds are not consistent min < max')
# Checking that bounds are the same length
if not len(lower) == len(upper):
raise ValueError('Bounds do not have the same dimensions')
# Wrapper for the objective function
func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)
    # Wrapper for the minimizer
minimizer_wrapper = LocalSearchWrapper(
bounds, func_wrapper, **local_search_options)
# Initialization of RandomState for reproducible runs if seed provided
rand_state = check_random_state(seed)
# Initialization of the energy state
energy_state = EnergyState(lower, upper, callback)
energy_state.reset(func_wrapper, rand_state, x0)
# Minimum value of annealing temperature reached to perform
# re-annealing
temperature_restart = initial_temp * restart_temp_ratio
# VisitingDistribution instance
visit_dist = VisitingDistribution(lower, upper, visit, rand_state)
# Strategy chain instance
strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,
minimizer_wrapper, rand_state, energy_state)
need_to_stop = False
iteration = 0
message = []
# OptimizeResult object to be returned
optimize_res = OptimizeResult()
optimize_res.success = True
optimize_res.status = 0
t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
# Run the search loop
    while not need_to_stop:
for i in range(maxiter):
# Compute temperature for this step
s = float(i) + 2.0
t2 = np.exp((visit - 1) * np.log(s)) - 1.0
temperature = initial_temp * t1 / t2
if iteration >= maxiter:
message.append("Maximum number of iteration reached")
need_to_stop = True
break
# Need a re-annealing process?
if temperature < temperature_restart:
energy_state.reset(func_wrapper, rand_state)
break
# starting strategy chain
val = strategy_chain.run(i, temperature)
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
# Possible local search at the end of the strategy chain
if not no_local_search:
val = strategy_chain.local_search()
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
iteration += 1
# Setting the OptimizeResult values
optimize_res.x = energy_state.xbest
optimize_res.fun = energy_state.ebest
optimize_res.nit = iteration
optimize_res.nfev = func_wrapper.nfev
optimize_res.njev = func_wrapper.ngev
optimize_res.nhev = func_wrapper.nhev
optimize_res.message = message
return optimize_res
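# A hedged usage sketch (not part of the SciPy sources): the objective,
# bounds, seed and early-stopping callback below are invented for the
# example; only the documented `dual_annealing` signature is relied on.
def _dual_annealing_example():
    def rastrigin(x):
        return np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)

    def stop_early(x, f, context):
        # context is 0, 1 or 2 depending on where the minimum was detected;
        # returning True stops the search early.
        return f < 1e-10

    bounds = list(zip([-5.12] * 4, [5.12] * 4))
    return dual_annealing(rastrigin, bounds, seed=1234,
                          x0=np.zeros(4), callback=stop_early)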
|
arokem/scipy
|
scipy/optimize/_dual_annealing.py
|
Python
|
bsd-3-clause
| 29,770
|
[
"VisIt"
] |
7a3590a1cefe9c726e400667846c302935c540ec9ab9f12ca9c237344e6c2cee
|
# creates: a1.png a2.png a3.png cnt1.png cnt2.png gnr1.png gnr2.png
from ase.io import write
from ase.structure import bulk, nanotube, graphene_nanoribbon
import numpy as np
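# Bulk fcc Cu written out in three different cell representations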
for i, a in enumerate([
bulk('Cu', 'fcc', a=3.6),
bulk('Cu', 'fcc', a=3.6, orthorhombic=True),
bulk('Cu', 'fcc', a=3.6, cubic=True)]):
write('a%d.pov' % (i + 1), a,
show_unit_cell=2, display=False, run_povray=True)
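# Two nanotubes: a (6, 0) carbon tube and a (3, 3) silicon tube, rotated before rendering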
cnt1 = nanotube(6, 0, length=4)
cnt1.rotate('x', 'z', rotate_cell=True)
cnt2 = nanotube(3, 3, length=6, bond=1.4, symbol='Si')
cnt2.rotate('x', 'z', rotate_cell=True)
for i, a in enumerate([cnt1, cnt2]):
write('cnt%d.pov' % (i + 1), a,
show_unit_cell=2, display=False, run_povray=True)
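# Armchair and zigzag graphene nanoribbons; ind permutes the cell axes below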
ind = [2, 0, 1]
gnr1 = graphene_nanoribbon(3, 4, type='armchair')
gnr1.set_cell(np.diag(gnr1.cell)[ind])
gnr1.positions = gnr1.positions[:, ind]
gnr2 = graphene_nanoribbon(2, 6, type='zigzag', saturated=True,
C_H=1.1, C_C=1.4, vacuum=3.0,
magnetic=True, initial_mag=1.12)
gnr2.set_cell(np.diag(gnr2.cell)[ind])
gnr2.positions = gnr2.positions[:, ind]
for i, a in enumerate([gnr1, gnr2]):
write('gnr%d.pov' % (i + 1), a,
show_unit_cell=2, display=False, run_povray=True)
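# A hedged extra sketch (defined but never called, so the "creates:" list at
# the top stays accurate): Atoms objects returned by bulk() can be repeated
# into supercells before rendering. The 2x2x2 repetition and the output file
# name are invented for illustration.
def render_supercell():
    sc = bulk('Cu', 'fcc', a=3.6, cubic=True).repeat((2, 2, 2))
    write('sc.pov', sc, show_unit_cell=2, display=False, run_povray=True)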
|
slabanja/ase
|
doc/ase/structure.py
|
Python
|
gpl-2.0
| 1,259
|
[
"ASE"
] |
3cc511ecc82db12af7fbfecc61051ac4e11423aa9938ec4e27e190b9853c638c
|
# -*- coding: utf-8 -*-
""" Tests for student account views. """
from copy import copy
import re
from unittest import skipUnless
from urllib import urlencode
import mock
import ddt
from django.conf import settings
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.messages.middleware import MessageMiddleware
from django.test import TestCase
from django.test.utils import override_settings
from django.http import HttpRequest
from edx_rest_api_client import exceptions
from nose.plugins.attrib import attr
from commerce.models import CommerceConfiguration
from commerce.tests import TEST_API_URL, TEST_API_SIGNING_KEY, factories
from commerce.tests.mocks import mock_get_orders
from course_modes.models import CourseMode
from openedx.core.djangoapps.programs.tests.mixins import ProgramsApiConfigMixin
from openedx.core.djangoapps.user_api.accounts.api import activate_account, create_account
from openedx.core.djangoapps.user_api.accounts import EMAIL_MAX_LENGTH
from openedx.core.djangolib.js_utils import dump_js_escaped_json
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import UserFactory
from student_account.views import account_settings_context, get_user_orders
from third_party_auth.tests.testutil import simulate_running_pipeline, ThirdPartyAuthTestMixin
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from openedx.core.djangoapps.theming.tests.test_util import with_comprehensive_theme_context
@ddt.ddt
class StudentAccountUpdateTest(CacheIsolationTestCase, UrlResetMixin):
""" Tests for the student account views that update the user's account information. """
USERNAME = u"heisenberg"
ALTERNATE_USERNAME = u"walt"
OLD_PASSWORD = u"ḅḷüëṡḳÿ"
NEW_PASSWORD = u"🄱🄸🄶🄱🄻🅄🄴"
OLD_EMAIL = u"walter@graymattertech.com"
NEW_EMAIL = u"walt@savewalterwhite.com"
INVALID_ATTEMPTS = 100
INVALID_EMAILS = [
None,
u"",
u"a",
"no_domain",
"no+domain",
"@",
"@domain.com",
"test@no_extension",
# Long email -- subtract the length of the @domain
# except for one character (so we exceed the max length limit)
u"{user}@example.com".format(
user=(u'e' * (EMAIL_MAX_LENGTH - 11))
)
]
INVALID_KEY = u"123abc"
URLCONF_MODULES = ['student_accounts.urls']
ENABLED_CACHES = ['default']
def setUp(self):
super(StudentAccountUpdateTest, self).setUp()
# Create/activate a new account
activation_key = create_account(self.USERNAME, self.OLD_PASSWORD, self.OLD_EMAIL)
activate_account(activation_key)
# Login
result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
self.assertTrue(result)
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
def test_password_change(self):
# Request a password change while logged in, simulating
# use of the password reset link from the account page
response = self._change_password()
self.assertEqual(response.status_code, 200)
# Check that an email was sent
self.assertEqual(len(mail.outbox), 1)
# Retrieve the activation link from the email body
email_body = mail.outbox[0].body
result = re.search('(?P<url>https?://[^\s]+)', email_body)
self.assertIsNot(result, None)
activation_link = result.group('url')
# Visit the activation link
response = self.client.get(activation_link)
self.assertEqual(response.status_code, 200)
# Submit a new password and follow the redirect to the success page
response = self.client.post(
activation_link,
# These keys are from the form on the current password reset confirmation page.
{'new_password1': self.NEW_PASSWORD, 'new_password2': self.NEW_PASSWORD},
follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Your password has been reset.")
# Log the user out to clear session data
self.client.logout()
# Verify that the new password can be used to log in
result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
self.assertTrue(result)
# Try reusing the activation link to change the password again
response = self.client.post(
activation_link,
{'new_password1': self.OLD_PASSWORD, 'new_password2': self.OLD_PASSWORD},
follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "This password reset link is invalid. It may have been used already.")
self.client.logout()
# Verify that the old password cannot be used to log in
result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
self.assertFalse(result)
# Verify that the new password continues to be valid
result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
self.assertTrue(result)
@ddt.data(True, False)
def test_password_change_logged_out(self, send_email):
# Log the user out
self.client.logout()
# Request a password change while logged out, simulating
# use of the password reset link from the login page
if send_email:
response = self._change_password(email=self.OLD_EMAIL)
self.assertEqual(response.status_code, 200)
else:
# Don't send an email in the POST data, simulating
# its (potentially accidental) omission in the POST
# data sent from the login page
response = self._change_password()
self.assertEqual(response.status_code, 400)
def test_password_change_inactive_user(self):
# Log out the user created during test setup
self.client.logout()
# Create a second user, but do not activate it
create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)
# Send the view the email address tied to the inactive user
response = self._change_password(email=self.NEW_EMAIL)
# Expect that the activation email is still sent,
# since the user may have lost the original activation email.
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 1)
def test_password_change_no_user(self):
# Log out the user created during test setup
self.client.logout()
# Send the view an email address not tied to any user
response = self._change_password(email=self.NEW_EMAIL)
self.assertEqual(response.status_code, 400)
def test_password_change_rate_limited(self):
# Log out the user created during test setup, to prevent the view from
# selecting the logged-in user's email address over the email provided
# in the POST data
self.client.logout()
# Make many consecutive bad requests in an attempt to trigger the rate limiter
for attempt in xrange(self.INVALID_ATTEMPTS):
self._change_password(email=self.NEW_EMAIL)
response = self._change_password(email=self.NEW_EMAIL)
self.assertEqual(response.status_code, 403)
@ddt.data(
('post', 'password_change_request', []),
)
@ddt.unpack
def test_require_http_method(self, correct_method, url_name, args):
wrong_methods = {'get', 'put', 'post', 'head', 'options', 'delete'} - {correct_method}
url = reverse(url_name, args=args)
for method in wrong_methods:
response = getattr(self.client, method)(url)
self.assertEqual(response.status_code, 405)
def _change_password(self, email=None):
"""Request to change the user's password. """
data = {}
if email:
data['email'] = email
return self.client.post(path=reverse('password_change_request'), data=data)
@attr('shard_3')
@ddt.ddt
class StudentAccountLoginAndRegistrationTest(ThirdPartyAuthTestMixin, UrlResetMixin, ModuleStoreTestCase):
""" Tests for the student account views that update the user's account information. """
USERNAME = "bob"
EMAIL = "bob@example.com"
PASSWORD = "password"
URLCONF_MODULES = ['embargo']
@mock.patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self):
super(StudentAccountLoginAndRegistrationTest, self).setUp()
# For these tests, three third party auth providers are enabled by default:
self.configure_google_provider(enabled=True)
self.configure_facebook_provider(enabled=True)
self.configure_dummy_provider(
enabled=True,
icon_class='',
icon_image=SimpleUploadedFile('icon.svg', '<svg><rect width="50" height="100"/></svg>'),
)
@ddt.data(
("signin_user", "login"),
("register_user", "register"),
)
@ddt.unpack
def test_login_and_registration_form(self, url_name, initial_mode):
response = self.client.get(reverse(url_name))
expected_data = '"initial_mode": "{mode}"'.format(mode=initial_mode)
self.assertContains(response, expected_data)
@ddt.data("signin_user", "register_user")
def test_login_and_registration_form_already_authenticated(self, url_name):
# Create/activate a new account and log in
activation_key = create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
activate_account(activation_key)
result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(result)
# Verify that we're redirected to the dashboard
response = self.client.get(reverse(url_name))
self.assertRedirects(response, reverse("dashboard"))
@ddt.data(
(None, "signin_user"),
(None, "register_user"),
("edx.org", "signin_user"),
("edx.org", "register_user"),
)
@ddt.unpack
def test_login_and_registration_form_signin_preserves_params(self, theme, url_name):
params = [
('course_id', 'edX/DemoX/Demo_Course'),
('enrollment_action', 'enroll'),
]
# The response should have a "Sign In" button with the URL
# that preserves the querystring params
with with_comprehensive_theme_context(theme):
response = self.client.get(reverse(url_name), params)
expected_url = '/login?{}'.format(self._finish_auth_url_param(params + [('next', '/dashboard')]))
self.assertContains(response, expected_url)
# Add additional parameters:
params = [
('course_id', 'edX/DemoX/Demo_Course'),
('enrollment_action', 'enroll'),
('course_mode', CourseMode.DEFAULT_MODE_SLUG),
('email_opt_in', 'true'),
('next', '/custom/final/destination')
]
# Verify that this parameter is also preserved
with with_comprehensive_theme_context(theme):
response = self.client.get(reverse(url_name), params)
expected_url = '/login?{}'.format(self._finish_auth_url_param(params))
self.assertContains(response, expected_url)
@mock.patch.dict(settings.FEATURES, {"ENABLE_THIRD_PARTY_AUTH": False})
@ddt.data("signin_user", "register_user")
def test_third_party_auth_disabled(self, url_name):
response = self.client.get(reverse(url_name))
self._assert_third_party_auth_data(response, None, None, [])
@ddt.data(
("signin_user", None, None),
("register_user", None, None),
("signin_user", "google-oauth2", "Google"),
("register_user", "google-oauth2", "Google"),
("signin_user", "facebook", "Facebook"),
("register_user", "facebook", "Facebook"),
("signin_user", "dummy", "Dummy"),
("register_user", "dummy", "Dummy"),
)
@ddt.unpack
def test_third_party_auth(self, url_name, current_backend, current_provider):
params = [
('course_id', 'course-v1:Org+Course+Run'),
('enrollment_action', 'enroll'),
('course_mode', CourseMode.DEFAULT_MODE_SLUG),
('email_opt_in', 'true'),
('next', '/custom/final/destination'),
]
# Simulate a running pipeline
if current_backend is not None:
pipeline_target = "student_account.views.third_party_auth.pipeline"
with simulate_running_pipeline(pipeline_target, current_backend):
response = self.client.get(reverse(url_name), params)
# Do NOT simulate a running pipeline
else:
response = self.client.get(reverse(url_name), params)
# This relies on the THIRD_PARTY_AUTH configuration in the test settings
expected_providers = [
{
"id": "oa2-dummy",
"name": "Dummy",
"iconClass": None,
"iconImage": settings.MEDIA_URL + "icon.svg",
"loginUrl": self._third_party_login_url("dummy", "login", params),
"registerUrl": self._third_party_login_url("dummy", "register", params)
},
{
"id": "oa2-facebook",
"name": "Facebook",
"iconClass": "fa-facebook",
"iconImage": None,
"loginUrl": self._third_party_login_url("facebook", "login", params),
"registerUrl": self._third_party_login_url("facebook", "register", params)
},
{
"id": "oa2-google-oauth2",
"name": "Google",
"iconClass": "fa-google-plus",
"iconImage": None,
"loginUrl": self._third_party_login_url("google-oauth2", "login", params),
"registerUrl": self._third_party_login_url("google-oauth2", "register", params)
},
]
self._assert_third_party_auth_data(response, current_backend, current_provider, expected_providers)
def test_hinted_login(self):
params = [("next", "/courses/something/?tpa_hint=oa2-google-oauth2")]
response = self.client.get(reverse('signin_user'), params)
self.assertContains(response, '"third_party_auth_hint": "oa2-google-oauth2"')
@override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
def test_microsite_uses_old_login_page(self):
# Retrieve the login page from a microsite domain
# and verify that we're served the old page.
resp = self.client.get(
reverse("signin_user"),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertContains(resp, "Log into your Test Microsite Account")
self.assertContains(resp, "login-form")
def test_microsite_uses_old_register_page(self):
# Retrieve the register page from a microsite domain
# and verify that we're served the old page.
resp = self.client.get(
reverse("register_user"),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertContains(resp, "Register for Test Microsite")
self.assertContains(resp, "register-form")
def test_login_registration_xframe_protected(self):
resp = self.client.get(
reverse("register_user"),
{},
HTTP_REFERER="http://localhost/iframe"
)
self.assertEqual(resp['X-Frame-Options'], 'DENY')
self.configure_lti_provider(name='Test', lti_hostname='localhost', lti_consumer_key='test_key', enabled=True)
resp = self.client.get(
reverse("register_user"),
HTTP_REFERER="http://localhost/iframe"
)
self.assertEqual(resp['X-Frame-Options'], 'ALLOW')
def _assert_third_party_auth_data(self, response, current_backend, current_provider, providers):
"""Verify that third party auth info is rendered correctly in a DOM data attribute. """
finish_auth_url = None
if current_backend:
finish_auth_url = reverse("social:complete", kwargs={"backend": current_backend}) + "?"
auth_info = {
"currentProvider": current_provider,
"providers": providers,
"secondaryProviders": [],
"finishAuthUrl": finish_auth_url,
"errorMessage": None,
}
auth_info = dump_js_escaped_json(auth_info)
expected_data = '"third_party_auth": {auth_info}'.format(
auth_info=auth_info
)
self.assertContains(response, expected_data)
def _third_party_login_url(self, backend_name, auth_entry, login_params):
"""Construct the login URL to start third party authentication. """
return u"{url}?auth_entry={auth_entry}&{param_str}".format(
url=reverse("social:begin", kwargs={"backend": backend_name}),
auth_entry=auth_entry,
param_str=self._finish_auth_url_param(login_params),
)
def _finish_auth_url_param(self, params):
"""
Make the next=... URL parameter that indicates where the user should go next.
>>> _finish_auth_url_param([('next', '/dashboard')])
'/account/finish_auth?next=%2Fdashboard'
"""
return urlencode({
'next': '/account/finish_auth?{}'.format(urlencode(params))
})
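# A hedged, illustrative helper (not used by the tests): it rebuilds the
# "finish auth" login URL the same way _finish_auth_url_param does, so the
# expected strings asserted above can be reproduced interactively. The
# params below are invented.
def _example_finish_auth_url():
    params = [('course_id', 'edX/DemoX/Demo_Course'), ('next', '/dashboard')]
    return '/login?' + urlencode({
        'next': '/account/finish_auth?{}'.format(urlencode(params))
    })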
@override_settings(ECOMMERCE_API_URL=TEST_API_URL, ECOMMERCE_API_SIGNING_KEY=TEST_API_SIGNING_KEY)
class AccountSettingsViewTest(ThirdPartyAuthTestMixin, TestCase, ProgramsApiConfigMixin):
""" Tests for the account settings view. """
USERNAME = 'student'
PASSWORD = 'password'
FIELDS = [
'country',
'gender',
'language',
'level_of_education',
'password',
'year_of_birth',
'preferred_language',
]
HIDDEN_FIELDS = [
'time_zone',
]
@mock.patch("django.conf.settings.MESSAGE_STORAGE", 'django.contrib.messages.storage.cookie.CookieStorage')
def setUp(self):
super(AccountSettingsViewTest, self).setUp()
self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
CommerceConfiguration.objects.create(cache_ttl=10, enabled=True)
self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.request = HttpRequest()
self.request.user = self.user
# For these tests, two third party auth providers are enabled by default:
self.configure_google_provider(enabled=True)
self.configure_facebook_provider(enabled=True)
        # Python-social saves auth failure notifications in Django messages.
# See pipeline.get_duplicate_provider() for details.
self.request.COOKIES = {}
MessageMiddleware().process_request(self.request)
messages.error(self.request, 'Facebook is already in use.', extra_tags='Auth facebook')
def test_context(self):
context = account_settings_context(self.request)
user_accounts_api_url = reverse("accounts_api", kwargs={'username': self.user.username})
self.assertEqual(context['user_accounts_api_url'], user_accounts_api_url)
user_preferences_api_url = reverse('preferences_api', kwargs={'username': self.user.username})
self.assertEqual(context['user_preferences_api_url'], user_preferences_api_url)
for attribute in self.FIELDS:
self.assertIn(attribute, context['fields'])
self.assertEqual(
context['user_accounts_api_url'], reverse("accounts_api", kwargs={'username': self.user.username})
)
self.assertEqual(
context['user_preferences_api_url'], reverse('preferences_api', kwargs={'username': self.user.username})
)
self.assertEqual(context['duplicate_provider'], 'facebook')
self.assertEqual(context['auth']['providers'][0]['name'], 'Facebook')
self.assertEqual(context['auth']['providers'][1]['name'], 'Google')
def test_hidden_fields_not_visible(self):
"""
Test that hidden fields are not visible when disabled.
"""
temp_features = copy(settings.FEATURES)
temp_features['ENABLE_TIME_ZONE_PREFERENCE'] = False
with self.settings(FEATURES=temp_features):
view_path = reverse('account_settings')
response = self.client.get(path=view_path)
for attribute in self.FIELDS:
self.assertIn(attribute, response.content)
for attribute in self.HIDDEN_FIELDS:
self.assertIn('"%s": {"enabled": false' % (attribute), response.content)
def test_hidden_fields_are_visible(self):
"""
Test that hidden fields are visible when enabled.
"""
temp_features = copy(settings.FEATURES)
temp_features['ENABLE_TIME_ZONE_PREFERENCE'] = True
with self.settings(FEATURES=temp_features):
view_path = reverse('account_settings')
response = self.client.get(path=view_path)
for attribute in self.FIELDS:
self.assertIn(attribute, response.content)
for attribute in self.HIDDEN_FIELDS:
self.assertIn('"%s": {"enabled": true' % (attribute), response.content)
def test_header_with_programs_listing_enabled(self):
"""
Verify that tabs header will be shown while program listing is enabled.
"""
self.create_programs_config(program_listing_enabled=True)
view_path = reverse('account_settings')
response = self.client.get(path=view_path)
self.assertContains(response, '<li class="tab-nav-item">')
def test_header_with_programs_listing_disabled(self):
"""
Verify that nav header will be shown while program listing is disabled.
"""
self.create_programs_config(program_listing_enabled=False)
view_path = reverse('account_settings')
response = self.client.get(path=view_path)
self.assertContains(response, '<li class="item nav-global-01">')
def test_commerce_order_detail(self):
with mock_get_orders():
order_detail = get_user_orders(self.user)
user_order = mock_get_orders.default_response['results'][0]
expected = [
{
'number': user_order['number'],
'price': user_order['total_excl_tax'],
'title': user_order['lines'][0]['title'],
'order_date': 'Jan 01, 2016',
'receipt_url': '/commerce/checkout/receipt/?orderNum=' + user_order['number']
}
]
self.assertEqual(order_detail, expected)
def test_commerce_order_detail_exception(self):
with mock_get_orders(exception=exceptions.HttpNotFoundError):
order_detail = get_user_orders(self.user)
self.assertEqual(order_detail, [])
def test_incomplete_order_detail(self):
response = {
'results': [
factories.OrderFactory(
status='Incomplete',
lines=[
factories.OrderLineFactory(
product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory()])
)
]
)
]
}
with mock_get_orders(response=response):
order_detail = get_user_orders(self.user)
self.assertEqual(order_detail, [])
def test_honor_course_order_detail(self):
response = {
'results': [
factories.OrderFactory(
lines=[
factories.OrderLineFactory(
product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory(
name='certificate_type',
value='honor'
)])
)
]
)
]
}
with mock_get_orders(response=response):
order_detail = get_user_orders(self.user)
self.assertEqual(order_detail, [])
def test_order_history_with_no_product(self):
response = {
'results': [
factories.OrderFactory(
lines=[
factories.OrderLineFactory(
product=None
),
factories.OrderLineFactory(
product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory(
name='certificate_type',
value='verified'
)])
)
]
)
]
}
with mock_get_orders(response=response):
order_detail = get_user_orders(self.user)
self.assertEqual(len(order_detail), 1)
@override_settings(SITE_NAME=settings.MICROSITE_LOGISTRATION_HOSTNAME)
class MicrositeLogistrationTests(TestCase):
"""
Test to validate that microsites can display the logistration page
"""
def test_login_page(self):
"""
Make sure that we get the expected logistration page on our specialized
microsite
"""
resp = self.client.get(
reverse('signin_user'),
HTTP_HOST=settings.MICROSITE_LOGISTRATION_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertIn('<div id="login-and-registration-container"', resp.content)
def test_registration_page(self):
"""
Make sure that we get the expected logistration page on our specialized
microsite
"""
resp = self.client.get(
reverse('register_user'),
HTTP_HOST=settings.MICROSITE_LOGISTRATION_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertIn('<div id="login-and-registration-container"', resp.content)
@override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
def test_no_override(self):
"""
Make sure we get the old style login/registration if we don't override
"""
resp = self.client.get(
reverse('signin_user'),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('<div id="login-and-registration-container"', resp.content)
resp = self.client.get(
reverse('register_user'),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('<div id="login-and-registration-container"', resp.content)
|
JioEducation/edx-platform
|
lms/djangoapps/student_account/test/test_views.py
|
Python
|
agpl-3.0
| 27,185
|
[
"VisIt"
] |
54426bdb47ffb72122cda4a810c7535f7410d0da90718f98908154048f585e03
|
import numpy as np
import time
import logging
import math
# Dimensions of the playing field
WORLD_X = 3000
WORLD_Y = 2000
BEAC_R = 40
BORDER = 15
BEACONS = np.array([[WORLD_X+BEAC_R+BORDER, WORLD_Y / 2.],
[-BEAC_R-BORDER, WORLD_Y + BEAC_R+BORDER],
[- BEAC_R-BORDER, - BEAC_R - BORDER]])
# Parameters of the lidar
MAX_ITENS = 3500 # MAX_ITENS 2600
MAX_DIST = 3700
BEAC_DIST_THRES = 200
class ParticleFilter:
    def __init__(self, particles=500, sense_noise=50, distance_noise=30,
                 angle_noise=0.02, in_x=150, in_y=150, in_angle=0.0,
                 input_queue=None, out_queue=None, color='blue'):
        global BEACONS
        if color == 'blue':
            BEACONS = np.array([[-BEAC_R - BORDER, WORLD_Y / 2.],
                                [WORLD_X + BEAC_R + BORDER, WORLD_Y + BEAC_R + BORDER],
                                [WORLD_X + BEAC_R + BORDER, -BEAC_R - BORDER]])
stamp = time.time()
self.input_queue = input_queue
self.out_queue = out_queue
self.particles_num = particles
self.sense_noise = sense_noise
self.distance_noise = distance_noise
self.angle_noise = angle_noise
self.warning = False
self.last = (in_x,in_y,in_angle)
x = np.random.normal(in_x, distance_noise, particles)
y = np.random.normal(in_y, distance_noise, particles)
orient = np.random.normal(in_angle, angle_noise, particles) % (2 * np.pi)
self.particles = np.array([x, y, orient]).T # instead of np.vstack((x,y,orient)).T
logging.info('initialize time: '+str(time.time()-stamp))
        # Added by Andrei for debugging
self.debug_info = []
self.start_time = time.time()
def gaus(self, x, mu=0, sigma=1):
"""calculates the probability of x for 1-dim Gaussian with mean mu and var. sigma"""
return np.exp(- ((x-mu) ** 2) / (sigma ** 2) / 2.0) / np.sqrt(2.0 * np.pi * (sigma ** 2))
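    # An added illustrative note: gaus() is the 1-D Gaussian pdf used for
    # particle weighting; for example gaus(0.0, mu=0.0, sigma=1.0) equals
    # 1 / sqrt(2 * pi) ~= 0.3989.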
def move_particles(self, delta, mode="npy"): # delta = [dx,dy,d_rot]
stamp = time.time()
x_noise = np.random.normal(0, self.distance_noise, self.particles_num)
y_noise = np.random.normal(0, self.distance_noise, self.particles_num)
angle_noise = np.random.normal(0, self.angle_noise, self.particles_num)
self.particles[:, 0] = self.particles[:, 0] + delta[0] + x_noise
self.particles[:, 1] = self.particles[:, 1] + delta[1] + y_noise
self.particles[:, 2] = (self.particles[:, 2] + delta[2] + angle_noise) % (2 * math.pi)
# START instead of
# NOT FASTER! NOT RIGHT((
# # self.particles + noise + delta:
# # noise - Nx3 : N - num particles, (x_noise, y_noise, angle_noise)
# self.particles += (np.random.multivariate_normal(mean=np.array([0, 0, 0]),
# cov=np.diag(np.array([self.distance_noise,
# self.distance_noise,
# self.angle_noise])),
# size=(self.particles_num))
# + np.array([delta]))
# self.particles[:, 2] %= 2 * np.pi
# END instead of
#logging.info('Particle Move time: ' + str(time.time() - stamp))
def resample(self, weights):
# OLD START
# n = self.particles_num
# indices = []
# C = [0.] + [sum(weights[:i + 1]) for i in range(n)]
# u0, j = np.random.rand(), 0
# for u in [(u0 + i) / n for i in range(n)]:
# START instead of
n = self.particles_num
        weights = np.asarray(weights)
        indices = []
        C = np.append([0.], np.cumsum(weights))  # [0.] + [sum(weights[:i + 1]) for i in range(n)]
j = 0
u0 = (np.random.rand() + np.arange(n))/n
for u in u0: #[(u0 + i) / n for i in range(n)
# END intsead of
while j < len(C) and u > C[j]:
j += 1
indices += [j - 1]
return indices
def calculate_main(self):
stamp = time.time()
x = np.mean(self.particles[:, 0])
y = np.mean(self.particles[:, 1])
zero_elem = self.particles[0, 2]
temporary = ((self.particles[:, 2]-zero_elem+np.pi) % (2.0 * np.pi))+zero_elem-np.pi
orient = np.mean(temporary)
answer = (x, y, orient)
#logging.info('main_calculation time' + str(time.time() - stamp))
#logging.info("Particle Filter coordinates: "+str(answer))
return answer
def particle_sense(self, scan):
stamp = time.time()
angle, distance = get_landmarks(scan)
x_coords, y_coords = p_trans(angle,distance)
weights = self.weights(x_coords,y_coords)
        if self.warning:
            # Localization lost: re-seed the particles around the last known pose
            x = np.random.normal(self.last[0], 150, self.particles_num)
            y = np.random.normal(self.last[1], 150, self.particles_num)
            orient = np.random.normal(self.last[2], np.pi, self.particles_num) % (2 * np.pi)
            self.particles = np.array([x, y, orient]).T  # instead of np.vstack((x,y,orient)).T
            self.warning = False
            logging.info('particle_sense time :' + str(time.time() - stamp) + " points: " + str(len(x_coords)))
            return self.particles
        self.particles = self.particles[self.resample(weights), :]
        logging.info('particle_sense time :' + str(time.time() - stamp) + " points: " + str(len(x_coords)))
        return self.particles
def weights(self, x_beac, y_beac):
"""Calculate particle weight based on its pose and lidar data"""
# TODO check ICP implementation
# BEACONS: from global BEACONS to particles local: (X, Y) - Nx3x2 matrices
res = BEACONS[np.newaxis, :, :] - self.particles[:, np.newaxis, :2]
X = ( res[:,:,0]*np.cos(self.particles[:,2])[:, np.newaxis]
+ res[:,:,1]*np.sin(self.particles[:,2])[:, np.newaxis])
Y = ( -res[:, :, 0] * np.sin(self.particles[:, 2])[:, np.newaxis]
+ res[:, :, 1] * np.cos(self.particles[:, 2])[:, np.newaxis])
beacon = np.concatenate((X[:, :, np.newaxis], Y[:, :, np.newaxis]), axis=2)
        # beacon: theoretical beacon positions in each particle's local frame
        # Distances from theoretical beacons to beacons detected in the scan
        # (x_beac, y_beac); ln1, ln2, ln3: NxM (M = number of detected beacons)
ln1 = np.abs(np.sqrt((beacon[:, np.newaxis, 0, 0] - x_beac[np.newaxis, :])**2
+ (beacon[:, np.newaxis, 0, 1] - y_beac[np.newaxis, :])**2) - BEAC_R)
ln2 = np.abs(np.sqrt((beacon[:, np.newaxis, 1, 0] - x_beac[np.newaxis, :])**2
+ (beacon[:, np.newaxis, 1, 1] - y_beac[np.newaxis, :])**2) - BEAC_R)
ln3 = np.abs(np.sqrt((beacon[:, np.newaxis, 2, 0] - x_beac[np.newaxis, :])**2
+ (beacon[:, np.newaxis, 2, 1] - y_beac[np.newaxis, :])**2) - BEAC_R)
        # ln1, ln2, ln3 hold the differences between theoretical and detected
        # beacon distances from the lidar
# get minimal distance for each particle, its detected beacons to theoretical beacons
errors = np.minimum(ln1, np.minimum(ln2, ln3))
        # Detected beacons too far from any real beacon are not valid
limit_err = errors > BEAC_DIST_THRES
        # Map each detected beacon to the closest real one (valid distance needed)
error_l1 = np.logical_and(np.equal(errors, ln1), ~limit_err)
error_l2 = np.logical_and(np.equal(errors, ln2), ~limit_err)
error_l3 = np.logical_and(np.equal(errors, ln3), ~limit_err)
        # boolean error_li: sum to get, per particle, the number of detected points matched to each of the 3 beacons
err_l1 = np.sum(error_l1, axis=-1)
err_l2 = np.sum(error_l2, axis=-1)
err_l3 = np.sum(error_l3, axis=-1)
# find sum of errors near 3 beacons for each particle: beacon_error_sum Nx3
beacon_error_sum = np.ones([self.particles_num, 3], dtype=np.float)*1000
ind = np.where(err_l1)[0]
if ind.size:
beacon_error_sum[ind, 0] = np.sum(np.where(error_l1, errors, 0), axis=-1)[ind] / err_l1[ind]
ind = np.where(err_l2)[0]
if ind.size:
beacon_error_sum[ind, 1] = np.sum(np.where(error_l2, errors, 0), axis=-1)[ind] / err_l2[ind]
ind = np.where(err_l3)[0]
if ind.size:
beacon_error_sum[ind, 2] = np.sum(np.where(error_l3, errors, 0), axis=-1)[ind] / err_l3[ind]
        # Particle weights are estimated from the errors between scanned and theoretical beacon locations
# median version
#weights = self.gaus(np.median(beacon_error_sum, axis=1),mu=0, sigma=self.sense_noise)
# mean version
weights = self.gaus(np.mean(beacon_error_sum, axis=1),mu=0, sigma=self.sense_noise)
# check weights
if np.sum(weights)<self.gaus(self.sense_noise*5.0,mu =0,sigma= self.sense_noise)*self.particles_num:
logging.info("Dangerous Situation")
#self.warning = True
if np.sum(weights) > 0:
weights /= np.sum(weights)
else:
weights = np.ones(self.particles_num, dtype=np.float)/self.particles_num
return weights
    # TODO: try using the median instead of the mean
    # TODO: if odometry works very badly and weights are small, use only the lidar
def send_command(self,name,params=None):
self.input_queue.put({'source':'loc','cmd':name,'params':params})
return self.out_queue.get()
def localisation(self, localisation,shared_coords,get_raw):
time.sleep(0.5)
#time.sleep(50)
while True:
if localisation.value:
tmstmp = time.time() - self.start_time
coords = self.send_command('getCurrentCoordinates')['data']
                if coords and not isinstance(coords[0], float):
logging.critical("Incorrect coordinates format")
continue
coords[0] = coords[0]*1000
coords[1] = coords[1]*1000
self.move_particles(
[coords[0] - shared_coords[0], coords[1] - shared_coords[1], coords[2] - shared_coords[2]])
                # add approximation
lidar_data = get_raw()
self.particle_sense(lidar_data)
if self.warning:
x = np.random.normal(self.last[0], 200, self.particles_num)
y = np.random.normal(self.last[1], 200, self.particles_num)
orient = np.random.normal(self.last[2], np.pi, self.particles_num) % (2 * np.pi)
self.particles = np.array([x, y, orient]).T # instead of np.vstack((x,y,orient)).T
self.warning = False
self.particle_sense(lidar_data)
self.move_particles([0, 0, 0])
self.particle_sense(lidar_data)
self.move_particles([0, 0, 0])
main_robot = self.calculate_main()
self.last = main_robot
shared_coords[0] = main_robot[0]
shared_coords[1] = main_robot[1]
shared_coords[2] = main_robot[2]
#logging.info(self.send_command('setCoordinates',[shared_coords[0] / 1000., shared_coords[1] / 1000., shared_coords[2]]))
time.sleep(0.1)
#logging.info("Odometry coords: " + str(list(coords[:2]
#+ [np.rad2deg(coords[2])])))
#logging.info("Particel Filter coords: " + str(shared_coords[:2]
#+ [np.rad2deg(shared_coords[2])]))
# Helper functions
def get_landmarks(scan):
"""Returns filtrated lidar data"""
stamp = time.time()
ind = np.where(np.logical_and(scan[:, 1] > MAX_ITENS, scan[:, 0] < MAX_DIST))[0]
angles = np.pi / 4 / 180 * ind
distances = scan[ind, 0]
#logging.info('scan preproccesing time: ' + str(time.time() - stamp))
return (angles + np.pi / 4) % (2 * np.pi), distances # delete +np.pi for our robot ANDREW you NEED return (angles + np.pi / 4 + np.pi) % (2 * np.pi), distances
def p_trans(agl, pit):
x_beac = pit*np.cos(agl) # multiply by minus in our robot
y_beac = pit*np.sin(agl)
return x_beac,y_beac
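# A hedged, illustrative demo (not used by the robot code): it builds a
# synthetic lidar scan with a few high-intensity returns and runs one
# predict/update cycle of the filter. All numbers below are invented.
def _filter_demo():
    pf = ParticleFilter(particles=200, in_x=150, in_y=150, in_angle=0.0)
    # Fake scan: column 0 holds distances, column 1 intensities. Only points
    # with intensity > MAX_ITENS and distance < MAX_DIST pass get_landmarks().
    scan = np.zeros((1440, 2))
    scan[:10, 0] = 1000.0
    scan[:10, 1] = MAX_ITENS + 100
    pf.move_particles([10.0, 0.0, 0.0])  # predict step from an odometry delta
    pf.particle_sense(scan)              # weighting + resampling update
    return pf.calculate_main()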
|
SkRobo/Eurobot-2017
|
NewCommunication/npParticle.py
|
Python
|
mit
| 12,287
|
[
"Gaussian"
] |
5aa6de848f3aaae4856b137ea8533478a8374d8eea7213f31163688b1cc19081
|
import pytest # noqa
import os
import utilities
from ..automation import CommandSequence
from ..automation import TaskManager
expected_lso_content_a = [
1, # visit id
u'localtest.me',
u'FlashCookie.sol',
u'localtest.me/FlashCookie.sol',
u'test_key',
u'REPLACEME']
expected_lso_content_b = [
2, # visit id
u'localtest.me',
u'FlashCookie.sol',
u'localtest.me/FlashCookie.sol',
u'test_key',
u'REPLACEME']
expected_js_cookie = (
1, # visit id
u'%s' % utilities.BASE_TEST_URL_DOMAIN,
u'test_cookie',
u'Test-0123456789',
u'%s' % utilities.BASE_TEST_URL_DOMAIN,
u'/')
class TestStorageVectors():
""" Runs some basic tests to check that the saving of
storage vectors (i.e. Flash LSOs, profile cookies) works.
NOTE: These tests are very basic and should be expanded
on to check for completeness and correctness.
"""
NUM_BROWSERS = 1
def get_config(self, data_dir):
manager_params, browser_params = TaskManager.load_default_params(self.NUM_BROWSERS)
manager_params['data_directory'] = data_dir
manager_params['log_directory'] = data_dir
manager_params['db'] = os.path.join(manager_params['data_directory'],
manager_params['database_name'])
browser_params[0]['headless'] = True
return manager_params, browser_params
def test_flash_cookies(self, tmpdir):
""" Check that some Flash LSOs are saved and
are properly keyed in db."""
# Run the test crawl
manager_params, browser_params = self.get_config(str(tmpdir))
browser_params[0]['disable_flash'] = False
manager = TaskManager.TaskManager(manager_params, browser_params)
# Get a site we know sets Flash cookies and visit it twice
lso_value_a = utilities.rand_str(8)
expected_lso_content_a[5] = lso_value_a # we'll expect this to be present
qry_str = '?lso_test_key=%s&lso_test_value=%s' % ("test_key",
lso_value_a)
test_url_a = utilities.BASE_TEST_URL + '/lso/setlso.html' + qry_str
cs = CommandSequence.CommandSequence(test_url_a)
cs.get(sleep=3, timeout=120)
cs.dump_flash_cookies()
manager.execute_command_sequence(cs)
lso_value_b = utilities.rand_str(8)
expected_lso_content_b[5] = lso_value_b # we'll expect this to be present
qry_str = '?lso_test_key=%s&lso_test_value=%s' % ("test_key",
lso_value_b)
test_url_b = utilities.BASE_TEST_URL + '/lso/setlso.html' + qry_str
cs = CommandSequence.CommandSequence(test_url_b)
cs.get(sleep=3, timeout=120)
cs.dump_flash_cookies()
manager.execute_command_sequence(cs)
manager.close()
# Check that some flash cookies are recorded
qry_res = utilities.query_db(manager_params['db'],
"SELECT * FROM flash_cookies")
lso_count = len(qry_res)
assert lso_count == 2
lso_content_a = list(qry_res[0][2:]) # Remove first two items
lso_content_b = list(qry_res[1][2:]) # Remove first two items
# remove randomly generated LSO directory name
# e.g. TY2FOJUG/localtest.me/Flash.sol -> localtest.me/Flash.sol
lso_content_a[3] = lso_content_a[3].split("/", 1)[-1] # remove LSO dirname
lso_content_b[3] = lso_content_b[3].split("/", 1)[-1] # remove LSO dirname
assert lso_content_a == expected_lso_content_a
assert lso_content_b == expected_lso_content_b
def test_profile_cookies(self, tmpdir):
""" Check that some profile cookies are saved """
# Run the test crawl
manager_params, browser_params = self.get_config(str(tmpdir))
manager = TaskManager.TaskManager(manager_params, browser_params)
# TODO update this to local test site
url = 'http://www.yahoo.com'
cs = CommandSequence.CommandSequence(url)
cs.get(sleep=3, timeout=120)
cs.dump_profile_cookies()
manager.execute_command_sequence(cs)
manager.close()
        # Check that some profile cookies are recorded
qry_res = utilities.query_db(manager_params['db'],
"SELECT COUNT(*) FROM profile_cookies")
        prof_cookie_count = qry_res[0][0]  # first row, first column of COUNT(*)
assert prof_cookie_count > 0
def test_js_profile_cookies(self, tmpdir):
""" Check that profile cookies set by JS are saved """
# Run the test crawl
manager_params, browser_params = self.get_config(str(tmpdir))
manager = TaskManager.TaskManager(manager_params, browser_params)
url = utilities.BASE_TEST_URL + "/js_cookie.html"
cs = CommandSequence.CommandSequence(url)
cs.get(sleep=3, timeout=120)
cs.dump_profile_cookies()
manager.execute_command_sequence(cs)
manager.close()
# Check that the JS cookie we stored is recorded
qry_res = utilities.query_db(manager_params['db'], "SELECT * FROM profile_cookies")
assert len(qry_res) == 1 # we store only one cookie
cookies = qry_res[0] # take the first cookie
# compare URL, domain, name, value, origin, path
assert cookies[2:8] == expected_js_cookie
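# A hedged sketch (not collected by pytest): a further storage probe
# assembled only from helpers already exercised above; the target page is
# the js_cookie.html page used in the last test.
def _example_profile_cookie_count(data_dir):
    manager_params, browser_params = TestStorageVectors().get_config(data_dir)
    manager = TaskManager.TaskManager(manager_params, browser_params)
    cs = CommandSequence.CommandSequence(utilities.BASE_TEST_URL + "/js_cookie.html")
    cs.get(sleep=3, timeout=120)
    cs.dump_profile_cookies()
    manager.execute_command_sequence(cs)
    manager.close()
    qry_res = utilities.query_db(manager_params['db'],
                                 "SELECT COUNT(*) FROM profile_cookies")
    return qry_res[0][0]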
|
tommybananas/OpenWPM
|
test/test_storage_vectors.py
|
Python
|
gpl-3.0
| 5,581
|
[
"VisIt"
] |
95fa96cece78a84b1c6e0a17e295e85a7bcdc6e20e39872398a7bf0a8708a279
|
import numpy as np
import scipy.special as sps
from numpy.testing import (run_module_suite,
assert_,
assert_equal,
assert_raises,
assert_array_almost_equal)
from dipy.denoise.localpca import (localpca, mppca, genpca, _pca_classifier)
from dipy.sims.voxel import multi_tensor
from dipy.core.gradients import gradient_table, generate_bvecs
def setup_module():
global gtab
# generate a gradient table for phantom data
directions8 = generate_bvecs(8)
directions30 = generate_bvecs(30)
directions60 = generate_bvecs(60)
# Create full dataset parameters
# (6 b-values = 0, 8 directions for b-value 300, 30 directions for b-value
# 1000 and 60 directions for b-value 2000)
bvals = np.hstack((np.zeros(6),
300 * np.ones(8),
1000 * np.ones(30),
2000 * np.ones(60)))
bvecs = np.vstack((np.zeros((6, 3)),
directions8, directions30, directions60))
gtab = gradient_table(bvals, bvecs)
def rfiw_phantom(gtab, snr=None):
"""rectangle fiber immersed in water"""
# define voxel index
slice_ind = np.zeros((10, 10, 8))
slice_ind[4:7, 4:7, :] = 1
slice_ind[4:7, 7, :] = 2
slice_ind[7, 7, :] = 3
slice_ind[7, 4:7, :] = 4
slice_ind[7, 3, :] = 5
slice_ind[4:7, 3, :] = 6
slice_ind[3, 3, :] = 7
slice_ind[3, 4:7, :] = 8
slice_ind[3, 7, :] = 9
# Define tissue diffusion parameters
# Restricted diffusion
ADr = 0.99e-3
RDr = 0.0
# Hindered diffusion
ADh = 2.26e-3
RDh = 0.87e-3
# S0 value for tissue
S1 = 50
# Fraction between Restricted and Hindered diffusion
fia = 0.51
# Define water diffusion
Dwater = 3e-3
S2 = 100 # S0 value for water
# Define tissue volume fraction for each voxel type (in index order)
f = np.array([0., 1., 0.6, 0.18, 0.30, 0.15, 0.50, 0.35, 0.70, 0.42])
# Define S0 for each voxel (in index order)
S0 = S1 * f + S2 * (1 - f)
    # Multi-tensor simulations assume that each water pool has the same S0.
    # Since tissue and water voxels are assumed to have different S0 here,
    # the tissue volume fractions have to be adjusted to the measured f
    # values when a constant S0 is assumed. With this correction, the
    # simulations are analogous to simulating a different S0 for each medium.
    # (For more details contact the phantom designer.)
f1 = f * S1 / S0
mevals = np.array([[ADr, RDr, RDr], [ADh, RDh, RDh],
[Dwater, Dwater, Dwater]])
angles = [(0, 0, 1), (0, 0, 1), (0, 0, 1)]
DWI = np.zeros(slice_ind.shape + (gtab.bvals.size, ))
for i in range(10):
fractions = [f1[i] * fia * 100, f1[i] *
(1 - fia) * 100, (1 - f1[i]) * 100]
sig, direction = multi_tensor(gtab, mevals, S0=S0[i], angles=angles,
fractions=fractions, snr=None)
DWI[slice_ind == i, :] = sig
if snr is None:
return DWI
else:
sigma = S2 * 1.0 / snr
n1 = np.random.normal(0, sigma, size=DWI.shape)
n2 = np.random.normal(0, sigma, size=DWI.shape)
return [np.sqrt((DWI / np.sqrt(2) + n1)**2 +
(DWI / np.sqrt(2) + n2)**2), sigma]
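# A hedged, illustrative helper (not used by the tests): the basic localpca
# call pattern on the phantom above. It assumes setup_module() has run so
# that the global gtab exists; the SNR value is invented.
def _example_denoise_phantom():
    dwi, sigma = rfiw_phantom(gtab, snr=30)
    return localpca(dwi, sigma, patch_radius=3)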
def test_lpca_static():
S0 = 100 * np.ones((20, 20, 20, 20), dtype='f8')
S0ns = localpca(S0, sigma=np.ones((20, 20, 20), dtype=np.float64))
assert_array_almost_equal(S0, S0ns)
def test_lpca_random_noise():
S0 = 100 + 2 * np.random.standard_normal((22, 23, 30, 20))
S0ns = localpca(S0, sigma=np.std(S0))
assert_(S0ns.min() > S0.min())
assert_(S0ns.max() < S0.max())
assert_equal(np.round(S0ns.mean()), 100)
def test_lpca_boundary_behaviour():
    # Check whether the first slice is denoised or not
S0 = 100 * np.ones((20, 20, 20, 20), dtype='f8')
S0[:, :, 0, :] = S0[:, :, 0, :] + 2 * \
np.random.standard_normal((20, 20, 20))
S0_first = S0[:, :, 0, :]
S0ns = localpca(S0, sigma=np.std(S0))
S0ns_first = S0ns[:, :, 0, :]
rmses = np.sum(np.abs(S0ns_first - S0_first)) / \
(100.0 * 20.0 * 20.0 * 20.0)
    # shows that S0ns_first is not very close to S0_first
assert_(rmses > 0.0001)
assert_equal(np.round(S0ns_first.mean()), 100)
def test_lpca_rmse():
S0_w_noise = 100 + 2 * np.random.standard_normal((22, 23, 30, 20))
rmse_w_noise = np.sqrt(np.mean((S0_w_noise - 100) ** 2))
S0_denoised = localpca(S0_w_noise, sigma=np.std(S0_w_noise))
rmse_denoised = np.sqrt(np.mean((S0_denoised - 100) ** 2))
# Denoising should always improve the RMSE:
assert_(rmse_denoised < rmse_w_noise)
def test_lpca_sharpness():
S0 = np.ones((30, 30, 30, 20), dtype=np.float64) * 100
S0[10:20, 10:20, 10:20, :] = 50
S0[20:30, 20:30, 20:30, :] = 0
S0 = S0 + 20 * np.random.standard_normal((30, 30, 30, 20))
S0ns = localpca(S0, sigma=20.0)
# check the edge gradient
edgs = np.abs(np.mean(S0ns[8, 10:20, 10:20] - S0ns[12, 10:20, 10:20]) - 50)
assert_(edgs < 2)
def test_lpca_dtype():
# If out_dtype is not specified, we retain the original precision:
S0 = 200 * np.ones((20, 20, 20, 3), dtype=np.float64)
S0ns = localpca(S0, sigma=1)
assert_equal(S0.dtype, S0ns.dtype)
S0 = 200 * np.ones((20, 20, 20, 20), dtype=np.uint16)
S0ns = localpca(S0, sigma=np.ones((20, 20, 20)))
assert_equal(S0.dtype, S0ns.dtype)
# If we set out_dtype, we get what we asked for:
S0 = 200 * np.ones((20, 20, 20, 20), dtype=np.uint16)
S0ns = localpca(S0, sigma=np.ones((20, 20, 20)),
out_dtype=np.float32)
assert_equal(np.float32, S0ns.dtype)
    # If we set a few entries to zero, this induces negative entries in the
    # resulting denoised array:
    S0[5:8, 5:8, 5:8] = 0
    # But we should always get non-negative results:
S0ns = localpca(S0, sigma=np.ones((20, 20, 20)), out_dtype=np.uint16)
assert_(np.all(S0ns >= 0))
# And no wrap-around to crazy high values:
assert_(np.all(S0ns <= 200))
def test_lpca_wrong():
S0 = np.ones((20, 20))
assert_raises(ValueError, localpca, S0, sigma=1)
def test_phantom():
DWI_clean = rfiw_phantom(gtab, snr=None)
DWI, sigma = rfiw_phantom(gtab, snr=30)
# To test without Rician correction
temp = (DWI_clean / sigma)**2
DWI_clean_wrc = (sigma * np.sqrt(np.pi / 2) * np.exp(-0.5 * temp) *
((1 + 0.5 * temp) * sps.iv(0, 0.25 * temp) + 0.5 * temp *
sps.iv(1, 0.25 * temp))**2)
DWI_den = localpca(DWI, sigma, patch_radius=3)
rmse_den = np.sum(np.abs(DWI_clean - DWI_den)) / np.sum(np.abs(DWI_clean))
rmse_noisy = np.sum(np.abs(DWI_clean - DWI)) / np.sum(np.abs(DWI_clean))
rmse_den_wrc = np.sum(np.abs(DWI_clean_wrc - DWI_den)
) / np.sum(np.abs(DWI_clean_wrc))
rmse_noisy_wrc = np.sum(np.abs(DWI_clean_wrc - DWI)) / \
np.sum(np.abs(DWI_clean_wrc))
assert_(np.max(DWI_clean) / sigma < np.max(DWI_den) / sigma)
assert_(np.max(DWI_den) / sigma < np.max(DWI) / sigma)
assert_(rmse_den < rmse_noisy)
assert_(rmse_den_wrc < rmse_noisy_wrc)
# Check if the results of different PCA methods (eig, svd) are similar
DWI_den_svd = localpca(DWI, sigma, pca_method='svd', patch_radius=3)
assert_array_almost_equal(DWI_den, DWI_den_svd)
assert_raises(ValueError, localpca, DWI, sigma, pca_method='empty')
# Try this with a sigma volume, instead of a scalar
sigma_vol = sigma * np.ones(DWI.shape[:-1])
mask = np.zeros_like(DWI, dtype=bool)[..., 0]
mask[2:-2, 2:-2, 2:-2] = True
DWI_den = localpca(DWI, sigma_vol, mask, patch_radius=3)
DWI_clean_masked = DWI_clean.copy()
DWI_clean_masked[~mask] = 0
DWI_masked = DWI.copy()
DWI_masked[~mask] = 0
rmse_den = np.sum(np.abs(DWI_clean_masked - DWI_den)) / np.sum(np.abs(
DWI_clean_masked))
rmse_noisy = np.sum(np.abs(DWI_clean_masked - DWI_masked)) / np.sum(np.abs(
DWI_clean_masked))
DWI_clean_wrc_masked = DWI_clean_wrc.copy()
DWI_clean_wrc_masked[~mask] = 0
rmse_den_wrc = np.sum(np.abs(DWI_clean_wrc_masked - DWI_den)
) / np.sum(np.abs(DWI_clean_wrc_masked))
rmse_noisy_wrc = np.sum(np.abs(DWI_clean_wrc_masked - DWI_masked)) / \
np.sum(np.abs(DWI_clean_wrc_masked))
assert_(np.max(DWI_clean) / sigma < np.max(DWI_den) / sigma)
assert_(np.max(DWI_den) / sigma < np.max(DWI) / sigma)
assert_(rmse_den < rmse_noisy)
assert_(rmse_den_wrc < rmse_noisy_wrc)
def test_lpca_ill_conditioned():
DWI, sigma = rfiw_phantom(gtab, snr=30)
for patch_radius in [1, [1, 1, 1]]:
assert_raises(ValueError, localpca, DWI, sigma,
patch_radius=patch_radius)
def test_lpca_radius_wrong_shape():
DWI, sigma = rfiw_phantom(gtab, snr=30)
for patch_radius in [[2, 2], [2, 2, 2, 2]]:
assert_raises(ValueError, localpca, DWI, sigma,
patch_radius=patch_radius)
def test_lpca_sigma_wrong_shape():
DWI, sigma = rfiw_phantom(gtab, snr=30)
# If sigma is 3D but shape is not like DWI.shape[:-1], an error is raised:
sigma = np.zeros((DWI.shape[0], DWI.shape[1] + 1, DWI.shape[2]))
assert_raises(ValueError, localpca, DWI, sigma)
def test_pca_classifier():
# Produce small phantom with well aligned single voxels and ground truth
# snr = 50, i.e signal std = 0.02 (Gaussian noise)
std_gt = 0.02
S0 = 1.0
ndir = gtab.bvals.size
signal_test = np.zeros((5, 5, 5, ndir))
mevals = np.array([[0.99e-3, 0.0, 0.0], [2.26e-3, 0.87e-3, 0.87e-3]])
sig, direction = multi_tensor(gtab, mevals, S0=S0,
angles=[(0, 0, 1), (0, 0, 1)],
fractions=(50, 50), snr=None)
signal_test[..., :] = sig
noise = std_gt*np.random.standard_normal((5, 5, 5, ndir))
dwi_test = signal_test + noise
# Compute eigenvalues
X = dwi_test.reshape(125, ndir)
M = np.mean(X, axis=0)
X = X - M
[L, W] = np.linalg.eigh(np.dot(X.T, X)/125)
# Find number of noise related eigenvalues
var, c = _pca_classifier(L, 125)
std = np.sqrt(var)
    # Expected number of signal components is 0 because the phantom has only
    # one voxel type and that information is captured by the mean of X.
    # Therefore, the expected number of noise components equals the size of L.
    # To allow some margin of error, assess whether c is higher than
    # L.size - 3.
assert_(c > L.size-3)
    # Check that the noise std estimate has an error below 5%
std_error = abs(std - std_gt)/std_gt * 100
assert_(std_error < 5)
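# Illustrative sketch (hypothetical re-derivation, not dipy's actual
# _pca_classifier implementation) of the idea the test above exercises:
# starting from all eigenvalues, shrink the candidate noise set until its
# spread is compatible with a Marchenko-Pastur-like width implied by its
# own mean.
def _noise_eigenvalue_count_sketch(L, m):
    # L: eigenvalues sorted ascending (as returned by np.linalg.eigh)
    # m: number of samples used to build the covariance matrix
    c = L.size
    var = np.mean(L)
    while c > 1:
        var = np.mean(L[:c])
        # heuristic MP-edge width for a pure-noise block of c eigenvalues
        if L[c - 1] - L[0] < 4 * np.sqrt(var * c / m):
            break
        c -= 1
    return var, c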
def test_mppca_in_phantom():
DWIgt = rfiw_phantom(gtab, snr=None)
std_gt = 0.02
noise = std_gt*np.random.standard_normal(DWIgt.shape)
DWInoise = DWIgt + noise
DWIden = mppca(DWInoise, patch_radius=2)
# Test if denoised data is closer to ground truth than noisy data
rmse_den = np.sum(np.abs(DWIgt - DWIden)) / np.sum(np.abs(DWIgt))
rmse_noisy = np.sum(np.abs(DWIgt - DWInoise)) / np.sum(np.abs(DWIgt))
assert_(rmse_den < rmse_noisy)
def test_mppca_returned_sigma():
DWIgt = rfiw_phantom(gtab, snr=None)
std_gt = 0.02
noise = std_gt*np.random.standard_normal(DWIgt.shape)
DWInoise = DWIgt + noise
    # Case where sigma is estimated using mpPCA
DWIden0, sigma = mppca(DWInoise, patch_radius=2, return_sigma=True)
msigma = np.mean(sigma)
std_error = abs(msigma - std_gt)/std_gt * 100
assert_(std_error < 5)
    # Case where sigma is given as input (the sigma returned should be the
    # same as the one passed in)
DWIden1, rsigma = genpca(DWInoise, sigma=sigma, tau_factor=None,
patch_radius=2, return_sigma=True)
assert_array_almost_equal(rsigma, sigma)
# DWIden1 should be very similar to DWIden0
rmse_den = np.sum(np.abs(DWIden1 - DWIden0)) / np.sum(np.abs(DWIden0))
rmse_ref = np.sum(np.abs(DWIden1 - DWIgt)) / np.sum(np.abs(DWIgt))
assert_(rmse_den < rmse_ref)
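# Note: this test treats genpca(..., sigma=sigma, tau_factor=None) as
# equivalent to the mppca pathway; the assertions only require that the two
# denoised volumes agree with each other more closely than with ground truth.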
if __name__ == '__main__':
run_module_suite()
|
FrancoisRheaultUS/dipy
|
dipy/denoise/tests/test_lpca.py
|
Python
|
bsd-3-clause
| 12,484
|
[
"Gaussian"
] |
b104383447ca689cf376a4cf184d6e89a5734f6ba2c30544378f1ae103e7db9d
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Brian Coca <briancoca+ansible@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: debconf
short_description: Configure a .deb package
description:
- Configure a .deb package using debconf-set-selections.
- Or just query existing selections.
version_added: "1.6"
extends_documentation_fragment:
- action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
support: full
platforms: debian
notes:
- This module requires the command line debconf tools.
- A number of questions have to be answered (depending on the package).
Use 'debconf-show <package>' on any Debian or derivative with the package
installed to see questions/settings available.
- Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
- It is highly recommended to add I(no_log=True) to the task while handling sensitive information using this module.
requirements:
- debconf
- debconf-utils
options:
name:
description:
- Name of package to configure.
type: str
required: true
aliases: [ pkg ]
question:
description:
- A debconf configuration setting.
type: str
aliases: [ selection, setting ]
vtype:
description:
- The type of the value supplied.
      - It is highly recommended to add I(no_log=True) to the task while specifying I(vtype=password).
- C(seen) was added in Ansible 2.2.
type: str
choices: [ boolean, error, multiselect, note, password, seen, select, string, text, title ]
value:
description:
- Value to set the configuration to.
type: str
aliases: [ answer ]
unseen:
description:
- Do not set 'seen' flag when pre-seeding.
type: bool
default: false
author:
- Brian Coca (@bcoca)
'''
EXAMPLES = r'''
- name: Set default locale to fr_FR.UTF-8
ansible.builtin.debconf:
name: locales
question: locales/default_environment_locale
value: fr_FR.UTF-8
vtype: select
- name: Set to generate locales
ansible.builtin.debconf:
name: locales
question: locales/locales_to_be_generated
value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8
vtype: multiselect
- name: Accept oracle license
ansible.builtin.debconf:
name: oracle-java7-installer
question: shared/accepted-oracle-license-v1-1
value: 'true'
vtype: select
- name: Specify a package to register/return the list of questions and current values
ansible.builtin.debconf:
name: tzdata
- name: Pre-configure tripwire site passphrase
ansible.builtin.debconf:
name: tripwire
question: tripwire/site-passphrase
value: "{{ site_passphrase }}"
vtype: password
no_log: True
'''
RETURN = r'''#'''
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
def get_selections(module, pkg):
cmd = [module.get_bin_path('debconf-show', True), pkg]
rc, out, err = module.run_command(' '.join(cmd))
if rc != 0:
module.fail_json(msg=err)
selections = {}
for line in out.splitlines():
(key, value) = line.split(':', 1)
selections[key.strip('*').strip()] = value.strip()
return selections
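# For reference, `debconf-show <pkg>` prints one selection per line, e.g.
# (hypothetical values):
#   * locales/default_environment_locale: fr_FR.UTF-8
#     locales/locales_to_be_generated: en_US.UTF-8 UTF-8
# A leading '*' marks a question that has already been seen; get_selections()
# strips it via key.strip('*').strip() before using the question as the key.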
def set_selection(module, pkg, question, vtype, value, unseen):
setsel = module.get_bin_path('debconf-set-selections', True)
cmd = [setsel]
if unseen:
cmd.append('-u')
if vtype == 'boolean':
if value == 'True':
value = 'true'
elif value == 'False':
value = 'false'
data = ' '.join([pkg, question, vtype, value])
return module.run_command(cmd, data=data)
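# debconf-set-selections consumes "<pkg> <question> <vtype> <value>" lines on
# stdin, e.g. "locales locales/default_environment_locale select fr_FR.UTF-8";
# set_selection() passes exactly one such line via data=.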
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True, aliases=['pkg']),
question=dict(type='str', aliases=['selection', 'setting']),
vtype=dict(type='str', choices=['boolean', 'error', 'multiselect', 'note', 'password', 'seen', 'select', 'string', 'text', 'title']),
value=dict(type='str', aliases=['answer']),
unseen=dict(type='bool', default=False),
),
required_together=(['question', 'vtype', 'value'],),
supports_check_mode=True,
)
# TODO: enable passing array of options and/or debconf file from get-selections dump
pkg = module.params["name"]
question = module.params["question"]
vtype = module.params["vtype"]
value = module.params["value"]
unseen = module.params["unseen"]
prev = get_selections(module, pkg)
changed = False
msg = ""
if question is not None:
if vtype is None or value is None:
module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
# if question doesn't exist, value cannot match
if question not in prev:
changed = True
else:
existing = prev[question]
            # normalise booleans so we compare them the way debconf stores them ('true'/'false' strings)
if vtype == 'boolean':
value = to_text(value).lower()
existing = to_text(prev[question]).lower()
if value != existing:
changed = True
if changed:
if not module.check_mode:
rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
if rc:
module.fail_json(msg=e)
curr = {question: value}
if question in prev:
prev = {question: prev[question]}
else:
prev[question] = ''
if module._diff:
after = prev.copy()
after.update(curr)
diff_dict = {'before': prev, 'after': after}
else:
diff_dict = {}
module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)
module.exit_json(changed=changed, msg=msg, current=prev)
if __name__ == '__main__':
main()
|
nitzmahone/ansible
|
lib/ansible/modules/debconf.py
|
Python
|
gpl-3.0
| 6,261
|
[
"Brian"
] |
45a85af6c869cd06de2701a4e11233f35a88912bd7cb3b3dc638d039ed936779
|
import os
import sys
import subprocess
import threading
import errno
import unittest
from pysam import AlignmentFile
from TestUtils import BAM_DATADIR
IS_PYTHON2 = sys.version_info[0] == 2
def alignmentfile_writer_thread(infile, outfile):
def _writer_thread(infile, outfile):
"""read from infile and write to outfile"""
try:
i = 0
for record in infile:
outfile.write(record)
i += 1
        except IOError as e:
            # ignore broken pipes (the reader may exit early); re-raise
            # anything else as a real error
            if e.errno != errno.EPIPE:
                raise
finally:
outfile.close()
writer = threading.Thread(target=_writer_thread, args=(infile, outfile))
writer.daemon = True
writer.start()
return writer
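# The writer runs as a daemon thread so that feeding the subprocess' stdin and
# draining its stdout can happen concurrently within one process; doing both
# from a single thread could deadlock once either pipe buffer fills up.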
class StreamTest(unittest.TestCase):
def stream_process(self, proc, in_stream, out_stream, writer):
with AlignmentFile(proc.stdout) as infile:
read = 0
for record in infile:
read += 1
return 0, read
@unittest.skipIf(IS_PYTHON2, "no context manager in py2")
def test_text_processing(self):
with subprocess.Popen('head -n200',
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True) as proc:
in_stream = AlignmentFile(os.path.join(BAM_DATADIR, 'ex1.bam'))
out_stream = AlignmentFile(
proc.stdin, 'wh', header=in_stream.header)
writer = alignmentfile_writer_thread(in_stream,
out_stream)
written, read = self.stream_process(proc,
in_stream,
out_stream,
writer)
self.assertEqual(read, 198)
@unittest.skip("test contains bug")
def test_samtools_processing(self):
# The following test causes the suite to hang
# as the stream_processor raises:
# ValueError: file has no sequences defined (mode='r') - is it SAM/BAM format?
# The whole setup then hangs during exception handling.
with subprocess.Popen('samtools view -b -f 4',
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True) as proc:
in_stream = AlignmentFile(os.path.join(BAM_DATADIR, 'ex1.bam'))
out_stream = AlignmentFile(
proc.stdin, 'wb', header=in_stream.header)
writer = alignmentfile_writer_thread(in_stream,
out_stream)
written, read = self.stream_process(proc,
in_stream,
out_stream,
writer)
self.assertEqual(read, 35)
if __name__ == "__main__":
unittest.main()
|
kyleabeauchamp/pysam
|
tests/StreamFiledescriptors_test.py
|
Python
|
mit
| 3,024
|
[
"pysam"
] |
916a4300fb52f9d51d1b63c36c9b290b880f6bf90909b724cbf9cf70ccc9efa5
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import copy
import numpy
import unittest
from pyscf import gto
from pyscf import scf
from pyscf import lib
mol = gto.M(
verbose = 7,
output = '/dev/null',
atom = '''
O 0 0 0
H 0 -0.757 0.587
H 0 0.757 0.587''',
basis = '631g',
)
mf = scf.dhf.UHF(mol)
mf.conv_tol_grad = 1e-5
mf.kernel()
h4 = gto.M(
verbose = 7,
output = '/dev/null',
atom = '''
H 0 0 1
H 1 1 0
H 0 -0.757 0.587
H 0 0.757 0.587''',
basis = ('sto3g', [[1,[0.3,1]]]),
)
def tearDownModule():
global mol, mf, h4
mol.stdout.close()
h4.stdout.close()
del mol, mf, h4
class KnownValues(unittest.TestCase):
def test_init_guess_minao(self):
dm = scf.dhf.get_init_guess(mol, key='minao')
self.assertAlmostEqual(abs(dm).sum(), 14.859714177083553, 9)
def test_init_guess_huckel(self):
dm = scf.dhf.DHF(mol).get_init_guess(mol, key='huckel')
self.assertAlmostEqual(lib.fp(dm), (-0.6090467376579871-0.08968155321478456j), 9)
def test_get_hcore(self):
h = mf.get_hcore()
self.assertAlmostEqual(numpy.linalg.norm(h), 129.81389477933607, 7)
def test_get_ovlp(self):
s = mf.get_ovlp()
self.assertAlmostEqual(numpy.linalg.norm(s), 6.9961451281502809, 9)
def test_1e(self):
mf = scf.dhf.HF1e(mol)
self.assertAlmostEqual(mf.scf(), -23.888778707255078, 7)
def test_analyze(self):
(pop, chg), dip = mf.analyze()
self.assertAlmostEqual(numpy.linalg.norm(pop), 2.2858506185320837, 6)
def test_scf(self):
self.assertAlmostEqual(mf.e_tot, -76.038520455193833, 6)
def test_energy_tot(self):
e = mf.energy_tot(mf.make_rdm1())
self.assertAlmostEqual(e, mf.e_tot, 9)
def test_get_grad(self):
g = mf.get_grad(mf.mo_coeff, mf.mo_occ)
self.assertAlmostEqual(abs(g).max(), 0, 5)
if scf.dhf.zquatev:
def test_rhf(self):
mol = gto.M(
verbose = 5,
output = '/dev/null',
atom = '''
O 0 0 0
H 0 -0.757 0.587
H 0 0.757 0.587''',
basis = '631g',
)
mf = scf.dhf.RHF(mol)
mf.with_ssss = False
mf.conv_tol_grad = 1e-5
self.assertAlmostEqual(mf.kernel(), -76.03852477545016, 8)
mf.ssss_approx = None
mf.conv_tol_grad = 1e-5
self.assertAlmostEqual(mf.kernel(), -76.03852480744785, 8)
def test_get_veff(self):
n4c = mol.nao_2c() * 2
numpy.random.seed(1)
dm = numpy.random.random((n4c,n4c))+numpy.random.random((n4c,n4c))*1j
dm = dm + dm.T.conj()
v = mf.get_veff(mol, dm)
self.assertAlmostEqual(lib.fp(v), (-21.613084684028077-28.50754366262467j), 8)
mf1 = copy.copy(mf)
mf1.direct_scf = False
v1 = mf1.get_veff(mol, dm)
self.assertAlmostEqual(abs(v-v1).max(), 0, 9)
def test_get_jk(self):
n2c = h4.nao_2c()
n4c = n2c * 2
c1 = .5 / lib.param.LIGHT_SPEED
eri0 = numpy.zeros((n4c,n4c,n4c,n4c), dtype=numpy.complex128)
eri0[:n2c,:n2c,:n2c,:n2c] = h4.intor('int2e_spinor')
eri0[n2c:,n2c:,:n2c,:n2c] = h4.intor('int2e_spsp1_spinor') * c1**2
eri0[:n2c,:n2c,n2c:,n2c:] = eri0[n2c:,n2c:,:n2c,:n2c].transpose(2,3,0,1)
ssss = h4.intor('int2e_spsp1spsp2_spinor') * c1**4
eri0[n2c:,n2c:,n2c:,n2c:] = ssss
numpy.random.seed(1)
dm = numpy.random.random((2,n4c,n4c))+numpy.random.random((2,n4c,n4c))*1j
dm = dm + dm.transpose(0,2,1).conj()
vj0 = numpy.einsum('ijkl,lk->ij', eri0, dm[0])
vk0 = numpy.einsum('ijkl,jk->il', eri0, dm[0])
vj, vk = scf.dhf.get_jk(h4, dm[0], hermi=1, coulomb_allow='SSSS')
self.assertTrue(numpy.allclose(vj0, vj))
self.assertTrue(numpy.allclose(vk0, vk))
vj0 = numpy.einsum('ijkl,xlk->xij', ssss, dm[:,n2c:,n2c:])
vk0 = numpy.einsum('ijkl,xjk->xil', ssss, dm[:,n2c:,n2c:])
vj, vk = scf.dhf._call_veff_ssss(h4, dm, hermi=0)
self.assertTrue(numpy.allclose(vj0, vj))
self.assertTrue(numpy.allclose(vk0, vk))
eri0[n2c:,n2c:,n2c:,n2c:] = 0
vj0 = numpy.einsum('ijkl,xlk->xij', eri0, dm)
vk0 = numpy.einsum('ijkl,xjk->xil', eri0, dm)
vj, vk = scf.dhf.get_jk(h4, dm, hermi=1, coulomb_allow='SSLL')
self.assertTrue(numpy.allclose(vj0, vj))
self.assertTrue(numpy.allclose(vk0, vk))
eri0[n2c:,n2c:,:n2c,:n2c] = 0
eri0[:n2c,:n2c,n2c:,n2c:] = 0
vj0 = numpy.einsum('ijkl,lk->ij', eri0, dm[0])
vk0 = numpy.einsum('ijkl,jk->il', eri0, dm[0])
vj, vk = scf.dhf.get_jk(h4, dm[0], hermi=0, coulomb_allow='LLLL')
self.assertTrue(numpy.allclose(vj0, vj))
self.assertTrue(numpy.allclose(vk0, vk))
def test_get_jk_with_gaunt_breit_high_cost(self):
n2c = h4.nao_2c()
n4c = n2c * 2
c1 = .5 / lib.param.LIGHT_SPEED
eri0 = numpy.zeros((n4c,n4c,n4c,n4c), dtype=numpy.complex128)
eri0[:n2c,:n2c,:n2c,:n2c] = h4.intor('int2e_spinor')
eri0[n2c:,n2c:,:n2c,:n2c] = h4.intor('int2e_spsp1_spinor') * c1**2
eri0[:n2c,:n2c,n2c:,n2c:] = eri0[n2c:,n2c:,:n2c,:n2c].transpose(2,3,0,1)
eri0[n2c:,n2c:,n2c:,n2c:] = h4.intor('int2e_spsp1spsp2_spinor') * c1**4
numpy.random.seed(1)
dm = numpy.random.random((2,n4c,n4c))+numpy.random.random((2,n4c,n4c))*1j
dm = dm + dm.transpose(0,2,1).conj()
eri1 = eri0.copy()
eri0 -= _fill_gaunt(h4, h4.intor('int2e_ssp1ssp2_spinor') * c1**2)
vj0 = numpy.einsum('ijkl,xlk->xij', eri0, dm)
vk0 = numpy.einsum('ijkl,xjk->xil', eri0, dm)
mf = scf.dhf.DHF(h4)
mf.with_gaunt = True
vj1, vk1 = mf.get_jk(h4, dm, hermi=1)
self.assertTrue(numpy.allclose(vj0, vj1))
self.assertTrue(numpy.allclose(vk0, vk1))
eri1 += _fill_gaunt(h4, h4.intor('int2e_breit_ssp1ssp2_spinor', comp=1) * c1**2)
vj0 = numpy.einsum('ijkl,xlk->xij', eri1, dm)
vk0 = numpy.einsum('ijkl,xjk->xil', eri1, dm)
mf.with_breit = True
vj1, vk1 = mf.get_jk(h4, dm, hermi=1)
self.assertTrue(numpy.allclose(vj0, vj1))
self.assertTrue(numpy.allclose(vk0, vk1))
def test_gaunt(self):
erig = _fill_gaunt(h4, h4.intor('int2e_ssp1ssp2_spinor'))
n4c = erig.shape[0]
numpy.random.seed(1)
dm = numpy.random.random((2,n4c,n4c))+numpy.random.random((2,n4c,n4c))*1j
dm = dm + dm.transpose(0,2,1).conj()
c1 = .5 / lib.param.LIGHT_SPEED
vj0 = -numpy.einsum('ijkl,xlk->xij', erig, dm) * c1**2
vk0 = -numpy.einsum('ijkl,xjk->xil', erig, dm) * c1**2
vj1, vk1 = scf.dhf._call_veff_gaunt_breit(h4, dm)
self.assertTrue(numpy.allclose(vj0, vj1))
self.assertTrue(numpy.allclose(vk0, vk1))
# def test_breit(self):
# mol = gto.M(atom='Cl',
# basis={'Cl': gto.parse('''
# Cl S 5.5 1.0
# Cl P 9.053563477 1.0''')},
# charge=9)
# mf = mol.DHF().set(with_breit=True)
# mf.run()
# self.assertTrue(mf.e_tot, -234.888983310961, 8)
#
# mf.with_ssss = False
# mf.run()
# self.assertTrue(mf.e_tot, -234.888999687936, 8)
def test_breit_high_cost(self):
erig = _fill_gaunt(h4, h4.intor('int2e_breit_ssp1ssp2_spinor', comp=1))
n4c = erig.shape[0]
numpy.random.seed(1)
dm = numpy.random.random((n4c,n4c))+numpy.random.random((n4c,n4c))*1j
dm = dm + dm.T.conj()
c1 = .5 / lib.param.LIGHT_SPEED
vj0 = numpy.einsum('ijkl,lk->ij', erig, dm) * c1**2
vk0 = numpy.einsum('ijkl,jk->il', erig, dm) * c1**2
vj1, vk1 = scf.dhf._call_veff_gaunt_breit(h4, dm, with_breit=True)
self.assertTrue(numpy.allclose(vj0, vj1))
self.assertTrue(numpy.allclose(vk0, vk1))
def test_time_rev_matrix(self):
s = mol.intor_symmetric('int1e_ovlp_spinor')
ts = scf.dhf.time_reversal_matrix(mol, s)
self.assertTrue(numpy.allclose(s, ts))
def test_get_occ(self):
mo_energy = mf.mo_energy.copy()
n2c = mo_energy.size // 2
mo_energy[n2c] -= lib.param.LIGHT_SPEED**2*2
mo_energy[n2c+6] -= lib.param.LIGHT_SPEED**2*2
occ = mf.get_occ(mo_energy)
self.assertEqual(list(occ[n2c:]),
[0.,1.,1.,1.,1.,1.,0.,1.,1.,1.,1.,1.,
0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.])
def test_x2c(self):
mfx2c = mf.x2c().run()
self.assertAlmostEqual(mfx2c.e_tot, -76.032703699443999, 9)
def test_h2_sto3g(self):
        # There was a cache-size bug in lib/vhf/r_direct.c for minimal
        # systems
mol = gto.M(atom='H 0 0 0; H 0 0 1', basis='sto3g', verbose=0)
e = mol.DHF().kernel()
self.assertAlmostEqual(e, -1.066122658859047, 12)
def _fill_gaunt(mol, erig):
n2c = erig.shape[0]
n4c = n2c * 2
tao = numpy.asarray(mol.time_reversal_map())
idx = abs(tao)-1 # -1 for C indexing convention
sign_mask = tao<0
eri0 = numpy.zeros((n4c,n4c,n4c,n4c), dtype=numpy.complex128)
eri0[:n2c,n2c:,:n2c,n2c:] = erig # ssp1ssp2
eri2 = erig.take(idx,axis=0).take(idx,axis=1) # sps1ssp2
eri2[sign_mask,:] *= -1
eri2[:,sign_mask] *= -1
eri2 = -eri2.transpose(1,0,2,3)
eri0[n2c:,:n2c,:n2c,n2c:] = eri2
eri2 = erig.take(idx,axis=2).take(idx,axis=3) # ssp1sps2
eri2[:,:,sign_mask,:] *= -1
eri2[:,:,:,sign_mask] *= -1
eri2 = -eri2.transpose(0,1,3,2)
#self.assertTrue(numpy.allclose(eri0, eri2))
eri0[:n2c,n2c:,n2c:,:n2c] = eri2
eri2 = erig.take(idx,axis=0).take(idx,axis=1)
eri2 = eri2.take(idx,axis=2).take(idx,axis=3) # sps1sps2
eri2 = eri2.transpose(1,0,2,3)
eri2 = eri2.transpose(0,1,3,2)
eri2[sign_mask,:] *= -1
eri2[:,sign_mask] *= -1
eri2[:,:,sign_mask,:] *= -1
eri2[:,:,:,sign_mask] *= -1
eri0[n2c:,:n2c,n2c:,:n2c] = eri2
return eri0
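# _fill_gaunt expands the computed small/large-component (ssp1ssp2) block into
# the remaining Gaunt blocks of the full 4-component ERI tensor by applying
# the molecule's time-reversal map: index permutation through tao/idx combined
# with sign flips for Kramers partners (sign_mask).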
if __name__ == "__main__":
print("Full Tests for dhf")
unittest.main()
|
sunqm/pyscf
|
pyscf/scf/test/test_dhf.py
|
Python
|
apache-2.0
| 11,076
|
[
"PySCF"
] |
f35f52617ab9ea6f53ce532d7b752f36d4ae3f7ea65b6cf7e24dfacc67dc7417
|
import logging, re, json, commands, os, copy
from datetime import datetime, timedelta
import time
import itertools, random
import string as strm
import math
from urllib import urlencode
from urlparse import urlparse, urlunparse, parse_qs
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, render, redirect
from django.template import RequestContext, loader
from django.db.models import Count
from django import forms
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from django.utils.cache import patch_cache_control, patch_response_headers
from django.db.models import Q
from django.core.cache import cache
from django.utils import encoding
from django.conf import settings as djangosettings
from django.db import connection, transaction
from core.common.utils import getPrefix, getContextVariables, QuerySetChain
from core.settings import STATIC_URL, FILTER_UI_ENV, defaultDatetimeFormat
from core.pandajob.models import PandaJob, Jobsactive4, Jobsdefined4, Jobswaiting4, Jobsarchived4, Jobsarchived, \
GetRWWithPrioJedi3DAYS, RemainedEventsPerCloud3dayswind, Getfailedjobshspecarch, Getfailedjobshspec, JobsWorldView
from schedresource.models import Schedconfig
from core.common.models import Filestable4
from core.common.models import Datasets
from core.common.models import Sitedata
from core.common.models import FilestableArch
from core.common.models import Users
from core.common.models import Jobparamstable
from core.common.models import Metatable
from core.common.models import Logstable
from core.common.models import Jobsdebug
from core.common.models import Cloudconfig
from core.common.models import Incidents
from core.common.models import Pandalog
from core.common.models import JediJobRetryHistory
from core.common.models import JediTasks
from core.common.models import GetEventsForTask
from core.common.models import JediTaskparams
from core.common.models import JediEvents
from core.common.models import JediDatasets
from core.common.models import JediDatasetContents
from core.common.models import JediWorkQueue
from core.common.models import RequestStat
from core.settings.config import ENV
from time import gmtime, strftime
from settings.local import dbaccess
from django.views.decorators.cache import cache_page
import ErrorCodes
errorFields = []
errorCodes = {}
errorStages = {}
from django.template.defaulttags import register
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
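# Template usage (hypothetical names): {{ mydict|get_item:mykey }}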
try:
hostname = commands.getoutput('hostname')
if hostname.find('.') > 0: hostname = hostname[:hostname.find('.')]
except:
hostname = ''
callCount = 0
homeCloud = {}
objectStores = {}
pandaSites = {}
cloudList = [ 'CA', 'CERN', 'DE', 'ES', 'FR', 'IT', 'ND', 'NL', 'RU', 'TW', 'UK', 'US' ]
statelist = [ 'defined', 'waiting', 'pending', 'assigned', 'throttled', \
'activated', 'sent', 'starting', 'running', 'holding', \
'transferring', 'finished', 'failed', 'cancelled', 'merging', 'closed']
sitestatelist = [ 'defined', 'waiting', 'assigned', 'throttled', 'activated', 'sent', 'starting', 'running', 'holding', 'merging', 'transferring', 'finished', 'failed', 'cancelled' ]
eventservicestatelist = [ 'ready', 'sent', 'running', 'finished', 'cancelled', 'discarded', 'done', 'failed' ]
taskstatelist = [ 'registered', 'defined', 'assigning', 'ready', 'pending', 'scouting', 'scouted', 'running', 'prepared', 'done', 'failed', 'finished', 'aborting', 'aborted', 'finishing', 'topreprocess', 'preprocessing', 'tobroken', 'broken', 'toretry', 'toincexec', 'rerefine' ]
taskstatelist_short = [ 'reg', 'def', 'assgn', 'rdy', 'pend', 'scout', 'sctd', 'run', 'prep', 'done', 'fail', 'finish', 'abrtg', 'abrtd', 'finishg', 'toprep', 'preprc', 'tobrok', 'broken', 'retry', 'incexe', 'refine' ]
taskstatedict = []
for i in range (0, len(taskstatelist)):
tsdict = { 'state' : taskstatelist[i], 'short' : taskstatelist_short[i] }
taskstatedict.append(tsdict)
errorcodelist = [
{ 'name' : 'brokerage', 'error' : 'brokerageerrorcode', 'diag' : 'brokerageerrordiag' },
{ 'name' : 'ddm', 'error' : 'ddmerrorcode', 'diag' : 'ddmerrordiag' },
{ 'name' : 'exe', 'error' : 'exeerrorcode', 'diag' : 'exeerrordiag' },
{ 'name' : 'jobdispatcher', 'error' : 'jobdispatchererrorcode', 'diag' : 'jobdispatchererrordiag' },
{ 'name' : 'pilot', 'error' : 'piloterrorcode', 'diag' : 'piloterrordiag' },
{ 'name' : 'sup', 'error' : 'superrorcode', 'diag' : 'superrordiag' },
{ 'name' : 'taskbuffer', 'error' : 'taskbuffererrorcode', 'diag' : 'taskbuffererrordiag' },
{ 'name' : 'transformation', 'error' : 'transexitcode', 'diag' : None },
]
_logger = logging.getLogger('bigpandamon')
LAST_N_HOURS_MAX = 0
#JOB_LIMIT = 0
#TFIRST = timezone.now()
#TLAST = timezone.now() - timedelta(hours=2400)
PLOW = 1000000
PHIGH = -1000000
standard_fields = [ 'processingtype', 'computingsite', 'jobstatus', 'prodsourcelabel', 'produsername', 'jeditaskid', 'workinggroup', 'transformation', 'cloud', 'homepackage', 'inputfileproject', 'inputfiletype', 'attemptnr', 'specialhandling', 'priorityrange', 'reqid', 'minramcount' , 'eventservice', 'jobsubstatus', 'nucleus']
standard_sitefields = [ 'region', 'gocname', 'nickname', 'status', 'tier', 'comment_field', 'cloud', 'allowdirectaccess', 'allowfax', 'copytool', 'faxredirector', 'retry', 'timefloor' ]
standard_taskfields = [ 'workqueue_id', 'tasktype', 'superstatus', 'corecount', 'taskpriority', 'username', 'transuses', 'transpath', 'workinggroup', 'processingtype', 'cloud', 'campaign', 'project', 'stream', 'tag', 'reqid', 'ramcount', 'nucleus', 'eventservice']
VOLIST = [ 'atlas', 'bigpanda', 'htcondor', 'core', 'aipanda']
VONAME = { 'atlas' : 'ATLAS', 'bigpanda' : 'BigPanDA', 'htcondor' : 'HTCondor', 'core' : 'LSST', '' : '' }
VOMODE = ' '
def escapeInput(strToEscape):
charsToEscape = '!$%^&()[]{};,<>?\`~+%\'\"'
charsToReplace = '_'*len(charsToEscape)
tbl = strm.maketrans(charsToEscape, charsToReplace)
strToEscape = encoding.smart_str(strToEscape, encoding='ascii', errors='ignore')
strToEscape = strToEscape.translate(tbl)
return strToEscape
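# Example (hypothetical input): escapeInput("user's|query") -> "user_s|query".
# Note '|' is deliberately absent from charsToEscape, so it survives as the
# OR separator used by the query builders below.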
def setupSiteInfo(request):
requestParams = {}
if not 'requestParams' in request.session:
request.session['requestParams'] = requestParams
global homeCloud, objectStores, pandaSites, callCount
callCount += 1
if len(homeCloud) > 0 and callCount%100 != 1 and 'refresh' not in request.session['requestParams']: return
sflist = ('siteid','status','cloud','tier','comment_field','objectstore','catchall','corepower')
sites = Schedconfig.objects.filter().exclude(cloud='CMS').values(*sflist)
for site in sites:
pandaSites[site['siteid']] = {}
for f in ( 'siteid', 'status', 'tier', 'comment_field', 'cloud', 'corepower' ):
pandaSites[site['siteid']][f] = site[f]
homeCloud[site['siteid']] = site['cloud']
if (site['catchall'] != None) and (site['catchall'].find('log_to_objectstore') >= 0 or site['objectstore'] != ''):
#print 'object store site', site['siteid'], site['catchall'], site['objectstore']
try:
fpath = getFilePathForObjectStore(site['objectstore'],filetype="logs")
#### dirty hack
fpath = fpath.replace('root://atlas-objectstore.cern.ch/atlas/logs','https://atlas-objectstore.cern.ch:1094/atlas/logs')
if fpath != "" and fpath.startswith('http'): objectStores[site['siteid']] = fpath
except:
pass
def initRequest(request):
global VOMODE, ENV, hostname
print("IP Address for debug-toolbar: " + request.META['REMOTE_ADDR'])
viewParams = {}
#if not 'viewParams' in request.session:
request.session['viewParams'] = viewParams
url = request.get_full_path()
u = urlparse(url)
query = parse_qs(u.query)
query.pop('timestamp', None)
u = u._replace(query=urlencode(query, True))
request.session['notimestampurl'] = urlunparse(u) + ('&' if len(query) > 0 else '?')
if 'USER' in os.environ and os.environ['USER'] != 'apache':
request.session['debug'] = True
elif 'debug' in request.GET and request.GET['debug'] == 'insider':
request.session['debug'] = True
djangosettings.DEBUG = True
else:
request.session['debug'] = False
djangosettings.DEBUG = False
if len(hostname) > 0: request.session['hostname'] = hostname
##self monitor
initSelfMonitor(request)
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
VOMODE = 'atlas'
## Set default page lifetime in the http header, for the use of the front end cache
request.session['max_age_minutes'] = 3
## Is it an https connection with a legit cert presented by the user?
if 'SSL_CLIENT_S_DN' in request.META or 'HTTP_X_SSL_CLIENT_S_DN' in request.META:
if 'SSL_CLIENT_S_DN' in request.META:
request.session['userdn'] = request.META['SSL_CLIENT_S_DN']
else:
request.session['userdn'] = request.META['HTTP_X_SSL_CLIENT_S_DN']
userrec = Users.objects.filter(dn__startswith=request.session['userdn']).values()
if len(userrec) > 0:
request.session['username'] = userrec[0]['name']
ENV['MON_VO'] = ''
request.session['viewParams']['MON_VO'] = ''
VOMODE = ''
for vo in VOLIST:
if request.META['HTTP_HOST'].startswith(vo):
VOMODE = vo
## If DB is Oracle, set vomode to atlas
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
VOMODE = 'atlas'
ENV['MON_VO'] = VONAME[VOMODE]
request.session['viewParams']['MON_VO'] = ENV['MON_VO']
global errorFields, errorCodes, errorStages
requestParams = {}
request.session['requestParams'] = requestParams
if request.method == 'POST':
for p in request.POST:
if p in ( 'csrfmiddlewaretoken', ): continue
pval = request.POST[p]
pval = pval.replace('+',' ')
request.session['requestParams'][p.lower()] = pval
else:
for p in request.GET:
pval = request.GET[p]
pval = pval.replace('+',' ')
if p.lower() != 'batchid': # Special requester exception
pval = pval.replace('#','')
            ## if it's supposed to be an int, validate it
if p.lower() in ( 'days', 'hours', 'limit', 'display_limit', 'taskid', 'jeditaskid', 'jobsetid', 'corecount', 'taskpriority', 'priority', 'attemptnr', 'statenotupdated', 'tasknotupdated', ):
try:
i = int(request.GET[p])
except:
data = {
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
"errormessage" : "Illegal value '%s' for %s" % ( pval, p ),
}
return False, render_to_response('errorPage.html', data, RequestContext(request))
request.session['requestParams'][p.lower()] = pval
setupSiteInfo(request)
if len(errorFields) == 0:
codes = ErrorCodes.ErrorCodes()
errorFields, errorCodes, errorStages = codes.getErrorCodes()
return True, None
def preprocessWildCardString(strToProcess, fieldToLookAt):
if (len(strToProcess)==0):
return '(1=1)'
cardParametersRaw = strToProcess.split('*')
cardRealParameters = [s for s in cardParametersRaw if len(s) >= 1]
countRealParameters = len(cardRealParameters)
countParameters = len(cardParametersRaw)
if (countParameters==0):
return '(1=1)'
currentRealParCount = 0
currentParCount = 0
extraQueryString = '('
for parameter in cardParametersRaw:
leadStar = False
trailStar = False
if len(parameter) > 0:
if (currentParCount-1 >= 0):
# if len(cardParametersRaw[currentParCount-1]) == 0:
leadStar = True
if (currentParCount+1 < countParameters):
# if len(cardParametersRaw[currentParCount+1]) == 0:
trailStar = True
        if fieldToLookAt.lower() == 'produserid':
leadStar = True
trailStar = True
if (leadStar and trailStar):
extraQueryString += '( '+fieldToLookAt+' LIKE (\'%%' + parameter +'%%\'))'
elif ( not leadStar and not trailStar):
extraQueryString += '( '+fieldToLookAt+' LIKE (\'' + parameter +'\'))'
elif (leadStar and not trailStar):
extraQueryString += '( '+fieldToLookAt+' LIKE (\'%%' + parameter +'\'))'
elif (not leadStar and trailStar):
extraQueryString += '( '+fieldToLookAt+' LIKE (\'' + parameter +'%%\'))'
currentRealParCount+=1
if currentRealParCount < countRealParameters:
extraQueryString += ' AND '
currentParCount+=1
extraQueryString += ')'
return extraQueryString
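# Illustrative behaviour (hypothetical field/value):
#   preprocessWildCardString('user.*.task', 'TASKNAME')
# returns "(( TASKNAME LIKE ('user.%%')) AND ( TASKNAME LIKE ('%%.task')))",
# i.e. each '*' wildcard becomes a SQL LIKE pattern and the pieces are ANDed.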
def setupView(request, opmode='', hours=0, limit=-99, querytype='job', wildCardExt=False):
viewParams = {}
if not 'viewParams' in request.session:
request.session['viewParams'] = viewParams
LAST_N_HOURS_MAX = 0
excludeJobNameFromWildCard = True
if 'jobname' in request.session['requestParams']:
jobrequest = request.session['requestParams']['jobname']
if (('*' in jobrequest) or ('|' in jobrequest)):
excludeJobNameFromWildCard = False
wildSearchFields = []
if querytype=='job':
for field in Jobsactive4._meta.get_all_field_names():
if (Jobsactive4._meta.get_field(field).get_internal_type() == 'CharField'):
if not (field == 'jobstatus' or field == 'modificationhost' or field=='batchid' or ( excludeJobNameFromWildCard and field == 'jobname') ):
wildSearchFields.append(field)
if querytype=='task':
for field in JediTasks._meta.get_all_field_names():
if (JediTasks._meta.get_field(field).get_internal_type() == 'CharField'):
if not (field == 'status' or field == 'modificationhost'):
wildSearchFields.append(field)
deepquery = False
fields = standard_fields
if 'limit' in request.session['requestParams']:
request.session['JOB_LIMIT'] = int(request.session['requestParams']['limit'])
elif limit != -99 and limit > 0:
request.session['JOB_LIMIT'] = limit
elif VOMODE == 'atlas':
request.session['JOB_LIMIT'] = 10000
else:
request.session['JOB_LIMIT'] = 10000
if VOMODE == 'atlas':
LAST_N_HOURS_MAX = 12
else:
LAST_N_HOURS_MAX = 7*24
if VOMODE == 'atlas':
if 'cloud' not in fields: fields.append('cloud')
if 'atlasrelease' not in fields: fields.append('atlasrelease')
if 'produsername' in request.session['requestParams'] or 'jeditaskid' in request.session['requestParams'] or 'user' in request.session['requestParams']:
if 'jobsetid' not in fields: fields.append('jobsetid')
if ('hours' not in request.session['requestParams']) and ('days' not in request.session['requestParams']) and ('jobsetid' in request.session['requestParams'] or 'taskid' in request.session['requestParams'] or 'jeditaskid' in request.session['requestParams']):
            ## Cases where a deep query is safe, unless the time depth is specified in the URL.
if 'hours' not in request.session['requestParams'] and 'days' not in request.session['requestParams']:
deepquery = True
else:
if 'jobsetid' in fields: fields.remove('jobsetid')
else:
fields.append('vo')
if hours > 0:
## Call param overrides default hours, but not a param on the URL
LAST_N_HOURS_MAX = hours
## For site-specific queries, allow longer time window
if 'computingsite' in request.session['requestParams']:
LAST_N_HOURS_MAX = 12
if 'jobtype' in request.session['requestParams'] and request.session['requestParams']['jobtype'] == 'eventservice':
LAST_N_HOURS_MAX = 3*24
## hours specified in the URL takes priority over the above
if 'hours' in request.session['requestParams']:
LAST_N_HOURS_MAX = int(request.session['requestParams']['hours'])
if 'days' in request.session['requestParams']:
LAST_N_HOURS_MAX = int(request.session['requestParams']['days'])*24
## Exempt single-job, single-task etc queries from time constraint
if 'hours' not in request.session['requestParams'] and 'days' not in request.session['requestParams']:
if 'jeditaskid' in request.session['requestParams']: deepquery = True
if 'taskid' in request.session['requestParams']: deepquery = True
if 'pandaid' in request.session['requestParams']: deepquery = True
if 'jobname' in request.session['requestParams']: deepquery = True
if 'batchid' in request.session['requestParams']: deepquery = True
if deepquery:
opmode = 'notime'
hours = LAST_N_HOURS_MAX = 24*180
request.session['JOB_LIMIT'] = 999999
if opmode != 'notime':
if LAST_N_HOURS_MAX <= 72 :
request.session['viewParams']['selection'] = ", last %s hours" % LAST_N_HOURS_MAX
else:
request.session['viewParams']['selection'] = ", last %d days" % (float(LAST_N_HOURS_MAX)/24.)
#if JOB_LIMIT < 999999 and JOB_LIMIT > 0:
# viewParams['selection'] += ", <font style='color:#FF8040; size=-1'>Warning: limit %s per job table</font>" % JOB_LIMIT
request.session['viewParams']['selection'] += ". <b>Params:</b> "
#if 'days' not in requestParams:
# viewParams['selection'] += "hours=%s" % LAST_N_HOURS_MAX
#else:
# viewParams['selection'] += "days=%s" % int(LAST_N_HOURS_MAX/24)
if request.session['JOB_LIMIT'] < 100000 and request.session['JOB_LIMIT'] > 0:
request.session['viewParams']['selection'] += " <b>limit=</b>%s" % request.session['JOB_LIMIT']
else:
request.session['viewParams']['selection'] = ""
for param in request.session['requestParams']:
if request.session['requestParams'][param] == 'None': continue
if request.session['requestParams'][param] == '': continue
if param == 'display_limit': continue
if param == 'sortby': continue
if param == 'limit' and request.session['JOB_LIMIT']>0: continue
request.session['viewParams']['selection'] += " <b>%s=</b>%s " % ( param, request.session['requestParams'][param] )
startdate = None
if 'date_from' in request.session['requestParams']:
time_from_struct = time.strptime(request.session['requestParams']['date_from'],'%Y-%m-%d')
startdate = datetime.utcfromtimestamp(time.mktime(time_from_struct))
if not startdate:
startdate = timezone.now() - timedelta(hours=LAST_N_HOURS_MAX)
# startdate = startdate.strftime(defaultDatetimeFormat)
enddate = None
if 'date_to' in request.session['requestParams']:
time_from_struct = time.strptime(request.session['requestParams']['date_to'],'%Y-%m-%d')
enddate = datetime.utcfromtimestamp(time.mktime(time_from_struct))
if 'earlierthan' in request.session['requestParams']:
enddate = timezone.now() - timedelta(hours=int(request.session['requestParams']['earlierthan']))
# enddate = enddate.strftime(defaultDatetimeFormat)
if 'earlierthandays' in request.session['requestParams']:
enddate = timezone.now() - timedelta(hours=int(request.session['requestParams']['earlierthandays'])*24)
# enddate = enddate.strftime(defaultDatetimeFormat)
if enddate == None:
enddate = timezone.now()#.strftime(defaultDatetimeFormat)
query = { 'modificationtime__range' : [startdate.strftime(defaultDatetimeFormat), enddate.strftime(defaultDatetimeFormat)] }
request.session['TFIRST'] = startdate #startdate[:18]
request.session['TLAST'] = enddate#enddate[:18]
### Add any extensions to the query determined from the URL
for vo in [ 'atlas', 'core' ]:
if request.META['HTTP_HOST'].startswith(vo):
query['vo'] = vo
for param in request.session['requestParams']:
if param in ('hours', 'days'): continue
if param == 'cloud' and request.session['requestParams'][param] == 'All': continue
elif param == 'priorityrange':
mat = re.match('([0-9]+)\:([0-9]+)', request.session['requestParams'][param])
if mat:
plo = int(mat.group(1))
phi = int(mat.group(2))
query['currentpriority__gte'] = plo
query['currentpriority__lte'] = phi
elif param == 'jobsetrange':
mat = re.match('([0-9]+)\:([0-9]+)', request.session['requestParams'][param])
if mat:
plo = int(mat.group(1))
phi = int(mat.group(2))
query['jobsetid__gte'] = plo
query['jobsetid__lte'] = phi
elif param == 'user' or param == 'username':
if querytype == 'job':
query['produsername__icontains'] = request.session['requestParams'][param].strip()
elif param in ( 'project', ) and querytype == 'task':
val = request.session['requestParams'][param]
query['taskname__istartswith'] = val
elif param in ( 'outputfiletype', ) and querytype != 'task':
val = request.session['requestParams'][param]
query['destinationdblock__icontains'] = val
elif param in ( 'stream', ) and querytype == 'task':
val = request.session['requestParams'][param]
query['taskname__icontains'] = val
elif param in ( 'tag', ) and querytype == 'task':
val = request.session['requestParams'][param]
query['taskname__endswith'] = val
elif param == 'reqid_from':
val = int(request.session['requestParams'][param])
query['reqid__gte'] = val
elif param == 'reqid_to':
val = int(request.session['requestParams'][param])
query['reqid__lte'] = val
elif param == 'processingtype':
val = request.session['requestParams'][param]
query['processingtype'] = val
elif param == 'mismatchedcloudsite' and request.session['requestParams'][param] == 'true':
listOfCloudSitesMismatched = cache.get('mismatched-cloud-sites-list')
if (listOfCloudSitesMismatched is None) or (len(listOfCloudSitesMismatched) == 0):
request.session['viewParams']['selection'] += " <b>The query can not be processed because list of mismatches is not found. Please visit %s/dash/production/?cloudview=region page and then try again</b>" % request.session['hostname']
else:
extraQueryString = '('
for count, cloudSitePair in enumerate(listOfCloudSitesMismatched):
extraQueryString += ' ( (cloud=\'%s\') and (computingsite=\'%s\') ) ' % (cloudSitePair[1], cloudSitePair[0])
if (count < (len(listOfCloudSitesMismatched)-1)):
extraQueryString += ' OR '
extraQueryString += ')'
if querytype == 'task':
for field in JediTasks._meta.get_all_field_names():
#for param in requestParams:
if param == field:
if param == 'ramcount':
if 'GB' in request.session['requestParams'][param]:
leftlimit, rightlimit = (request.session['requestParams'][param]).split('-')
rightlimit = rightlimit[:-2]
query['%s__range' % param] = (int(leftlimit)*1000, int(rightlimit)*1000-1)
else:
query[param] = int(request.session['requestParams'][param])
elif param == 'transpath':
query['%s__endswith' % param] = request.session['requestParams'][param]
elif param == 'tasktype':
ttype = request.session['requestParams'][param]
if ttype.startswith('anal'): ttype='anal'
elif ttype.startswith('prod'): ttype='prod'
query[param] = ttype
elif param == 'superstatus':
val = escapeInput(request.session['requestParams'][param])
values = val.split('|')
query['superstatus__in'] = values
elif param == 'reqid':
val = escapeInput(request.session['requestParams'][param])
if val.find('|') >= 0:
values = val.split('|')
values = [int(val) for val in values]
query['reqid__in'] = values
else:
query['reqid'] = int(val)
elif param == 'eventservice':
if request.session['requestParams'][param]=='eventservice' or request.session['requestParams'][param]=='1':
query['eventservice'] = 1
else:
query['eventservice'] = 0
else:
if (param not in wildSearchFields):
query[param] = request.session['requestParams'][param]
else:
for field in Jobsactive4._meta.get_all_field_names():
if param == field:
if param == 'minramcount':
if 'GB' in request.session['requestParams'][param]:
leftlimit, rightlimit = (request.session['requestParams'][param]).split('-')
rightlimit = rightlimit[:-2]
query['%s__range' % param] = (int(leftlimit)*1000, int(rightlimit)*1000-1)
else:
query[param] = int(request.session['requestParams'][param])
elif param == 'specialhandling':
query['specialhandling__contains'] = request.session['requestParams'][param]
elif param == 'reqid':
val = escapeInput(request.session['requestParams'][param])
if val.find('|') >= 0:
values = val.split('|')
values = [int(val) for val in values]
query['reqid__in'] = values
else:
query['reqid'] = int(val)
elif param == 'transformation' or param == 'transpath':
query['%s__endswith' % param] = request.session['requestParams'][param]
elif param == 'modificationhost' and request.session['requestParams'][param].find('@') < 0:
query['%s__contains' % param] = request.session['requestParams'][param]
elif param == 'jeditaskid':
if request.session['requestParams']['jeditaskid'] != 'None':
if int(request.session['requestParams']['jeditaskid']) < 4000000:
query['taskid'] = request.session['requestParams'][param]
else:
query[param] = request.session['requestParams'][param]
elif param == 'taskid':
if request.session['requestParams']['taskid'] != 'None': query[param] = request.session['requestParams'][param]
elif param == 'pandaid':
try:
pid = request.session['requestParams']['pandaid']
if pid.find(',') >= 0:
pidl = pid.split(',')
query['pandaid__in'] = pidl
else:
query['pandaid'] = int(pid)
except:
query['jobname'] = request.session['requestParams']['pandaid']
elif param == 'jobstatus' and request.session['requestParams'][param] == 'finished' and ( ('mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'eventservice') or ('jobtype' in request.session['requestParams'] and request.session['requestParams']['jobtype'] == 'eventservice') ):
query['jobstatus__in'] = ( 'finished', 'cancelled' )
elif param == 'jobstatus':
val = escapeInput(request.session['requestParams'][param])
values = val.split('|')
query['jobstatus__in'] = values
elif param == 'eventservice':
if request.session['requestParams'][param]=='esmerge' or request.session['requestParams'][param]== '2':
query['eventservice'] = 2
elif request.session['requestParams'][param]=='eventservice' or request.session['requestParams'][param]== '1':
query['eventservice'] = 1
elif request.session['requestParams'][param] == 'not2':
try:
extraQueryString += ' AND (eventservice != 2) '
except NameError:
extraQueryString = '(eventservice != 2)'
else:
query['eventservice__isnull']=True
else:
if (param not in wildSearchFields):
query[param] = request.session['requestParams'][param]
if 'jobtype' in request.session['requestParams']:
jobtype = request.session['requestParams']['jobtype']
else:
jobtype = opmode
if jobtype.startswith('anal'):
query['prodsourcelabel__in'] = ['panda', 'user', 'prod_test', 'rc_test']
elif jobtype.startswith('prod'):
query['prodsourcelabel__in'] = ['managed', 'prod_test', 'rc_test']
elif jobtype == 'groupproduction':
query['prodsourcelabel'] = 'managed'
query['workinggroup__isnull'] = False
elif jobtype == 'eventservice':
query['eventservice']=1
elif jobtype == 'esmerge':
query['eventservice']=2
elif jobtype.find('test') >= 0:
query['prodsourcelabel__icontains'] = jobtype
if (wildCardExt == False):
return query
try:
extraQueryString += ' AND '
except NameError:
extraQueryString = ''
wildSearchFields = (set(wildSearchFields) & set(request.session['requestParams'].keys()))
wildSearchFields1 = set()
    for wildField in wildSearchFields:
        if not (wildField.lower() == 'transformation'):
            if not ((wildField.lower() == 'cloud') & (any(card.lower() == 'all' for card in request.session['requestParams'][wildField].split('|')))):
                wildSearchFields1.add(wildField)
    wildSearchFields = wildSearchFields1
    lenWildSearchFields = len(wildSearchFields)
    currentField = 1
    for wildField in wildSearchFields:
        extraQueryString += '('
        wildCards = request.session['requestParams'][wildField].split('|')
        countCards = len(wildCards)
        currentCardCount = 1
        if not ((wildField.lower() == 'cloud') & (any(card.lower() == 'all' for card in wildCards))):
            for card in wildCards:
                extraQueryString += preprocessWildCardString(card, wildField)
                if (currentCardCount < countCards): extraQueryString += ' OR '
                currentCardCount += 1
        extraQueryString += ')'
        if (currentField < lenWildSearchFields): extraQueryString += ' AND '
        currentField += 1
if ('jobparam' in request.session['requestParams'].keys()):
jobParWildCards = request.session['requestParams']['jobparam'].split('|')
jobParCountCards = len(jobParWildCards)
jobParCurrentCardCount = 1
extraJobParCondition = '('
for card in jobParWildCards:
extraJobParCondition += preprocessWildCardString( escapeInput(card) , 'JOBPARAMETERS')
if (jobParCurrentCardCount < jobParCountCards): extraJobParCondition +=' OR '
jobParCurrentCardCount += 1
extraJobParCondition += ')'
pandaIDs = []
jobParamQuery = { 'modificationtime__range' : [startdate.strftime(defaultDatetimeFormat), enddate.strftime(defaultDatetimeFormat)] }
jobs = Jobparamstable.objects.filter(**jobParamQuery).extra(where=[extraJobParCondition]).values('pandaid')
for values in jobs:
pandaIDs.append(values['pandaid'])
query['pandaid__in'] = pandaIDs
if extraQueryString.endswith(' AND '):
extraQueryString=extraQueryString[:-5]
if (len(extraQueryString) < 2):
extraQueryString = '1=1'
return (query,extraQueryString, LAST_N_HOURS_MAX)
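# NB: setupView returns a bare query dict when wildCardExt is False, and a
# (query, extraQueryString, LAST_N_HOURS_MAX) tuple otherwise; callers must
# unpack accordingly.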
def cleanJobList(request, jobl, mode='nodrop', doAddMeta = True):
if 'mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'drop': mode='drop'
doAddMetaStill = False
if 'fields' in request.session['requestParams']:
fieldsStr = request.session['requestParams']['fields']
fields = fieldsStr.split("|")
if 'metastruct' in fields:
doAddMetaStill = True
if doAddMeta:
jobs = addJobMetadata(jobl, doAddMetaStill)
else:
jobs = jobl
for job in jobs:
if isEventService(job):
if 'taskbuffererrorcode' in job and job['taskbuffererrorcode'] == 111:
job['taskbuffererrordiag'] = 'Rerun scheduled to pick up unprocessed events'
job['piloterrorcode'] = 0
job['piloterrordiag'] = 'Job terminated by signal from PanDA server'
# job['jobstatus'] = 'finished'
if 'taskbuffererrorcode' in job and job['taskbuffererrorcode'] == 112:
# job['taskbuffererrordiag'] = 'All events processed, merge job created'
job['piloterrorcode'] = 0
job['piloterrordiag'] = 'Job terminated by signal from PanDA server'
# job['jobstatus'] = 'finished'
if 'taskbuffererrorcode' in job and job['taskbuffererrorcode'] == 114:
job['taskbuffererrordiag'] = 'No rerun to pick up unprocessed, at max attempts'
job['piloterrorcode'] = 0
job['piloterrordiag'] = 'Job terminated by signal from PanDA server'
# job['jobstatus'] = 'finished'
#if 'taskbuffererrorcode' in job and job['taskbuffererrorcode'] == 115:
# job['taskbuffererrordiag'] = 'No events remaining, other jobs still processing'
# job['piloterrorcode'] = 0
# job['piloterrordiag'] = 'Job terminated by signal from PanDA server'
# #job['jobstatus'] = 'finished'
if 'taskbuffererrorcode' in job and job['taskbuffererrorcode'] == 116:
job['taskbuffererrordiag'] = 'No remaining event ranges to allocate'
job['piloterrorcode'] = 0
job['piloterrordiag'] = 'Job terminated by signal from PanDA server'
#job['jobstatus'] = 'finished'
if 'jobmetrics' in job:
pat = re.compile('.*mode\=([^\s]+).*HPCStatus\=([A-Za-z0-9]+)')
mat = pat.match(job['jobmetrics'])
if mat:
job['jobmode'] = mat.group(1)
job['substate'] = mat.group(2)
pat = re.compile('.*coreCount\=([0-9]+)')
mat = pat.match(job['jobmetrics'])
if mat:
job['corecount'] = mat.group(1)
if 'jobsubstatus' in job and job['jobstatus']=='closed' and job['jobsubstatus']=='toreassign':
job['jobstatus']+= ':' + job['jobsubstatus']
if 'eventservice' in job:
if job['eventservice'] == 1 :
job['eventservice']= 'eventservice'
elif job['eventservice'] == 2:
job['eventservice']= 'esmerge'
else:
job['eventservice']= 'ordinary'
if 'destinationdblock' in job:
ddbfields = job['destinationdblock'].split('.')
if len(ddbfields) == 6:
job['outputfiletype'] = ddbfields[4]
elif len(ddbfields) >= 7:
job['outputfiletype'] = ddbfields[6]
else:
job['outputfiletype'] = '?'
#print job['destinationdblock'], job['outputfiletype']
try:
job['homecloud'] = homeCloud[job['computingsite']]
except:
job['homecloud'] = None
if 'produsername' in job and not job['produsername']:
if ('produserid' in job) and job['produserid']:
job['produsername'] = job['produserid']
else:
job['produsername'] = 'Unknown'
if job['transformation']: job['transformation'] = job['transformation'].split('/')[-1]
if (job['jobstatus'] == 'failed' or job['jobstatus'] == 'cancelled') and 'brokerageerrorcode' in job:
job['errorinfo'] = errorInfo(job,nchars=70)
else:
job['errorinfo'] = ''
job['jobinfo'] = ''
if isEventService(job):
if 'taskbuffererrordiag' in job and len(job['taskbuffererrordiag']) > 0:
job['jobinfo'] = job['taskbuffererrordiag']
elif 'specialhandling' in job and job['specialhandling'] == 'esmerge':
job['jobinfo'] = 'Event service merge job'
else:
job['jobinfo'] = 'Event service job'
job['duration'] = ""
job['durationsec'] = 0
#if job['jobstatus'] in ['finished','failed','holding']:
if 'endtime' in job and 'starttime' in job and job['starttime']:
starttime = job['starttime']
if job['endtime']:
endtime = job['endtime']
else:
endtime = timezone.now()
duration = max(endtime - starttime, timedelta(seconds=0))
ndays = duration.days
strduration = str(timedelta(seconds=duration.seconds))
job['duration'] = "%s:%s" % ( ndays, strduration )
job['durationsec'] = ndays*24*3600+duration.seconds
job['waittime'] = ""
#if job['jobstatus'] in ['running','finished','failed','holding','cancelled','transferring']:
if 'creationtime' in job and 'starttime' in job and job['creationtime']:
creationtime = job['creationtime']
if job['starttime']:
starttime = job['starttime']
else:
starttime = timezone.now()
wait = starttime - creationtime
ndays = wait.days
strwait = str(timedelta(seconds=wait.seconds))
job['waittime'] = "%s:%s" % (ndays, strwait)
if 'currentpriority' in job:
plo = int(job['currentpriority'])-int(job['currentpriority'])%100
phi = plo+99
job['priorityrange'] = "%d:%d" % ( plo, phi )
if 'jobsetid' in job and job['jobsetid']:
plo = int(job['jobsetid'])-int(job['jobsetid'])%100
phi = plo+99
job['jobsetrange'] = "%d:%d" % ( plo, phi )
if 'corecount' in job and job['corecount'] is None:
job['corecount']=1
## drop duplicate jobs
droplist = []
job1 = {}
newjobs = []
for job in jobs:
pandaid = job['pandaid']
dropJob = 0
if pandaid in job1:
## This is a duplicate. Drop it.
dropJob = 1
else:
job1[pandaid] = 1
if (dropJob == 0):
newjobs.append(job)
jobs = newjobs
if mode == 'nodrop':
print 'job list cleaned'
return jobs
## If the list is for a particular JEDI task, filter out the jobs superseded by retries
taskids = {}
for job in jobs:
if 'jeditaskid' in job: taskids[job['jeditaskid']] = 1
droplist = []
newjobs = []
if len(taskids) == 1:
for task in taskids:
retryquery = {}
retryquery['jeditaskid'] = task
retries = JediJobRetryHistory.objects.filter(**retryquery).extra(where=["OLDPANDAID!=NEWPANDAID AND RELATIONTYPE IN ('', 'retry', 'pmerge', 'merge')"]).order_by('newpandaid').values()
hashRetries = {}
for retry in retries:
hashRetries[retry['oldpandaid']] = retry
for job in jobs:
dropJob = 0
pandaid = job['pandaid']
if len(taskids) == 1:
if hashRetries.has_key(pandaid):
retry = hashRetries[pandaid]
if not isEventService(job):
if retry['relationtype'] == '' or retry['relationtype'] == 'retry' or ( job['processingtype'] == 'pmerge' and retry['relationtype'] == 'merge'):
dropJob = retry['newpandaid']
else:
mergeCand = [x['jobsetid'] for x in jobs if x['pandaid']==retry['newpandaid'] ]
if ( len(mergeCand) > 0) and (job['jobsetid'] == mergeCand[0]):
dropJob = 1
if 'jobstatus' in request.session['requestParams'] and request.session['requestParams'][
'jobstatus'] == 'cancelled' and job['jobstatus'] != 'cancelled':
dropJob = 1
if (dropJob == 0):
newjobs.append(job)
else:
droplist.append( { 'pandaid' : pandaid, 'newpandaid' : dropJob } )
#droplist = sorted(droplist, key=lambda x:-x['modificationtime'], reverse=True)
jobs = newjobs
global PLOW, PHIGH
request.session['TFIRST'] = timezone.now()#.strftime(defaultDatetimeFormat)
request.session['TLAST'] = (timezone.now() - timedelta(hours=2400))#.strftime(defaultDatetimeFormat)
PLOW = 1000000
PHIGH = -1000000
for job in jobs:
if job['modificationtime'] > request.session['TLAST']: request.session['TLAST'] = job['modificationtime']
if job['modificationtime'] < request.session['TFIRST']: request.session['TFIRST'] = job['modificationtime']
if job['currentpriority'] > PHIGH: PHIGH = job['currentpriority']
if job['currentpriority'] < PLOW: PLOW = job['currentpriority']
jobs = sorted(jobs, key=lambda x:x['modificationtime'], reverse=True)
print 'job list cleaned'
return jobs
def cleanTaskList(request, tasks):
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
else:
tmpTableName = "TMP_IDS1"
for task in tasks:
if task['transpath']: task['transpath'] = task['transpath'].split('/')[-1]
if task['statechangetime'] == None: task['statechangetime'] = task['modificationtime']
## Get status of input processing as indicator of task progress
dsquery = {}
dsquery['type__in'] = ['input', 'pseudo_input' ]
dsquery['masterid__isnull'] = True
taskl = []
for t in tasks:
taskl.append(t['jeditaskid'])
# dsquery['jeditaskid__in'] = taskl
random.seed()
transactionKey = random.randrange(1000000)
connection.enter_transaction_management()
new_cur = connection.cursor()
for id in taskl:
new_cur.execute("INSERT INTO %s(ID,TRANSACTIONKEY) VALUES (%i,%i)" % (tmpTableName,id,transactionKey)) # Backend dependable
connection.commit()
dsets = JediDatasets.objects.filter(**dsquery).extra(where=["JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)]).values('jeditaskid','nfiles','nfilesfinished','nfilesfailed')
dsinfo = {}
if len(dsets) > 0:
for ds in dsets:
taskid = ds['jeditaskid']
if taskid not in dsinfo:
dsinfo[taskid] = []
dsinfo[taskid].append(ds)
new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
connection.commit()
connection.leave_transaction_management()
for task in tasks:
if 'totevrem' not in task:
task['totevrem'] = None
if 'eventservice' in task:
if task['eventservice']==1:
task['eventservice']='eventservice'
else:
task['eventservice']='ordinary'
if 'errordialog' in task:
if len(task['errordialog']) > 100: task['errordialog'] = task['errordialog'][:90]+'...'
if 'reqid' in task and task['reqid'] < 100000 and task['reqid'] > 100 and task['reqid'] != 300 and ( ('tasktype' in task) and (not task['tasktype'].startswith('anal'))):
task['deftreqid'] = task['reqid']
if 'corecount' in task and task['corecount'] is None:
task['corecount']=1
#if task['status'] == 'running' and task['jeditaskid'] in dsinfo:
dstotals = {}
dstotals['nfiles'] = 0
dstotals['nfilesfinished'] = 0
dstotals['nfilesfailed'] = 0
dstotals['pctfinished'] = 0
dstotals['pctfailed'] = 0
if (task['jeditaskid'] in dsinfo):
nfiles = 0
nfinished = 0
nfailed = 0
for ds in dsinfo[task['jeditaskid']]:
if int(ds['nfiles']) > 0:
nfiles += int(ds['nfiles'])
nfinished += int(ds['nfilesfinished'])
nfailed += int(ds['nfilesfailed'])
if nfiles > 0:
dstotals = {}
dstotals['nfiles'] = nfiles
dstotals['nfilesfinished'] = nfinished
dstotals['nfilesfailed'] = nfailed
dstotals['pctfinished'] = int(100.*nfinished/nfiles)
dstotals['pctfailed'] = int(100.*nfailed/nfiles)
task['dsinfo'] = dstotals
if 'sortby' in request.session['requestParams']:
sortby = request.session['requestParams']['sortby']
if sortby == 'time-ascending':
tasks = sorted(tasks, key=lambda x:x['modificationtime'])
if sortby == 'time-descending':
tasks = sorted(tasks, key=lambda x:x['modificationtime'], reverse=True)
if sortby == 'statetime-descending':
tasks = sorted(tasks, key=lambda x:x['statechangetime'], reverse=True)
elif sortby == 'priority':
tasks = sorted(tasks, key=lambda x:x['taskpriority'], reverse=True)
elif sortby == 'nfiles':
tasks = sorted(tasks, key=lambda x:x['dsinfo']['nfiles'], reverse=True)
elif sortby == 'pctfinished':
tasks = sorted(tasks, key=lambda x:x['dsinfo']['pctfinished'], reverse=True)
elif sortby == 'pctfailed':
tasks = sorted(tasks, key=lambda x:x['dsinfo']['pctfailed'], reverse=True)
elif sortby == 'taskname':
tasks = sorted(tasks, key=lambda x:x['taskname'])
elif sortby == 'jeditaskid' or sortby == 'taskid':
tasks = sorted(tasks, key=lambda x:-x['jeditaskid'])
elif sortby == 'cloud':
tasks = sorted(tasks, key=lambda x:x['cloud'], reverse=True)
else:
sortby = "jeditaskid"
tasks = sorted(tasks, key=lambda x:-x['jeditaskid'])
return tasks
def jobSummaryDict(request, jobs, fieldlist = None):
""" Return a dictionary summarizing the field values for the chosen most interesting fields """
sumd = {}
if fieldlist:
flist = fieldlist
else:
flist = standard_fields
for job in jobs:
for f in flist:
if f in job and job[f]:
if f == 'taskid' and int(job[f]) < 1000000 and 'produsername' not in request.session['requestParams']: continue
                if f == 'nucleus' and job[f] is None: continue  # defensive; job[f] is already known truthy here
if f == 'specialhandling':
if not 'specialhandling' in sumd: sumd['specialhandling'] = {}
shl = job['specialhandling'].split()
for v in shl:
if not v in sumd['specialhandling']: sumd['specialhandling'][v] = 0
sumd['specialhandling'][v] += 1
else:
if not f in sumd: sumd[f] = {}
if not job[f] in sumd[f]: sumd[f][job[f]] = 0
sumd[f][job[f]] += 1
for extra in ( 'jobmode', 'substate', 'outputfiletype' ):
if extra in job:
if not extra in sumd: sumd[extra] = {}
if not job[extra] in sumd[extra]: sumd[extra][job[extra]] = 0
sumd[extra][job[extra]] += 1
## event service
esjobdict = {}
esjobs = []
for job in jobs:
if isEventService(job):
esjobs.append(job['pandaid'])
#esjobdict[job['pandaid']] = {}
#for s in eventservicestatelist:
# esjobdict[job['pandaid']][s] = 0
if len(esjobs) > 0:
sumd['eventservicestatus'] = {}
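        # Count event states for the selected event service jobs: load the
        # pandaids into the temporary id table and join it against
        # ATLAS_PANDA.JEDI_EVENTS, grouping by event status.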
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
else:
tmpTableName = "TMP_IDS1"
transactionKey = random.randrange(1000000)
connection.enter_transaction_management()
new_cur = connection.cursor()
executionData = []
for id in esjobs:
executionData.append((id,transactionKey))
query = """INSERT INTO """+tmpTableName+"""(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
new_cur.executemany(query, executionData)
connection.commit()
new_cur.execute("SELECT STATUS, COUNT(STATUS) AS COUNTSTAT FROM "
" (SELECT DISTINCT PANDAID, STATUS FROM ATLAS_PANDA.JEDI_EVENTS WHERE PANDAID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i))t1 "
"GROUP BY STATUS" % (tmpTableName, transactionKey))
evtable = dictfetchall(new_cur)
new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
connection.commit()
connection.leave_transaction_management()
for ev in evtable:
evstat = eventservicestatelist[ev['STATUS']]
sumd['eventservicestatus'][evstat] = ev['COUNTSTAT']
#for ev in evtable:
# evstat = eventservicestatelist[ev['STATUS']]
# if evstat not in sumd['eventservicestatus']:
# sumd['eventservicestatus'][evstat] = 0
# sumd['eventservicestatus'][evstat] += 1
# #esjobdict[ev['PANDAID']][evstat] += 1
## convert to ordered lists
suml = []
for f in sumd:
itemd = {}
itemd['field'] = f
iteml = []
kys = sumd[f].keys()
if f == 'minramcount':
newvalues = {}
for ky in kys:
roundedval = int(ky/1000)
if roundedval in newvalues:
newvalues[roundedval] += sumd[f][ky]
else:
newvalues[roundedval] = sumd[f][ky]
for ky in newvalues:
iteml.append({ 'kname' : str(ky) + '-'+str(ky+1) + 'GB', 'kvalue' : newvalues[ky] })
iteml = sorted(iteml, key=lambda x:str(x['kname']).lower())
else:
if f in ( 'priorityrange', 'jobsetrange' ):
skys = []
for k in kys:
skys.append( { 'key' : k, 'val' : int(k[:k.index(':')]) } )
skys = sorted(skys, key=lambda x:x['val'])
kys = []
for sk in skys:
kys.append(sk['key'])
elif f in ( 'attemptnr', 'jeditaskid', 'taskid', ):
kys = sorted(kys, key=lambda x:int(x))
else:
kys.sort()
for ky in kys:
iteml.append({ 'kname' : ky, 'kvalue' : sumd[f][ky] })
if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'count':
iteml = sorted(iteml, key=lambda x:x['kvalue'], reverse=True)
elif f not in ( 'priorityrange', 'jobsetrange', 'attemptnr', 'jeditaskid', 'taskid', ):
iteml = sorted(iteml, key=lambda x:str(x['kname']).lower())
itemd['list'] = iteml
suml.append(itemd)
suml = sorted(suml, key=lambda x:x['field'])
return suml, esjobdict
def siteSummaryDict(sites):
""" Return a dictionary summarizing the field values for the chosen most interesting fields """
sumd = {}
sumd['category'] = {}
sumd['category']['test'] = 0
sumd['category']['production'] = 0
sumd['category']['analysis'] = 0
sumd['category']['multicloud'] = 0
for site in sites:
for f in standard_sitefields:
if f in site:
if not f in sumd: sumd[f] = {}
if not site[f] in sumd[f]: sumd[f][site[f]] = 0
sumd[f][site[f]] += 1
isProd = True
if site['siteid'].find('ANALY') >= 0:
isProd = False
sumd['category']['analysis'] += 1
if site['siteid'].lower().find('test') >= 0:
isProd = False
sumd['category']['test'] += 1
if (site['multicloud'] is not None) and (site['multicloud'] != 'None') and (re.match('[A-Z]+',site['multicloud'])):
sumd['category']['multicloud'] += 1
if isProd: sumd['category']['production'] += 1
    if VOMODE != 'atlas' and 'cloud' in sumd: del sumd['cloud']
## convert to ordered lists
suml = []
for f in sumd:
itemd = {}
itemd['field'] = f
iteml = []
kys = sumd[f].keys()
kys.sort()
for ky in kys:
iteml.append({ 'kname' : ky, 'kvalue' : sumd[f][ky] })
itemd['list'] = iteml
suml.append(itemd)
suml = sorted(suml, key=lambda x:x['field'])
return suml
def userSummaryDict(jobs):
""" Return a dictionary summarizing the field values for the chosen most interesting fields """
sumd = {}
for job in jobs:
if 'produsername' in job and job['produsername'] != None:
user = job['produsername'].lower()
else:
user = 'Unknown'
if not user in sumd:
sumd[user] = {}
for state in statelist:
sumd[user][state] = 0
sumd[user]['name'] = job['produsername']
sumd[user]['cputime'] = 0
sumd[user]['njobs'] = 0
for state in statelist:
sumd[user]['n'+state] = 0
sumd[user]['nsites'] = 0
sumd[user]['sites'] = {}
sumd[user]['nclouds'] = 0
sumd[user]['clouds'] = {}
sumd[user]['nqueued'] = 0
sumd[user]['latest'] = timezone.now() - timedelta(hours=2400)
sumd[user]['pandaid'] = 0
cloud = job['cloud']
site = job['computingsite']
cpu = float(job['cpuconsumptiontime'])/1.
state = job['jobstatus']
if job['modificationtime'] > sumd[user]['latest']: sumd[user]['latest'] = job['modificationtime']
if job['pandaid'] > sumd[user]['pandaid']: sumd[user]['pandaid'] = job['pandaid']
sumd[user]['cputime'] += cpu
sumd[user]['njobs'] += 1
if 'n%s' % (state) not in sumd[user]:
sumd[user]['n' + state] = 0
sumd[user]['n'+state] += 1
if not site in sumd[user]['sites']: sumd[user]['sites'][site] = 0
sumd[user]['sites'][site] += 1
        if not cloud in sumd[user]['clouds']: sumd[user]['clouds'][cloud] = 0  # key on cloud, not site
sumd[user]['clouds'][cloud] += 1
for user in sumd:
sumd[user]['nsites'] = len(sumd[user]['sites'])
sumd[user]['nclouds'] = len(sumd[user]['clouds'])
sumd[user]['nqueued'] = sumd[user]['ndefined'] + sumd[user]['nwaiting'] + sumd[user]['nassigned'] + sumd[user]['nactivated']
sumd[user]['cputime'] = "%d" % float(sumd[user]['cputime'])
## convert to list ordered by username
ukeys = sumd.keys()
ukeys.sort()
suml = []
for u in ukeys:
uitem = {}
uitem['name'] = u
        uitem['latest'] = sumd[u]['pandaid']  # highest pandaid seen for the user; used only for ordering
uitem['dict'] = sumd[u]
suml.append(uitem)
suml = sorted(suml, key=lambda x:-x['latest'])
return suml
def taskSummaryDict(request, tasks, fieldlist = None):
""" Return a dictionary summarizing the field values for the chosen most interesting fields """
sumd = {}
if fieldlist:
flist = fieldlist
else:
flist = standard_taskfields
for task in tasks:
for f in flist:
if 'tasktype' in request.session['requestParams'] and request.session['requestParams']['tasktype'].startswith('analy'):
## Remove the noisy useless parameters in analysis listings
                if f in ( 'reqid', 'stream', 'tag' ): continue
if len(task['taskname'].split('.')) == 5:
if f == 'project':
try:
if not f in sumd: sumd[f] = {}
project = task['taskname'].split('.')[0]
if not project in sumd[f]: sumd[f][project] = 0
sumd[f][project] += 1
except:
pass
if f == 'stream':
try:
if not f in sumd: sumd[f] = {}
stream = task['taskname'].split('.')[2]
if not re.match('[0-9]+',stream):
if not stream in sumd[f]: sumd[f][stream] = 0
sumd[f][stream] += 1
except:
pass
if f == 'tag':
try:
if not f in sumd: sumd[f] = {}
tags = task['taskname'].split('.')[4]
if not tags.startswith('job_'):
tagl = tags.split('_')
tag = tagl[-1]
if not tag in sumd[f]: sumd[f][tag] = 0
sumd[f][tag] += 1
# for tag in tagl:
# if not tag in sumd[f]: sumd[f][tag] = 0
# sumd[f][tag] += 1
except:
pass
if f in task and task[f]:
val = task[f]
if val == 'anal': val = 'analy'
if not f in sumd: sumd[f] = {}
if not val in sumd[f]: sumd[f][val] = 0
sumd[f][val] += 1
## convert to ordered lists
suml = []
for f in sumd:
itemd = {}
itemd['field'] = f
iteml = []
kys = sumd[f].keys()
kys.sort()
if f != 'ramcount':
for ky in kys:
iteml.append({ 'kname' : ky, 'kvalue' : sumd[f][ky] })
iteml = sorted(iteml, key=lambda x:str(x['kname']).lower())
else:
newvalues = {}
for ky in kys:
roundedval = int(ky/1000)
if roundedval in newvalues:
newvalues[roundedval] += sumd[f][ky]
else:
newvalues[roundedval] = sumd[f][ky]
for ky in newvalues:
iteml.append({ 'kname' : str(ky) + '-'+str(ky+1) + 'GB', 'kvalue' : newvalues[ky] })
iteml = sorted(iteml, key=lambda x:str(x['kname']).lower())
itemd['list'] = iteml
suml.append(itemd)
suml = sorted(suml, key=lambda x:x['field'])
return suml
def wgTaskSummary(request, fieldname='workinggroup', view='production', taskdays=3):
""" Return a dictionary summarizing the field values for the chosen most interesting fields """
query = {}
hours = 24*taskdays
startdate = timezone.now() - timedelta(hours=hours)
startdate = startdate.strftime(defaultDatetimeFormat)
enddate = timezone.now().strftime(defaultDatetimeFormat)
query['modificationtime__range'] = [startdate, enddate]
if fieldname == 'workinggroup': query['workinggroup__isnull'] = False
if view == 'production':
query['tasktype'] = 'prod'
elif view == 'analysis':
query['tasktype'] = 'anal'
if 'processingtype' in request.session['requestParams']:
query['processingtype'] = request.session['requestParams']['processingtype']
if 'workinggroup' in request.session['requestParams']:
query['workinggroup'] = request.session['requestParams']['workinggroup']
if 'project' in request.session['requestParams']:
query['taskname__istartswith'] = request.session['requestParams']['project']
summary = JediTasks.objects.filter(**query).values(fieldname,'status').annotate(Count('status')).order_by(fieldname,'status')
totstates = {}
tottasks = 0
wgsum = {}
for state in taskstatelist:
totstates[state] = 0
for rec in summary:
wg = rec[fieldname]
status = rec['status']
count = rec['status__count']
if status not in taskstatelist: continue
tottasks += count
totstates[status] += count
if wg not in wgsum:
wgsum[wg] = {}
wgsum[wg]['name'] = wg
wgsum[wg]['count'] = 0
wgsum[wg]['states'] = {}
wgsum[wg]['statelist'] = []
for state in taskstatelist:
wgsum[wg]['states'][state] = {}
wgsum[wg]['states'][state]['name'] = state
wgsum[wg]['states'][state]['count'] = 0
wgsum[wg]['count'] += count
wgsum[wg]['states'][status]['count'] += count
## convert to ordered lists
suml = []
for f in wgsum:
itemd = {}
itemd['field'] = f
itemd['count'] = wgsum[f]['count']
kys = taskstatelist
iteml = []
for ky in kys:
iteml.append({ 'kname' : ky, 'kvalue' : wgsum[f]['states'][ky]['count'] })
itemd['list'] = iteml
suml.append(itemd)
suml = sorted(suml, key=lambda x:x['field'])
return suml
def extensibleURL(request, xurl = ''):
""" Return a URL that is ready for p=v query extension(s) to be appended """
if xurl == '': xurl = request.get_full_path()
    if xurl.endswith('/'): xurl = xurl[:-1]
if xurl.find('?') > 0:
xurl += '&'
else:
xurl += '?'
#if 'jobtype' in requestParams:
# xurl += "jobtype=%s&" % requestParams['jobtype']
return xurl
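# Example (hypothetical URL): extensibleURL(request) on '/jobs/?jobstatus=failed'
# yields '/jobs/?jobstatus=failed&', so further 'p=v' pairs can be appended as-is.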
@cache_page(60*20)
def mainPage(request):
valid, response = initRequest(request)
if not valid: return response
setupView(request)
debuginfo = None
if request.session['debug']:
debuginfo = "<h2>Debug info</h2>"
from django.conf import settings
for name in dir(settings):
debuginfo += "%s = %s<br>" % ( name, getattr(settings, name) )
debuginfo += "<br>******* Environment<br>"
for env in os.environ:
debuginfo += "%s = %s<br>" % ( env, os.environ[env] )
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):  # one-element tuple; a bare string would do substring matching
del request.session['TFIRST']
del request.session['TLAST']
data = {
'prefix': getPrefix(request),
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'debuginfo' : debuginfo
}
data.update(getContextVariables(request))
##self monitor
endSelfMonitor(request)
response = render_to_response('core-mainPage.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
elif ( ('HTTP_ACCEPT' in request.META) and request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json')) or ('json' in request.session['requestParams']):
return HttpResponse('json', mimetype='text/html')
else:
return HttpResponse('not understood', mimetype='text/html')
def helpPage(request):
valid, response = initRequest(request)
if not valid: return response
setupView(request)
del request.session['TFIRST']
del request.session['TLAST']
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
data = {
'prefix': getPrefix(request),
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
}
data.update(getContextVariables(request))
##self monitor
endSelfMonitor(request)
response = render_to_response('completeHelp.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
elif request.META.get('CONTENT_TYPE', 'text/plain') == 'application/json':
return HttpResponse('json', mimetype='text/html')
else:
return HttpResponse('not understood', mimetype='text/html')
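# errorInfo: build an HTML error summary (errtxt) and a condensed one-line error
# string (err1) from the job's component error code/diag fields; 'mode' selects
# which of the two is returned, truncated to nchars.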
def errorInfo(job, nchars=300, mode='html'):
errtxt = ''
err1 = ''
if int(job['brokerageerrorcode']) != 0:
errtxt += 'Brokerage error %s: %s <br>' % ( job['brokerageerrorcode'], job['brokerageerrordiag'] )
if err1 == '': err1 = "Broker: %s" % job['brokerageerrordiag']
if int(job['ddmerrorcode']) != 0:
errtxt += 'DDM error %s: %s <br>' % ( job['ddmerrorcode'], job['ddmerrordiag'] )
if err1 == '': err1 = "DDM: %s" % job['ddmerrordiag']
if int(job['exeerrorcode']) != 0:
errtxt += 'Executable error %s: %s <br>' % ( job['exeerrorcode'], job['exeerrordiag'] )
if err1 == '': err1 = "Exe: %s" % job['exeerrordiag']
if int(job['jobdispatchererrorcode']) != 0:
errtxt += 'Dispatcher error %s: %s <br>' % ( job['jobdispatchererrorcode'], job['jobdispatchererrordiag'] )
if err1 == '': err1 = "Dispatcher: %s" % job['jobdispatchererrordiag']
if int(job['piloterrorcode']) != 0:
errtxt += 'Pilot error %s: %s <br>' % ( job['piloterrorcode'], job['piloterrordiag'] )
if err1 == '': err1 = "Pilot: %s" % job['piloterrordiag']
if int(job['superrorcode']) != 0:
errtxt += 'Sup error %s: %s <br>' % ( job['superrorcode'], job['superrordiag'] )
if err1 == '': err1 = job['superrordiag']
if int(job['taskbuffererrorcode']) != 0:
errtxt += 'Task buffer error %s: %s <br>' % ( job['taskbuffererrorcode'], job['taskbuffererrordiag'] )
if err1 == '': err1 = 'Taskbuffer: %s' % job['taskbuffererrordiag']
if job['transexitcode'] != '' and job['transexitcode'] is not None and int(job['transexitcode']) > 0:
errtxt += 'Trf exit code %s.' % job['transexitcode']
if err1 == '': err1 = 'Trf exit code %s' % job['transexitcode']
desc = getErrorDescription(job)
if len(desc) > 0:
errtxt += '%s<br>' % desc
if err1 == '': err1 = getErrorDescription(job,mode='string')
if len(errtxt) > nchars:
ret = errtxt[:nchars] + '...'
else:
ret = errtxt[:nchars]
if err1.find('lost heartbeat') >= 0: err1 = 'lost heartbeat'
if err1.lower().find('unknown transexitcode') >= 0: err1 = 'unknown transexit'
if err1.find(' at ') >= 0: err1 = err1[:err1.find(' at ')-1]
if errtxt.find('lost heartbeat') >= 0: err1 = 'lost heartbeat'
err1 = err1.replace('\n',' ')
if mode == 'html':
return errtxt
else:
return err1[:nchars]
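# jobParamList: dump the Jobparamstable rows for a comma-separated pandaid list
# as JSON; non-JSON requests are rejected.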
def jobParamList(request):
idlist = []
if 'pandaid' in request.session['requestParams']:
idstring = request.session['requestParams']['pandaid']
idstringl = idstring.split(',')
for id in idstringl:
idlist.append(int(id))
query = {}
query['pandaid__in'] = idlist
jobparams = Jobparamstable.objects.filter(**query).values()
if ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or ('json' in request.session['requestParams']):
return HttpResponse(json.dumps(jobparams, cls=DateEncoder), mimetype='text/html')
else:
return HttpResponse('not supported', mimetype='text/html')
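# jobSummaryDictProto: prototype summary builder that delegates the aggregation
# to the ATLAS_PANDABIGMON.QUERY_PANDAMON_JOBSPAGE_ALL table function instead of
# summarizing in Python. Returns the attribute summary, the pandaids to list,
# and a job count taken from the COMPUTINGSITE totals.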
def jobSummaryDictProto(request, dropmode, query, wildCardExtension):
sumd = []
esjobdict = []
condition = " RANGE_DAYS=>'0.5', "
if dropmode:
condition += " WITH_RETRIALS => 'N',"
else:
condition += " WITH_RETRIALS => 'Y',"
for item in standard_fields:
if item in query:
condition += " "+item+" => '"+request[item]+"',"
else:
pos = wildCardExtension.find(item, 0)
if pos > 0:
firstc = wildCardExtension.find("'", pos) + 1
sec = wildCardExtension.find("'", firstc)
value =wildCardExtension[firstc: sec]
condition += " "+item+" => '"+value+"',"
condition = condition[:-1]
#WITH_RETRIALS => 'N', COMPUTINGSITE=>'INFN-T1', JOBSTATUS=>'failed')
sqlRequest = "SELECT * FROM table(ATLAS_PANDABIGMON.QUERY_PANDAMON_JOBSPAGE_ALL(%s))" % condition
cur = connection.cursor()
cur.execute(sqlRequest)
rawsummary = cur.fetchall()
# first checkpoint
cur.close()
summaryhash = {}
for row in rawsummary:
if row[0] in summaryhash:
if row[1] in summaryhash[row[0]]:
summaryhash[row[0]][row[1]] += row[2]
else:
summaryhash[row[0]][row[1]] = row[2]
else:
item = {}
item[row[1]] = row[2]
summaryhash[row[0]] = item
#second checkpoint
shkeys = summaryhash.keys()
sumd = []
jobsToList = set()
njobs = 0
for shkey in shkeys:
if shkey != 'pandaid':
# check this condition
entry = {}
entry['field'] = shkey
entrlist = []
for subshkey in summaryhash[shkey].keys():
subentry = {}
subentry['kname'] = subshkey
subentry['kvalue'] = summaryhash[shkey][subshkey]
if (shkey == 'COMPUTINGSITE'):
njobs += summaryhash[shkey][subshkey]
entrlist.append(subentry)
entry['list'] = entrlist
sumd.append(entry)
        else:
            # 'pandaid' bucket: its sub-keys are the individual job ids to list
            for subshkey in summaryhash[shkey].keys():
                jobsToList.add(subshkey)
return sumd, esjobdict, jobsToList, njobs
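# jobListProto: prototype job listing view built on jobSummaryDictProto; it
# fetches only the jobs named by the summary query instead of scanning the job
# tables with the full selection.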
@cache_page(60*20)
def jobListProto(request, mode=None, param=None):
valid, response = initRequest(request)
if not valid: return response
if 'dump' in request.session['requestParams'] and request.session['requestParams']['dump'] == 'parameters':
return jobParamList(request)
eventservice = False
if 'jobtype' in request.session['requestParams'] and request.session['requestParams']['jobtype'] == 'eventservice':
eventservice = True
if 'eventservice' in request.session['requestParams'] and (
request.session['requestParams']['eventservice'] == 'eventservice' or request.session['requestParams'][
'eventservice'] == '1'):
eventservice = True
noarchjobs = False
if ('noarchjobs' in request.session['requestParams'] and request.session['requestParams']['noarchjobs'] == '1'):
noarchjobs = True
query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, wildCardExt=True)
if 'batchid' in request.session['requestParams']:
query['batchid'] = request.session['requestParams']['batchid']
jobs = []
if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
'json' in request.session['requestParams']):
values = Jobsactive4._meta.get_all_field_names()
elif eventservice:
values = 'jobsubstatus', 'produsername', 'cloud', 'computingsite', 'cpuconsumptiontime', 'jobstatus', 'transformation', 'prodsourcelabel', 'specialhandling', 'vo', 'modificationtime', 'pandaid', 'atlasrelease', 'jobsetid', 'processingtype', 'workinggroup', 'jeditaskid', 'taskid', 'currentpriority', 'creationtime', 'starttime', 'endtime', 'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag', 'exeerrorcode', 'exeerrordiag', 'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode', 'piloterrordiag', 'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag', 'transexitcode', 'destinationse', 'homepackage', 'inputfileproject', 'inputfiletype', 'attemptnr', 'jobname', 'proddblock', 'destinationdblock', 'jobmetrics', 'reqid', 'minramcount', 'statechangetime', 'jobsubstatus', 'eventservice'
else:
values = 'jobsubstatus', 'produsername', 'cloud', 'computingsite', 'cpuconsumptiontime', 'jobstatus', 'transformation', 'prodsourcelabel', 'specialhandling', 'vo', 'modificationtime', 'pandaid', 'atlasrelease', 'jobsetid', 'processingtype', 'workinggroup', 'jeditaskid', 'taskid', 'currentpriority', 'creationtime', 'starttime', 'endtime', 'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag', 'exeerrorcode', 'exeerrordiag', 'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode', 'piloterrordiag', 'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag', 'transexitcode', 'destinationse', 'homepackage', 'inputfileproject', 'inputfiletype', 'attemptnr', 'jobname', 'computingelement', 'proddblock', 'destinationdblock', 'reqid', 'minramcount', 'statechangetime', 'avgvmem', 'maxvmem', 'maxpss', 'maxrss', 'nucleus', 'eventservice'
totalJobs = 0
showTop = 0
dropmode = True
if 'mode' in request.session['requestParams'] and request.session['requestParams'][
'mode'] == 'drop': dropmode = True
if 'mode' in request.session['requestParams'] and request.session['requestParams'][
'mode'] == 'nodrop': dropmode = False
sumd, esjobdict, jobsToList, njobs = jobSummaryDictProto(request, dropmode, query, wildCardExtension)
    # keep 'values' as the field list for .values(); collect the ids separately
    pandaids = [int(val) for val in jobsToList]
    newquery = {}
    newquery['pandaid__in'] = pandaids
jobs.extend(Jobsdefined4.objects.filter(**newquery).values(*values))
jobs.extend(Jobsactive4.objects.filter(**newquery).values(*values))
jobs.extend(Jobswaiting4.objects.filter(**newquery).values(*values))
jobs.extend(Jobsarchived4.objects.filter(**newquery).values(*values))
## If the list is for a particular JEDI task, filter out the jobs superseded by retries
taskids = {}
for job in jobs:
if 'jeditaskid' in job: taskids[job['jeditaskid']] = 1
droplist = []
droppedIDs = set()
droppedPmerge = set()
jobs = cleanJobList(request, jobs)
jobtype = ''
if 'jobtype' in request.session['requestParams']:
jobtype = request.session['requestParams']['jobtype']
elif '/analysis' in request.path:
jobtype = 'analysis'
elif '/production' in request.path:
jobtype = 'production'
if u'display_limit' in request.session['requestParams']:
if int(request.session['requestParams']['display_limit']) > njobs:
display_limit = njobs
else:
display_limit = int(request.session['requestParams']['display_limit'])
url_nolimit = removeParam(request.get_full_path(), 'display_limit')
else:
display_limit = 1000
url_nolimit = request.get_full_path()
njobsmax = display_limit
    sortby = "time-descending"  # default so 'sortby' is defined even without the request parameter
    if 'sortby' in request.session['requestParams']:
sortby = request.session['requestParams']['sortby']
if sortby == 'time-ascending':
jobs = sorted(jobs, key=lambda x: x['modificationtime'])
        elif sortby == 'time-descending':
            jobs = sorted(jobs, key=lambda x: x['modificationtime'], reverse=True)
        elif sortby == 'statetime':
            jobs = sorted(jobs, key=lambda x: x['statechangetime'], reverse=True)
elif sortby == 'priority':
jobs = sorted(jobs, key=lambda x: x['currentpriority'], reverse=True)
elif sortby == 'attemptnr':
jobs = sorted(jobs, key=lambda x: x['attemptnr'], reverse=True)
elif sortby == 'duration-ascending':
jobs = sorted(jobs, key=lambda x: x['durationsec'])
elif sortby == 'duration-descending':
jobs = sorted(jobs, key=lambda x: x['durationsec'], reverse=True)
elif sortby == 'duration':
jobs = sorted(jobs, key=lambda x: x['durationsec'])
elif sortby == 'PandaID':
jobs = sorted(jobs, key=lambda x: x['pandaid'], reverse=True)
else:
sortby = "time-descending"
if len(jobs) > 0 and 'modificationtime' in jobs[0]:
jobs = sorted(jobs, key=lambda x: x['modificationtime'], reverse=True)
taskname = ''
if 'jeditaskid' in request.session['requestParams']:
taskname = getTaskName('jeditaskid', request.session['requestParams']['jeditaskid'])
if 'taskid' in request.session['requestParams']:
taskname = getTaskName('jeditaskid', request.session['requestParams']['taskid'])
if 'produsername' in request.session['requestParams']:
user = request.session['requestParams']['produsername']
elif 'user' in request.session['requestParams']:
user = request.session['requestParams']['user']
else:
user = None
## set up google flow diagram
flowstruct = buildGoogleFlowDiagram(request, jobs=jobs)
# show warning or not
if njobs <= request.session['JOB_LIMIT']:
showwarn = 0
else:
showwarn = 1
if 'jeditaskid' in request.session['requestParams']:
if len(jobs) > 0:
for job in jobs:
if 'maxvmem' in job:
if type(job['maxvmem']) is int and job['maxvmem'] > 0:
job['maxvmemmb'] = "%0.2f" % (job['maxvmem'] / 1000.)
job['avgvmemmb'] = "%0.2f" % (job['avgvmem'] / 1000.)
if 'maxpss' in job:
if type(job['maxpss']) is int and job['maxpss'] > 0:
job['maxpss'] = "%0.2f" % (job['maxpss'] / 1024.)
# errsByCount, errsBySite, errsByUser, errsByTask, errdSumd, errHist = errorSummaryDict(request, jobs, tasknamedict,
# testjobs)
xurl = extensibleURL(request)
print xurl
nosorturl = removeParam(xurl, 'sortby', mode='extensible')
nosorturl = removeParam(nosorturl, 'display_limit', mode='extensible')
TFIRST = request.session['TFIRST']
TLAST = request.session['TLAST']
del request.session['TFIRST']
del request.session['TLAST']
nodropPartURL = cleanURLFromDropPart(xurl)
data = {
'prefix': getPrefix(request),
# 'errsByCount': errsByCount,
# 'errdSumd': errdSumd,
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'jobList': jobs[:njobsmax],
'jobtype': jobtype,
'njobs': njobs,
'user': user,
'sumd': sumd,
'xurl': xurl,
#'droplist': droplist,
#'ndrops': len(droplist) if len(droplist) > 0 else (- len(droppedPmerge)),
'ndrops': 0,
'tfirst': TFIRST,
'tlast': TLAST,
'plow': PLOW,
'phigh': PHIGH,
'joblimit': request.session['JOB_LIMIT'],
'limit': 0,
'totalJobs': totalJobs,
'showTop': showTop,
'url_nolimit': url_nolimit,
'display_limit': display_limit,
'sortby': sortby,
'nosorturl': nosorturl,
'taskname': taskname,
'flowstruct': flowstruct,
'nodropPartURL': nodropPartURL,
}
data.update(getContextVariables(request))
##self monitor
endSelfMonitor(request)
if eventservice:
response = render_to_response('jobListESProto.html', data, RequestContext(request))
else:
response = render_to_response('jobListProto.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
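# jobList: the main job listing view. Builds the selection from the request,
# pulls matching jobs from the live and archived job tables, optionally drops
# attempts superseded by retries (single-task views), and renders the list with
# attribute and error summaries.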
@cache_page(60*20)
def jobList(request, mode=None, param=None):
valid, response = initRequest(request)
if not valid: return response
if 'dump' in request.session['requestParams'] and request.session['requestParams']['dump'] == 'parameters':
return jobParamList(request)
eventservice = False
if 'jobtype' in request.session['requestParams'] and request.session['requestParams']['jobtype'] == 'eventservice':
eventservice = True
if 'eventservice' in request.session['requestParams'] and (request.session['requestParams']['eventservice'] == 'eventservice' or request.session['requestParams']['eventservice'] == '1'):
eventservice = True
noarchjobs=False
if ('noarchjobs' in request.session['requestParams'] and request.session['requestParams']['noarchjobs']=='1'):
noarchjobs=True
query,wildCardExtension,LAST_N_HOURS_MAX = setupView(request, wildCardExt=True)
if 'batchid' in request.session['requestParams']:
query['batchid'] = request.session['requestParams']['batchid']
jobs = []
if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or ('json' in request.session['requestParams']):
values = Jobsactive4._meta.get_all_field_names()
elif eventservice:
values = 'jobsubstatus','produsername', 'cloud', 'computingsite', 'cpuconsumptiontime', 'jobstatus', 'transformation', 'prodsourcelabel', 'specialhandling', 'vo', 'modificationtime', 'pandaid', 'atlasrelease', 'jobsetid', 'processingtype', 'workinggroup', 'jeditaskid', 'taskid', 'currentpriority', 'creationtime', 'starttime', 'endtime', 'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag', 'exeerrorcode', 'exeerrordiag', 'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode', 'piloterrordiag', 'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag', 'transexitcode', 'destinationse', 'homepackage', 'inputfileproject', 'inputfiletype', 'attemptnr', 'jobname', 'proddblock', 'destinationdblock', 'jobmetrics', 'reqid', 'minramcount', 'statechangetime', 'jobsubstatus', 'eventservice'
else:
values = 'jobsubstatus','produsername', 'cloud', 'computingsite', 'cpuconsumptiontime', 'jobstatus', 'transformation', 'prodsourcelabel', 'specialhandling', 'vo', 'modificationtime', 'pandaid', 'atlasrelease', 'jobsetid', 'processingtype', 'workinggroup', 'jeditaskid', 'taskid', 'currentpriority', 'creationtime', 'starttime', 'endtime', 'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag', 'exeerrorcode', 'exeerrordiag', 'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode', 'piloterrordiag', 'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag', 'transexitcode', 'destinationse', 'homepackage', 'inputfileproject', 'inputfiletype', 'attemptnr', 'jobname', 'computingelement', 'proddblock', 'destinationdblock', 'reqid', 'minramcount', 'statechangetime', 'avgvmem', 'maxvmem', 'maxpss' , 'maxrss', 'nucleus', 'eventservice'
JOB_LIMITS=request.session['JOB_LIMIT']
totalJobs = 0
showTop = 0
if 'limit' in request.session['requestParams']:
request.session['JOB_LIMIT'] = int(request.session['requestParams']['limit'])
if 'transferringnotupdated' in request.session['requestParams']:
jobs = stateNotUpdated(request, state='transferring', values=values, wildCardExtension=wildCardExtension)
elif 'statenotupdated' in request.session['requestParams']:
jobs = stateNotUpdated(request, values=values, wildCardExtension=wildCardExtension)
else:
excludedTimeQuery = copy.deepcopy(query)
if ('modificationtime__range' in excludedTimeQuery):
del excludedTimeQuery['modificationtime__range']
jobs.extend(Jobsdefined4.objects.filter(**excludedTimeQuery).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobsactive4.objects.filter(**excludedTimeQuery).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobswaiting4.objects.filter(**excludedTimeQuery).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobsarchived4.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(*values))
if not noarchjobs:
queryFrozenStates = []
if 'jobstatus' in request.session['requestParams']:
queryFrozenStates = filter(set(request.session['requestParams']['jobstatus'].split('|')).__contains__, [ 'finished', 'failed', 'cancelled', 'closed' ])
        ## hard limit is set to 20K
if ('jobstatus' not in request.session['requestParams'] or len(queryFrozenStates) > 0):
if ('limit' not in request.session['requestParams']):
request.session['JOB_LIMIT'] = 20000
JOB_LIMITS = 20000
showTop = 1
else:
request.session['JOB_LIMIT'] = int(request.session['requestParams']['limit'])
JOB_LIMITS = int(request.session['requestParams']['limit'])
archJobs = Jobsarchived.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(*values)
totalJobs = len(archJobs)
jobs.extend(archJobs)
## If the list is for a particular JEDI task, filter out the jobs superseded by retries
taskids = {}
for job in jobs:
if 'jeditaskid' in job: taskids[job['jeditaskid']] = 1
dropmode = True
if 'mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'drop': dropmode = True
if 'mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'nodrop': dropmode = False
droplist = []
droppedIDs = set()
droppedPmerge = set()
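    # Drop mode: when the listing is for exactly one JEDI task, consult
    # JediJobRetryHistory and hide attempts superseded by a retry or merge so
    # that only the current jobs remain.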
if dropmode and (len(taskids) == 1):
print 'doing the drop'
for task in taskids:
retryquery = {}
retryquery['jeditaskid'] = task
retries = JediJobRetryHistory.objects.filter(**retryquery).extra(where=["OLDPANDAID!=NEWPANDAID AND RELATIONTYPE IN ('', 'retry', 'pmerge', 'merge', 'jobset_retry', 'es_merge')"]).order_by('newpandaid').values()
hashRetries = {}
for retry in retries:
hashRetries[retry['oldpandaid']] = retry
newjobs = []
for job in jobs:
dropJob = 0
pandaid = job['pandaid']
if not isEventService(job):
if hashRetries.has_key(pandaid):
retry = hashRetries[pandaid]
if retry['relationtype'] == '' or retry['relationtype'] == 'retry' or (
job['processingtype'] == 'pmerge' and retry['relationtype'] == 'merge'):
dropJob = retry['newpandaid']
else:
if (job['jobsetid'] in hashRetries) and ( hashRetries[job['jobsetid']]['relationtype'] == 'jobset_retry'):
dropJob = 1
else:
if (job['pandaid'] in hashRetries):
if (hashRetries[job['pandaid']]['relationtype'] == ('retry')):
dropJob = 1
# if (hashRetries[job['pandaid']]['relationtype'] == 'es_merge' and (
# job['jobsubstatus'] == 'es_merge')):
# dropJob = 1
if (dropJob == 0):
if (job['jobsetid'] in hashRetries) and (
hashRetries[job['jobsetid']]['relationtype'] in ('jobset_retry')):
dropJob = 1
if (job['jobstatus'] == 'closed' and (job['jobsubstatus'] in ('es_unused','es_inaction'))):
dropJob = 1
# if 'jobstatus' in request.session['requestParams'] and request.session['requestParams'][
# 'jobstatus'] == 'cancelled' and job['jobstatus'] != 'cancelled':
# dropJob = 1
if dropJob == 0 and not ('processingtype' in request.session['requestParams'] and request.session['requestParams']['processingtype'] == 'pmerge') :
if not (job['processingtype'] == 'pmerge'):
newjobs.append(job)
else:
droppedPmerge.add(pandaid)
elif (dropJob == 0):
newjobs.append(job)
else:
if not pandaid in droppedIDs:
droppedIDs.add(pandaid)
droplist.append( { 'pandaid' : pandaid, 'newpandaid' : dropJob } )
droplist = sorted(droplist, key=lambda x:-x['pandaid'])
jobs = newjobs
jobs = cleanJobList(request, jobs)
njobs = len(jobs)
jobtype = ''
if 'jobtype' in request.session['requestParams']:
jobtype = request.session['requestParams']['jobtype']
elif '/analysis' in request.path:
jobtype = 'analysis'
elif '/production' in request.path:
jobtype = 'production'
if u'display_limit' in request.session['requestParams']:
if int(request.session['requestParams']['display_limit']) > njobs:
display_limit = njobs
else:
display_limit = int(request.session['requestParams']['display_limit'])
url_nolimit = removeParam(request.get_full_path(), 'display_limit')
else:
display_limit = 1000
url_nolimit = request.get_full_path()
njobsmax = display_limit
    sortby = "time-descending"  # default so 'sortby' is defined even without the request parameter
    if 'sortby' in request.session['requestParams']:
sortby = request.session['requestParams']['sortby']
if sortby == 'time-ascending':
jobs = sorted(jobs, key=lambda x:x['modificationtime'])
        elif sortby == 'time-descending':
            jobs = sorted(jobs, key=lambda x:x['modificationtime'], reverse=True)
        elif sortby == 'statetime':
            jobs = sorted(jobs, key=lambda x:x['statechangetime'], reverse=True)
elif sortby == 'priority':
jobs = sorted(jobs, key=lambda x:x['currentpriority'], reverse=True)
elif sortby == 'attemptnr':
jobs = sorted(jobs, key=lambda x:x['attemptnr'], reverse=True)
elif sortby == 'duration-ascending':
jobs = sorted(jobs, key=lambda x:x['durationsec'])
elif sortby == 'duration-descending':
jobs = sorted(jobs, key=lambda x:x['durationsec'], reverse=True)
elif sortby == 'duration':
jobs = sorted(jobs, key=lambda x:x['durationsec'])
elif sortby == 'PandaID':
jobs = sorted(jobs, key=lambda x:x['pandaid'], reverse=True)
else:
sortby = "time-descending"
if len(jobs)>0 and 'modificationtime' in jobs[0]:
jobs = sorted(jobs, key=lambda x:x['modificationtime'], reverse=True)
taskname = ''
if 'jeditaskid' in request.session['requestParams']:
taskname = getTaskName('jeditaskid',request.session['requestParams']['jeditaskid'])
if 'taskid' in request.session['requestParams']:
taskname = getTaskName('jeditaskid',request.session['requestParams']['taskid'])
if 'produsername' in request.session['requestParams']:
user = request.session['requestParams']['produsername']
elif 'user' in request.session['requestParams']:
user = request.session['requestParams']['user']
else:
user = None
## set up google flow diagram
flowstruct = buildGoogleFlowDiagram(request, jobs=jobs)
if (('datasets' in request.session['requestParams']) and (request.session['requestParams']['datasets'] == 'yes') and (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json')))):
        for job in jobs:
            pandaid = job['pandaid']  # this job's id, not the view-level pandaid variable
            files = []
            files.extend(JediDatasetContents.objects.filter(pandaid=pandaid).order_by('type').values())
ninput = 0
if len(files) > 0:
for f in files:
if f['type'] == 'input': ninput += 1
f['fsizemb'] = "%0.2f" % (f['fsize']/1000000.)
dsets = JediDatasets.objects.filter(datasetid=f['datasetid']).values()
if len(dsets) > 0:
f['datasetname'] = dsets[0]['datasetname']
if True:
#if ninput == 0:
files.extend(Filestable4.objects.filter(pandaid=pandaid).order_by('type').values())
if len(files) == 0:
files.extend(FilestableArch.objects.filter(pandaid=pandaid).order_by('type').values())
if len(files) > 0:
for f in files:
if 'creationdate' not in f: f['creationdate'] = f['modificationtime']
if 'fileid' not in f: f['fileid'] = f['row_id']
if 'datasetname' not in f: f['datasetname'] = f['dataset']
if 'modificationtime' in f: f['oldfiletable'] = 1
if 'destinationdblock' in f and f['destinationdblock'] is not None:
f['destinationdblock_vis'] = f['destinationdblock'].split('_')[-1]
files = sorted(files, key=lambda x:x['type'])
nfiles = len(files)
logfile = {}
for file in files:
if file['type'] == 'log':
logfile['lfn'] = file['lfn']
logfile['guid'] = file['guid']
if 'destinationse' in file:
logfile['site'] = file['destinationse']
else:
logfilerec = Filestable4.objects.filter(pandaid=pandaid, lfn=logfile['lfn']).values()
if len(logfilerec) == 0:
logfilerec = FilestableArch.objects.filter(pandaid=pandaid, lfn=logfile['lfn']).values()
if len(logfilerec) > 0:
logfile['site'] = logfilerec[0]['destinationse']
logfile['guid'] = logfilerec[0]['guid']
logfile['scope'] = file['scope']
file['fsize'] = int(file['fsize']/1000000)
job['datasets'] = files
#show warning or not
if njobs<=request.session['JOB_LIMIT']:
showwarn=0
else:
showwarn=1
sumd, esjobdict = jobSummaryDict(request, jobs)
if 'jeditaskid' in request.session['requestParams']:
if len(jobs)>0:
for job in jobs:
if 'maxvmem' in job:
if type(job['maxvmem']) is int and job['maxvmem'] > 0:
job['maxvmemmb'] = "%0.2f" % (job['maxvmem']/1000.)
job['avgvmemmb'] = "%0.2f" % (job['avgvmem']/1000.)
if 'maxpss' in job:
if type(job['maxpss']) is int and job['maxpss'] > 0:
job['maxpss'] = "%0.2f" % (job['maxpss']/1024.)
testjobs = False
if 'prodsourcelabel' in request.session['requestParams'] and request.session['requestParams']['prodsourcelabel'].lower().find('test') >= 0:
testjobs = True
tasknamedict = taskNameDict(jobs)
errsByCount, errsBySite, errsByUser, errsByTask, errdSumd, errHist = errorSummaryDict(request,jobs, tasknamedict, testjobs)
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
        # esjobdict is built above but does not appear to be used anywhere; the block below stays disabled
'''
if esjobdict and len(esjobdict) > 0:
for job in jobs:
if job['pandaid'] in esjobdict and job['specialhandling'].find('esmerge') < 0:
esjobstr = 'Dispatched event states: '
for s in esjobdict[job['pandaid']]:
if esjobdict[job['pandaid']][s] > 0:
esjobstr += " %s(%s) " % ( s, esjobdict[job['pandaid']][s] )
job['esjobdict'] = esjobstr
'''
xurl = extensibleURL(request)
print xurl
nosorturl = removeParam(xurl, 'sortby',mode='extensible')
nosorturl = removeParam(nosorturl, 'display_limit', mode='extensible')
TFIRST = request.session['TFIRST']
TLAST = request.session['TLAST']
del request.session['TFIRST']
del request.session['TLAST']
nodropPartURL = cleanURLFromDropPart(xurl)
data = {
'prefix': getPrefix(request),
'errsByCount' : errsByCount,
'errdSumd' : errdSumd,
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'jobList': jobs[:njobsmax],
'jobtype' : jobtype,
'njobs' : njobs,
'user' : user,
'sumd' : sumd,
'xurl' : xurl,
'droplist' : droplist,
'ndrops' : len(droplist) if len(droplist) > 0 else (- len(droppedPmerge)),
'tfirst' : TFIRST,
'tlast' : TLAST,
'plow' : PLOW,
'phigh' : PHIGH,
'showwarn': showwarn,
'joblimit': request.session['JOB_LIMIT'],
'limit' : JOB_LIMITS,
'totalJobs': totalJobs,
'showTop' : showTop,
'url_nolimit' : url_nolimit,
'display_limit' : display_limit,
'sortby' : sortby,
'nosorturl' : nosorturl,
'taskname' : taskname,
'flowstruct' : flowstruct,
'nodropPartURL':nodropPartURL,
}
data.update(getContextVariables(request))
##self monitor
endSelfMonitor(request)
if eventservice:
response = render_to_response('jobListES.html', data, RequestContext(request))
else:
response = render_to_response('jobList.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
if (('fields' in request.session['requestParams']) and (len(jobs) > 0)):
fields = request.session['requestParams']['fields'].split(',')
fields= (set(fields) & set(jobs[0].keys()))
for job in jobs:
for field in list(job.keys()):
if field in fields:
pass
else:
del job[field]
data = {
"selectionsummary": sumd,
"jobs": jobs,
"errsByCount": errsByCount,
}
return HttpResponse(json.dumps(data, cls=DateEncoder), mimetype='text/html')
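# isEventService: heuristic test for event service jobs based on the
# specialhandling flags and the (possibly normalized) eventservice field.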
def isEventService(job):
if 'specialhandling' in job and job['specialhandling'] and ( job['specialhandling'].find('eventservice') >= 0 or job['specialhandling'].find('esmerge') >= 0 or (job['eventservice'] != 'ordinary' and job['eventservice'] > 0) ):
return True
else:
return False
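# cleanURLFromDropPart: strip the mode=... parameter from a URL. Note that
# find('mode') matches the first occurrence of the substring, so a parameter
# name merely containing 'mode' would also be caught.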
def cleanURLFromDropPart(url):
posDropPart = url.find('mode')
if ( posDropPart== -1):
return url
else:
if url[posDropPart-1] == '&':
posDropPart -= 1
nextAmp = url.find('&', posDropPart+1)
if nextAmp == -1:
return url[0:posDropPart]
else:
return url[0:posDropPart] + url[nextAmp+1:]
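# getSequentialRetries: walk JediJobRetryHistory backwards from pandaid,
# recursing through failed predecessor jobs, and return the retry chain with
# duplicates (keyed on oldpandaid) removed.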
def getSequentialRetries(pandaid, jeditaskid):
retryquery = {}
retryquery['jeditaskid'] = jeditaskid
retryquery['newpandaid'] = pandaid
retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('oldpandaid').reverse().values()
newretries = []
newretries.extend(retries)
for retry in retries:
if retry['relationtype'] in ['merge','retry']:
jsquery = {}
jsquery['jeditaskid'] = jeditaskid
jsquery['pandaid'] = retry['oldpandaid']
values = [ 'pandaid', 'jobstatus', 'jeditaskid' ]
jsjobs = []
jsjobs.extend(Jobsdefined4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobsactive4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobswaiting4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobsarchived4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobsarchived.objects.filter(**jsquery).values(*values))
for job in jsjobs:
if job['jobstatus'] == 'failed':
for retry in newretries:
if (retry['oldpandaid'] == job['pandaid']):
retry['relationtype'] = 'retry'
newretries.extend(getSequentialRetries(job['pandaid'], job['jeditaskid']))
outlist=[]
added_keys = set()
for row in newretries:
lookup = row['oldpandaid']
if lookup not in added_keys:
outlist.append(row)
added_keys.add(lookup)
return outlist
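# getSequentialRetries_ES: event service variant that follows jobset_retry
# relations at the jobset level; countOfInvocations caps the total recursion at
# 300 calls to guard against cycles in the retry history.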
def getSequentialRetries_ES(pandaid, jobsetid, jeditaskid, countOfInvocations, recurse = 0):
retryquery = {}
retryquery['jeditaskid'] = jeditaskid
retryquery['newpandaid'] = jobsetid
retryquery['relationtype'] = 'jobset_retry'
countOfInvocations.append(1)
newretries = []
if (len(countOfInvocations) < 300 ):
retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('oldpandaid').reverse().values()
newretries.extend(retries)
for retry in retries:
jsquery = {}
jsquery['jeditaskid'] = jeditaskid
jsquery['jobstatus'] = 'failed'
jsquery['jobsetid'] = retry['oldpandaid']
values = [ 'pandaid', 'jobstatus', 'jobsetid', 'jeditaskid' ]
jsjobs = []
jsjobs.extend(Jobsdefined4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobsactive4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobswaiting4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobsarchived4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobsarchived.objects.filter(**jsquery).values(*values))
for job in jsjobs:
if job['jobstatus'] == 'failed':
for retry in newretries:
if (retry['oldpandaid'] == job['jobsetid']):
retry['relationtype'] = 'retry'
retry['jobid'] = job['pandaid']
newretries.extend(getSequentialRetries_ES(job['pandaid'],
jobsetid, job['jeditaskid'], countOfInvocations, recurse+1))
outlist=[]
added_keys = set()
for row in newretries:
if 'jobid' in row:
lookup = row['jobid']
if lookup not in added_keys:
outlist.append(row)
added_keys.add(lookup)
return outlist
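# getSequentialRetries_ESupstream: like getSequentialRetries_ES but follows the
# jobset_retry relations forward (oldpandaid -> newpandaid), i.e. towards later
# attempts, and does not recurse.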
def getSequentialRetries_ESupstream(pandaid, jobsetid, jeditaskid, countOfInvocations, recurse = 0):
retryquery = {}
retryquery['jeditaskid'] = jeditaskid
retryquery['oldpandaid'] = jobsetid
retryquery['relationtype'] = 'jobset_retry'
countOfInvocations.append(1)
newretries = []
if (len(countOfInvocations) < 300 ):
retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('newpandaid').values()
newretries.extend(retries)
for retry in retries:
jsquery = {}
jsquery['jeditaskid'] = jeditaskid
jsquery['jobsetid'] = retry['newpandaid']
values = [ 'pandaid', 'jobstatus', 'jobsetid', 'jeditaskid' ]
jsjobs = []
jsjobs.extend(Jobsdefined4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobsactive4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobswaiting4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobsarchived4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobsarchived.objects.filter(**jsquery).values(*values))
for job in jsjobs:
for retry in newretries:
if (retry['newpandaid'] == job['jobsetid']):
retry['relationtype'] = 'retry'
retry['jobid'] = job['pandaid']
outlist=[]
added_keys = set()
for row in newretries:
if 'jobid' in row:
lookup = row['jobid']
if lookup not in added_keys:
outlist.append(row)
added_keys.add(lookup)
return outlist
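# descendentjoberrsinfo: for a given pandaid/jeditaskid, collect the error
# descriptions of all related retry attempts and render them via the
# descentJobsErrors template.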
def descendentjoberrsinfo(request):
valid, response = initRequest(request)
if not valid: return response
data = {}
job_pandaid = job_jeditaskid = -1
if 'pandaid' in request.session['requestParams']:
job_pandaid = int(request.session['requestParams']['pandaid'])
if 'jeditaskid' in request.session['requestParams']:
job_jeditaskid = int(request.session['requestParams']['jeditaskid'])
if (job_pandaid==-1) or (job_jeditaskid==-1):
data = {"error":"no pandaid or jeditaskid supplied"}
del request.session['TFIRST']
del request.session['TLAST']
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), mimetype='text/html')
query = setupView(request, hours=365*24)
jobs = []
jobs.extend(Jobsdefined4.objects.filter(**query).values())
jobs.extend(Jobsactive4.objects.filter(**query).values())
jobs.extend(Jobswaiting4.objects.filter(**query).values())
jobs.extend(Jobsarchived4.objects.filter(**query).values())
if len(jobs) == 0:
jobs.extend(Jobsarchived.objects.filter(**query).values())
if len(jobs) == 0:
del request.session['TFIRST']
del request.session['TLAST']
data = {"error":"job not found"}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), mimetype='text/html')
job = jobs[0]
countOfInvocations = []
if not isEventService(job):
retryquery = {}
retryquery['jeditaskid'] = job['jeditaskid']
retryquery['oldpandaid'] = job['pandaid']
retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('newpandaid').reverse().values()
pretries = getSequentialRetries(job['pandaid'], job['jeditaskid'])
else:
retryquery = {}
retryquery['jeditaskid'] = job['jeditaskid']
retryquery['oldpandaid'] = job['jobsetid']
retryquery['relationtype'] = 'jobset_retry'
retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('newpandaid').reverse().values()
pretries = getSequentialRetries_ES(job['pandaid'], job['jobsetid'], job['jeditaskid'], countOfInvocations)
query = {'jeditaskid':job_jeditaskid}
jobslist = []
for retry in pretries:
jobslist.append(retry['oldpandaid'])
for retry in retries:
jobslist.append(retry['oldpandaid'])
query['pandaid__in'] = jobslist
jobs = []
jobs.extend(Jobsdefined4.objects.filter(**query).values())
jobs.extend(Jobsactive4.objects.filter(**query).values())
jobs.extend(Jobswaiting4.objects.filter(**query).values())
jobs.extend(Jobsarchived4.objects.filter(**query).values())
jobs.extend(Jobsarchived.objects.filter(**query).values())
jobs = cleanJobList(request, jobs, mode='nodrop')
errors = {}
for job in jobs:
errors[job['pandaid']] = getErrorDescription(job, mode='txt')
endSelfMonitor(request)
del request.session['TFIRST']
del request.session['TLAST']
response = render_to_response('descentJobsErrors.html', {'errors':errors}, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
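# jobInfo: detailed single-job page. Resolves the job by pandaid, batchid or
# jobname (or via the file it created), then gathers its files, log pointers,
# job parameters, retry history and, for event service jobs, the event ranges.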
@csrf_exempt
@cache_page(60*20)
def jobInfo(request, pandaid=None, batchid=None, p2=None, p3=None, p4=None):
valid, response = initRequest(request)
if not valid: return response
eventservice = False
query = setupView(request, hours=365*24)
jobid = ''
if 'creator' in request.session['requestParams']:
## Find the job that created the specified file.
fquery = {}
fquery['lfn'] = request.session['requestParams']['creator']
fquery['type'] = 'output'
fileq = Filestable4.objects.filter(**fquery)
fileq = fileq.values('pandaid','type')
if fileq and len(fileq) > 0:
pandaid = fileq[0]['pandaid']
else:
fileq = FilestableArch.objects.filter(**fquery).values('pandaid','type')
if fileq and len(fileq) > 0:
pandaid = fileq[0]['pandaid']
if pandaid:
jobid = pandaid
try:
query['pandaid'] = int(pandaid)
except:
query['jobname'] = pandaid
if batchid:
jobid = batchid
query['batchid'] = batchid
if 'pandaid' in request.session['requestParams']:
try:
pandaid = int(request.session['requestParams']['pandaid'])
except ValueError:
pandaid = 0
jobid = pandaid
query['pandaid'] = pandaid
elif 'batchid' in request.session['requestParams']:
batchid = request.session['requestParams']['batchid']
jobid = "'"+batchid+"'"
query['batchid'] = batchid
elif 'jobname' in request.session['requestParams']:
jobid = request.session['requestParams']['jobname']
query['jobname'] = jobid
jobs = []
if pandaid or batchid:
startdate = timezone.now() - timedelta(hours=LAST_N_HOURS_MAX)
jobs.extend(Jobsdefined4.objects.filter(**query).values())
jobs.extend(Jobsactive4.objects.filter(**query).values())
jobs.extend(Jobswaiting4.objects.filter(**query).values())
jobs.extend(Jobsarchived4.objects.filter(**query).values())
if len(jobs) == 0:
jobs.extend(Jobsarchived.objects.filter(**query).values())
jobs = cleanJobList(request, jobs, mode='nodrop')
if len(jobs) == 0:
del request.session['TFIRST']
del request.session['TLAST']
data = {
'prefix': getPrefix(request),
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'pandaid': pandaid,
'job': None,
'jobid' : jobid,
}
##self monitor
endSelfMonitor(request)
response = render_to_response('jobInfo.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
job = {}
colnames = []
columns = []
try:
job = jobs[0]
tquery = {}
tquery['jeditaskid'] = job['jeditaskid']
tquery['storagetoken__isnull'] = False
storagetoken = JediDatasets.objects.filter(**tquery).values('storagetoken')
if storagetoken:
job['destinationse']=storagetoken[0]['storagetoken']
pandaid = job['pandaid']
colnames = job.keys()
colnames.sort()
        for k in colnames:
            val = job[k]
            if val is None:
                continue
            pair = { 'name' : k, 'value' : val }
            columns.append(pair)
except IndexError:
job = {}
## Check for logfile extracts
logs = Logstable.objects.filter(pandaid=pandaid)
if logs:
logextract = logs[0].log1
else:
logextract = None
files = []
typeFiles = {}
fileSummary = ''
inputFilesSize = 0
if 'nofiles' not in request.session['requestParams']:
## Get job files. First look in JEDI datasetcontents
print "Pulling file info"
files.extend(Filestable4.objects.filter(pandaid=pandaid).order_by('type').values())
ninput = 0
noutput = 0
npseudo_input = 0
if len(files) > 0:
for f in files:
f['destination'] = ' '
if f['type'] == 'input':
ninput += 1
inputFilesSize += f['fsize']/1048576.
if f['type'] in typeFiles:
typeFiles[f['type']] += 1
else:
typeFiles[f['type']] = 1
if f['type'] == 'output':
noutput += 1
if len(jobs[0]['jobmetrics']) > 0:
jobmetrics = dict(s.split('=') for s in jobs[0]['jobmetrics'].split(' '))
if 'logBucketID' in jobmetrics:
if int(jobmetrics['logBucketID']) in [3, 21, 45, 46, 104, 41, 105, 106, 42, 61, 21, 102, 103, 2, 82, 81, 82, 101]: #Bucket Codes for S3 destination
f['destination'] = 'S3'
if f['type'] == 'pseudo_input': npseudo_input += 1
f['fsizemb'] = "%0.2f" % (f['fsize']/1000000.)
dsets = JediDatasets.objects.filter(datasetid=f['datasetid']).values()
if len(dsets) > 0:
f['datasetname'] = dsets[0]['datasetname']
files = [x for x in files if x['destination'] != 'S3']
if len(typeFiles) > 0:
inputFilesSize = "%0.2f" % inputFilesSize
for i in typeFiles:
fileSummary += str(i) +': ' + str(typeFiles[i])
if (i == 'input'): fileSummary += ', size: '+inputFilesSize+'(MB)'
fileSummary += '; '
fileSummary = fileSummary[:-2]
if len(files) == 0:
files.extend(FilestableArch.objects.filter(pandaid=pandaid).order_by('type').values())
if len(files) > 0:
for f in files:
if 'creationdate' not in f: f['creationdate'] = f['modificationtime']
if 'fileid' not in f: f['fileid'] = f['row_id']
if 'datasetname' not in f: f['datasetname'] = f['dataset']
if 'modificationtime' in f: f['oldfiletable'] = 1
if 'destinationdblock' in f and f['destinationdblock'] is not None:
f['destinationdblock_vis'] = f['destinationdblock'].split('_')[-1]
files = sorted(files, key=lambda x:x['type'])
nfiles = len(files)
logfile = {}
for file in files:
if file['type'] == 'log':
logfile['lfn'] = file['lfn']
logfile['guid'] = file['guid']
if 'destinationse' in file:
logfile['site'] = file['destinationse']
else:
logfilerec = Filestable4.objects.filter(pandaid=pandaid, lfn=logfile['lfn']).values()
if len(logfilerec) == 0:
logfilerec = FilestableArch.objects.filter(pandaid=pandaid, lfn=logfile['lfn']).values()
if len(logfilerec) > 0:
logfile['site'] = logfilerec[0]['destinationse']
logfile['guid'] = logfilerec[0]['guid']
logfile['scope'] = file['scope']
file['fsize'] = int(file['fsize']/1000000)
if 'pilotid' in job and job['pilotid'] is not None and job['pilotid'].startswith('http'):
stdout = job['pilotid'].split('|')[0]
stderr = stdout.replace('.out','.err')
stdlog = stdout.replace('.out','.log')
else:
stdout = stderr = stdlog = None
# input,pseudo_input,output,log and alphabetically within those please
filesSorted = []
filesSorted.extend(sorted([file for file in files if file['type'] == 'input'], key=lambda x:x['lfn']))
filesSorted.extend(sorted([file for file in files if file['type'] == 'pseudo_input'], key=lambda x:x['lfn']))
filesSorted.extend(sorted([file for file in files if file['type'] == 'output'], key=lambda x:x['lfn']))
filesSorted.extend(sorted([file for file in files if file['type'] == 'log'], key=lambda x:x['lfn']))
files = filesSorted
## Check for object store based log
oslogpath = None
if 'computingsite' in job and job['computingsite'] in objectStores:
ospath = objectStores[job['computingsite']]
if 'lfn' in logfile:
if ospath.endswith('/'):
oslogpath = ospath + logfile['lfn']
else:
oslogpath = ospath + '/' + logfile['lfn']
## Check for debug info
if 'specialhandling' in job and job['specialhandling'].find('debug') >= 0:
debugmode = True
else:
debugmode = False
debugstdout = None
if debugmode:
if 'showdebug' in request.session['requestParams']:
debugstdoutrec = Jobsdebug.objects.filter(pandaid=pandaid).values()
if len(debugstdoutrec) > 0:
if 'stdout' in debugstdoutrec[0]: debugstdout = debugstdoutrec[0]['stdout']
if 'transformation' in job and job['transformation'] is not None and job['transformation'].startswith('http'):
job['transformation'] = "<a href='%s'>%s</a>" % ( job['transformation'], job['transformation'].split('/')[-1] )
if 'metastruct' in job:
job['metadata'] = json.dumps(job['metastruct'], sort_keys=True, indent=4, separators=(',', ': '))
## Get job parameters
print "getting job parameters"
jobparamrec = Jobparamstable.objects.filter(pandaid=pandaid)
jobparams = None
if len(jobparamrec) > 0:
jobparams = jobparamrec[0].jobparameters
#else:
# jobparamrec = JobparamstableArch.objects.filter(pandaid=pandaid)
# if len(jobparamrec) > 0:
# jobparams = jobparamrec[0].jobparameters
dsfiles = []
countOfInvocations = []
## If this is a JEDI job, look for job retries
if 'jeditaskid' in job and job['jeditaskid'] > 0:
print "looking for retries"
## Look for retries of this job
if not isEventService(job):
retryquery = {}
retryquery['jeditaskid'] = job['jeditaskid']
retryquery['oldpandaid'] = job['pandaid']
retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('newpandaid').reverse().values()
pretries = getSequentialRetries(job['pandaid'], job['jeditaskid'])
else:
retryquery = {}
retryquery['jeditaskid'] = job['jeditaskid']
retryquery['oldpandaid'] = job['jobsetid']
retryquery['relationtype'] = 'jobset_retry'
#retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('newpandaid').reverse().values()
retries = getSequentialRetries_ESupstream(job['pandaid'], job['jobsetid'], job['jeditaskid'], countOfInvocations)
pretries = getSequentialRetries_ES(job['pandaid'], job['jobsetid'], job['jeditaskid'], countOfInvocations)
else:
retries = None
pretries = None
countOfInvocations = len(countOfInvocations)
## jobset info
libjob = None
runjobs = []
mergejobs = []
if 'jobset' in request.session['requestParams'] and 'jobsetid' in job and job['jobsetid'] > 0:
print "jobset info"
jsquery = {}
jsquery['jobsetid'] = job['jobsetid']
jsquery['produsername'] = job['produsername']
values = [ 'pandaid', 'prodsourcelabel', 'processingtype', 'transformation' ]
jsjobs = []
jsjobs.extend(Jobsdefined4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobsactive4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobswaiting4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobsarchived4.objects.filter(**jsquery).values(*values))
jsjobs.extend(Jobsarchived.objects.filter(**jsquery).values(*values))
if len(jsjobs) > 0:
for j in jsjobs:
id = j['pandaid']
if j['transformation'].find('runAthena') >= 0:
runjobs.append(id)
elif j['transformation'].find('buildJob') >= 0:
libjob = id
if j['processingtype'] == 'usermerge':
mergejobs.append(id)
esjobstr = ''
if isEventService(job):
## for ES jobs, prepare the event table
esjobdict = {}
for s in eventservicestatelist:
esjobdict[s] = 0
evtable = JediEvents.objects.filter(pandaid=job['pandaid']).order_by('-def_min_eventid').values('fileid', 'datasetid', 'def_min_eventid','def_max_eventid','processed_upto_eventid','status','job_processid','attemptnr')
fileids = {}
datasetids = {}
#for evrange in evtable:
# fileids[int(evrange['fileid'])] = {}
# datasetids[int(evrange['datasetid'])] = {}
flist = []
for f in fileids:
flist.append(f)
dslist = []
for ds in datasetids:
dslist.append(ds)
#datasets = JediDatasets.objects.filter(datasetid__in=dslist).values()
dsfiles = JediDatasetContents.objects.filter(fileid__in=flist).values()
#for ds in datasets:
# datasetids[int(ds['datasetid'])]['dict'] = ds
#for f in dsfiles:
# fileids[int(f['fileid'])]['dict'] = f
for evrange in evtable:
#evrange['fileid'] = fileids[int(evrange['fileid'])]['dict']['lfn']
#evrange['datasetid'] = datasetids[evrange['datasetid']]['dict']['datasetname']
evrange['status'] = eventservicestatelist[evrange['status']]
esjobdict[evrange['status']] += 1
evrange['attemptnr'] = 10-evrange['attemptnr']
esjobstr = ''
for s in esjobdict:
if esjobdict[s] > 0:
esjobstr += " %s(%s) " % ( s, esjobdict[s] )
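# e.g. (hypothetical states and counts) esjobstr -> " finished(120)  ready(3) "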
else:
evtable = []
## For CORE, pick up parameters from jobparams
if VOMODE == 'core' or ('vo' in job and job['vo'] == 'core'):
coreData = {}
if jobparams:
coreParams = re.match('.*PIPELINE_TASK\=([a-zA-Z0-9]+).*PIPELINE_PROCESSINSTANCE\=([0-9]+).*PIPELINE_STREAM\=([0-9\.]+)',jobparams)
if coreParams:
coreData['pipelinetask'] = coreParams.group(1)
coreData['processinstance'] = coreParams.group(2)
coreData['pipelinestream'] = coreParams.group(3)
else:
coreData = None
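# The regex pulls the CORE pipeline fields out of jobparams; e.g. a (hypothetical)
# fragment '... PIPELINE_TASK=calexp ... PIPELINE_PROCESSINSTANCE=12345 ...
# PIPELINE_STREAM=3.14 ...' yields pipelinetask='calexp', processinstance='12345',
# pipelinestream='3.14'.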
if 'jobstatus' in job and (job['jobstatus'] == 'failed' or job['jobstatus'] == 'holding'):
errorinfo = getErrorDescription(job)
if len(errorinfo) > 0:
job['errorinfo'] = errorinfo
if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
del request.session['TFIRST']
del request.session['TLAST']
data = {
'prefix': getPrefix(request),
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'pandaid': pandaid,
'job': job,
'columns' : columns,
'files' : files,
'dsfiles' : dsfiles,
'nfiles' : nfiles,
'logfile' : logfile,
'oslogpath' : oslogpath,
'stdout' : stdout,
'stderr' : stderr,
'stdlog' : stdlog,
'jobparams' : jobparams,
'jobid' : jobid,
'coreData' : coreData,
'logextract' : logextract,
'retries' : retries,
'pretries' : pretries,
'countOfInvocations':countOfInvocations,
'eventservice' : isEventService(job),
'evtable' : evtable[:100],
'debugmode' : debugmode,
'debugstdout' : debugstdout,
'libjob' : libjob,
'runjobs' : runjobs,
'mergejobs' : mergejobs,
'esjobstr': esjobstr,
'fileSummary':fileSummary,
}
data.update(getContextVariables(request))
##self monitor
endSelfMonitor(request)
if isEventService(job):
response = render_to_response('jobInfoES.html', data, RequestContext(request))
else:
response = render_to_response('jobInfo.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
elif (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or ('json' in request.session['requestParams']):
del request.session['TFIRST']
del request.session['TLAST']
data = {'files':files,
'job':job,
'dsfiles' : dsfiles,
}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), mimetype='text/html')
else:
del request.session['TFIRST']
del request.session['TLAST']
return HttpResponse('not understood', mimetype='text/html')
class DateTimeEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, datetime):
            return o.isoformat()
        return json.JSONEncoder.default(self, o)
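# Usage sketch: DateTimeEncoder lets json.dumps serialize datetime values found in
# job/file dicts, e.g. json.dumps({'start': datetime(2014, 1, 1)}, cls=DateTimeEncoder)
# -> '{"start": "2014-01-01T00:00:00"}'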
def userList(request):
valid, response = initRequest(request)
if not valid: return response
nhours = 90*24
setupView(request, hours=nhours, limit=-99)
if VOMODE == 'atlas':
view = 'database'
else:
view = 'dynamic'
if 'view' in request.session['requestParams']:
view = request.session['requestParams']['view']
sumd = []
jobsumd = []
userdb = []
userdbl = []
userstats = {}
if view == 'database':
startdate = timezone.now() - timedelta(hours=nhours)
startdate = startdate.strftime(defaultDatetimeFormat)
enddate = timezone.now().strftime(defaultDatetimeFormat)
query = { 'latestjob__range' : [startdate, enddate] }
#viewParams['selection'] = ", last %d days" % (float(nhours)/24.)
## Use the users table
if 'sortby' in request.session['requestParams']:
sortby = request.session['requestParams']['sortby']
if sortby == 'name':
userdb = Users.objects.filter(**query).order_by('name')
elif sortby == 'njobs':
userdb = Users.objects.filter(**query).order_by('njobsa').reverse()
elif sortby == 'date':
userdb = Users.objects.filter(**query).order_by('latestjob').reverse()
elif sortby == 'cpua1':
userdb = Users.objects.filter(**query).order_by('cpua1').reverse()
elif sortby == 'cpua7':
userdb = Users.objects.filter(**query).order_by('cpua7').reverse()
elif sortby == 'cpup1':
userdb = Users.objects.filter(**query).order_by('cpup1').reverse()
elif sortby == 'cpup7':
userdb = Users.objects.filter(**query).order_by('cpup7').reverse()
else:
userdb = Users.objects.filter(**query).order_by('name')
else:
userdb = Users.objects.filter(**query).order_by('name')
anajobs = 0
n1000 = 0
n10k = 0
nrecent3 = 0
nrecent7 = 0
nrecent30 = 0
nrecent90 = 0
## Move to a list of dicts and adjust CPU unit
for u in userdb:
udict = {}
udict['name'] = u.name
udict['njobsa'] = u.njobsa
if u.cpua1: udict['cpua1'] = "%0.1f" % (int(u.cpua1)/3600.)
if u.cpua7: udict['cpua7'] = "%0.1f" % (int(u.cpua7)/3600.)
if u.cpup1: udict['cpup1'] = "%0.1f" % (int(u.cpup1)/3600.)
if u.cpup7: udict['cpup7'] = "%0.1f" % (int(u.cpup7)/3600.)
udict['latestjob'] = u.latestjob
userdbl.append(udict)
if u.njobsa > 0: anajobs += u.njobsa
if u.njobsa >= 1000: n1000 += 1
if u.njobsa >= 10000: n10k += 1
if u.latestjob != None:
latest = timezone.now() - u.latestjob
if latest.days < 4: nrecent3 += 1
if latest.days < 8: nrecent7 += 1
if latest.days < 31: nrecent30 += 1
if latest.days < 91: nrecent90 += 1
userstats['anajobs'] = anajobs
userstats['n1000'] = n1000
userstats['n10k'] = n10k
userstats['nrecent3'] = nrecent3
userstats['nrecent7'] = nrecent7
userstats['nrecent30'] = nrecent30
userstats['nrecent90'] = nrecent90
else:
if VOMODE == 'atlas':
nhours = 12
else:
nhours = 7*24
query = setupView(request, hours=nhours, limit=5000)
## dynamically assemble user summary info
values = 'eventservice', 'produsername','cloud','computingsite','cpuconsumptiontime','jobstatus','transformation','prodsourcelabel','specialhandling','vo','modificationtime','pandaid', 'atlasrelease', 'processingtype', 'workinggroup', 'currentpriority'
jobs = QuerySetChain(\
Jobsdefined4.objects.filter(**query).order_by('-modificationtime')[:request.session['JOB_LIMIT']].values(*values),
Jobsactive4.objects.filter(**query).order_by('-modificationtime')[:request.session['JOB_LIMIT']].values(*values),
Jobswaiting4.objects.filter(**query).order_by('-modificationtime')[:request.session['JOB_LIMIT']].values(*values),
Jobsarchived4.objects.filter(**query).order_by('-modificationtime')[:request.session['JOB_LIMIT']].values(*values),
)
jobs = cleanJobList(request, jobs)
sumd = userSummaryDict(jobs)
sumparams = [ 'jobstatus', 'prodsourcelabel', 'specialhandling', 'transformation', 'processingtype', 'workinggroup', 'priorityrange', 'jobsetrange' ]
if VOMODE == 'atlas':
sumparams.append('atlasrelease')
else:
sumparams.append('vo')
jobsumd = jobSummaryDict(request, jobs, sumparams)[0]
if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
TFIRST = request.session['TFIRST']
TLAST = request.session['TLAST']
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'xurl' : extensibleURL(request),
'url' : request.path,
'sumd' : sumd,
'jobsumd' : jobsumd,
'userdb' : userdbl,
'userstats' : userstats,
'tfirst' : TFIRST,
'tlast' : TLAST,
'plow' : PLOW,
'phigh' : PHIGH,
}
data.update(getContextVariables(request))
##self monitor
endSelfMonitor(request)
response = render_to_response('userList.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
elif (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or ('json' in request.session['requestParams']):
del request.session['TFIRST']
del request.session['TLAST']
resp = sumd
return HttpResponse(json.dumps(resp), mimetype='text/html')
@cache_page(60*20)
def userInfo(request, user=''):
valid, response = initRequest(request)
if not valid: return response
if user == '':
if 'user' in request.session['requestParams']: user = request.session['requestParams']['user']
if 'produsername' in request.session['requestParams']: user = request.session['requestParams']['produsername']
if 'days' in request.session['requestParams']:
days = int(request.session['requestParams']['days'])
else:
days = 7
## Tasks owned by the user
startdate = timezone.now() - timedelta(hours=days*24)
startdate = startdate.strftime(defaultDatetimeFormat)
enddate = timezone.now().strftime(defaultDatetimeFormat)
query = { 'modificationtime__range' : [startdate, enddate] }
query['username__icontains'] = user.strip()
tasks = JediTasks.objects.filter(**query).values()
tasks = sorted(tasks, key=lambda x:-x['jeditaskid'])
tasks = cleanTaskList(request, tasks)
ntasks = len(tasks)
tasksumd = taskSummaryDict(request,tasks)
tasks=getTaskScoutingInfo(tasks, ntasks)
## Jobs
limit = 5000
query = setupView(request,hours=72,limit=limit)
# query['produsername__icontains'] = user.strip()
query['produsername__startswith'] = user.strip()
jobs = []
values = 'eventservice','produsername','cloud','computingsite','cpuconsumptiontime','jobstatus','transformation','prodsourcelabel','specialhandling','vo','modificationtime','pandaid', 'atlasrelease', 'jobsetid', 'processingtype', 'workinggroup', 'jeditaskid', 'taskid', 'currentpriority', 'creationtime', 'starttime', 'endtime', 'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag', 'exeerrorcode', 'exeerrordiag', 'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode', 'piloterrordiag', 'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag', 'transexitcode', 'homepackage', 'inputfileproject', 'inputfiletype', 'attemptnr', 'jobname', 'proddblock', 'destinationdblock',
jobs.extend(Jobsdefined4.objects.filter(**query)[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobsactive4.objects.filter(**query)[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobswaiting4.objects.filter(**query)[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobsarchived4.objects.filter(**query)[:request.session['JOB_LIMIT']].values(*values))
jobsetids = None
if len(jobs) == 0 or (len(jobs) < limit and LAST_N_HOURS_MAX > 72):
jobs.extend(Jobsarchived.objects.filter(**query)[:request.session['JOB_LIMIT']].values(*values))
# if len(jobs) < limit and ntasks == 0:
# ## try at least to find some old jobsets
# startdate = timezone.now() - timedelta(hours=30*24)
# startdate = startdate.strftime(defaultDatetimeFormat)
# enddate = timezone.now().strftime(defaultDatetimeFormat)
# query = { 'modificationtime__range' : [startdate, enddate] }
# query['produsername'] = user
# jobsetids = Jobsarchived.objects.filter(**query).values('jobsetid').distinct()
jobs = cleanJobList(request, jobs)
query = { 'name__icontains' : user.strip() }
userdb = Users.objects.filter(**query).values()
if len(userdb) > 0:
userstats = userdb[0]
user = userstats['name']
for field in ['cpua1', 'cpua7', 'cpup1', 'cpup7' ]:
try:
userstats[field] = "%0.1f" % ( float(userstats[field])/3600.)
except:
userstats[field] = '-'
else:
userstats = None
## Divide up jobs by jobset and summarize
jobsets = {}
for job in jobs:
if 'jobsetid' not in job or job['jobsetid'] == None: continue
if job['jobsetid'] not in jobsets:
jobsets[job['jobsetid']] = {}
jobsets[job['jobsetid']]['jobsetid'] = job['jobsetid']
jobsets[job['jobsetid']]['jobs'] = []
jobsets[job['jobsetid']]['jobs'].append(job)
for jobset in jobsets:
jobsets[jobset]['sum'] = jobStateSummary(jobsets[jobset]['jobs'])
jobsets[jobset]['njobs'] = len(jobsets[jobset]['jobs'])
tfirst = timezone.now()
tlast = timezone.now() - timedelta(hours=2400)
plow = 1000000
phigh = -1000000
for job in jobsets[jobset]['jobs']:
if job['modificationtime'] > tlast: tlast = job['modificationtime']
if job['modificationtime'] < tfirst: tfirst = job['modificationtime']
if job['currentpriority'] > phigh: phigh = job['currentpriority']
if job['currentpriority'] < plow: plow = job['currentpriority']
jobsets[jobset]['tfirst'] = tfirst
jobsets[jobset]['tlast'] = tlast
jobsets[jobset]['plow'] = plow
jobsets[jobset]['phigh'] = phigh
jobsetl = []
jsk = jobsets.keys()
jsk.sort(reverse=True)
for jobset in jsk:
jobsetl.append(jobsets[jobset])
njobsmax = len(jobs)
if 'display_limit' in request.session['requestParams'] and int(request.session['requestParams']['display_limit']) < len(jobs):
display_limit = int(request.session['requestParams']['display_limit'])
njobsmax = display_limit
url_nolimit = removeParam(request.get_full_path(), 'display_limit')
else:
display_limit = 3000
njobsmax = display_limit
url_nolimit = request.get_full_path()
if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
sumd = userSummaryDict(jobs)
flist = [ 'jobstatus', 'prodsourcelabel', 'processingtype', 'specialhandling', 'transformation', 'jobsetid', 'jeditaskid', 'computingsite', 'cloud', 'workinggroup', 'homepackage', 'inputfileproject', 'inputfiletype', 'attemptnr', 'priorityrange', 'jobsetrange' ]
if VOMODE != 'atlas':
flist.append('vo')
else:
flist.append('atlasrelease')
jobsumd = jobSummaryDict(request, jobs, flist)
njobsetmax = 200
xurl = extensibleURL(request)
nosorturl = removeParam(xurl, 'sortby',mode='extensible')
#print len(tasks)
#print tasks[0].jeditaskid
#for task in tasks:
# print task['reqid']
# if 'reqid' in task:
# print task.reqid
#
#
TFIRST = request.session['TFIRST']
TLAST = request.session['TLAST']
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'xurl' : xurl,
'nosorturl' : nosorturl,
'user' : user,
'sumd' : sumd,
'jobsumd' : jobsumd,
'jobList' : jobs[:njobsmax],
'njobs' : len(jobs),
'query' : query,
'userstats' : userstats,
'tfirst' : TFIRST,
'tlast' : TLAST,
'plow' : PLOW,
'phigh' : PHIGH,
'jobsets' : jobsetl[:njobsetmax-1],
'njobsetmax' : njobsetmax,
'njobsets' : len(jobsetl),
'url_nolimit' : url_nolimit,
'display_limit' : display_limit,
'tasks': tasks,
'ntasks' : ntasks,
'tasksumd' : tasksumd,
}
data.update(getContextVariables(request))
##self monitor
endSelfMonitor(request)
response = render_to_response('userInfo.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
resp = userSummaryDict(jobs)
return HttpResponse(json.dumps(resp), mimetype='text/html')
@cache_page(60*20)
def siteList(request):
valid, response = initRequest(request)
if not valid: return response
for param in request.session['requestParams']:
request.session['requestParams'][param]= escapeInput(request.session['requestParams'][param])
setupView(request, opmode='notime')
query = {}
### Add any extensions to the query determined from the URL
if VOMODE == 'core': query['siteid__contains'] = 'CORE'
prod = False
extraParCondition = '1=1'
for param in request.session['requestParams']:
if param == 'category' and request.session['requestParams'][param] == 'multicloud':
query['multicloud__isnull'] = False
if param == 'category' and request.session['requestParams'][param] == 'analysis':
query['siteid__contains'] = 'ANALY'
if param == 'category' and request.session['requestParams'][param] == 'test':
query['siteid__icontains'] = 'test'
if param == 'category' and request.session['requestParams'][param] == 'production':
prod = True
if param == 'catchall':
wildCards = request.session['requestParams'][param].split('|')
countCards = len(wildCards)
currentCardCount = 1
extraParCondition = '('
for card in wildCards:
extraParCondition += preprocessWildCardString( escapeInput(card) , 'catchall')
if (currentCardCount < countCards): extraParCondition +=' OR '
currentCardCount += 1
extraParCondition += ')'
for field in Schedconfig._meta.get_all_field_names():
if param == field and not (param == 'catchall'):
query[param] = escapeInput(request.session['requestParams'][param])
siteres = Schedconfig.objects.filter(**query).exclude(cloud='CMS').extra(where=[extraParCondition]).values()
mcpres = Schedconfig.objects.filter(status='online').exclude(cloud='CMS').exclude(siteid__icontains='test').values('siteid','multicloud','cloud').order_by('siteid')
sites = []
for site in siteres:
if 'category' in request.session['requestParams'] and request.session['requestParams']['category'] == 'multicloud':
if (site['multicloud'] == 'None') or (not re.match('[A-Z]+',site['multicloud'])): continue
sites.append(site)
if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'maxmemory':
sites = sorted(sites, key=lambda x:-x['maxmemory'])
elif 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'maxtime':
sites = sorted(sites, key=lambda x:-x['maxtime'])
elif 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'gocname':
sites = sorted(sites, key=lambda x:x['gocname'])
else:
sites = sorted(sites, key=lambda x:x['siteid'])
if prod:
newsites = []
for site in sites:
if site['siteid'].find('ANALY') >= 0:
pass
elif site['siteid'].lower().find('test') >= 0:
pass
else:
newsites.append(site)
sites = newsites
for site in sites:
if site['maxtime'] and (site['maxtime'] > 0) : site['maxtime'] = "%.1f" % ( float(site['maxtime'])/3600. )
site['space'] = "%d" % (site['space']/1000.)
if VOMODE == 'atlas' and (len(request.session['requestParams']) == 0 or 'cloud' in request.session['requestParams']):
clouds = Cloudconfig.objects.filter().exclude(name='CMS').exclude(name='OSG').values()
clouds = sorted(clouds, key=lambda x:x['name'])
mcpsites = {}
for cloud in clouds:
cloud['display'] = True
if 'cloud' in request.session['requestParams'] and request.session['requestParams']['cloud'] != cloud['name']: cloud['display'] = False
mcpsites[cloud['name']] = []
for site in sites:
if site['siteid'] == cloud['tier1']:
cloud['space'] = site['space']
cloud['tspace'] = site['tspace']
for site in mcpres:
mcpclouds = site['multicloud'].split(',')
if cloud['name'] in mcpclouds or cloud['name'] == site['cloud']:
sited = {}
sited['name'] = site['siteid']
sited['cloud'] = site['cloud']
if site['cloud'] == cloud['name']:
sited['type'] = 'home'
else:
sited['type'] = 'mcp'
mcpsites[cloud['name']].append(sited)
cloud['mcpsites'] = ''
for s in mcpsites[cloud['name']]:
if s['type'] == 'home':
cloud['mcpsites'] += "<b>%s</b> " % s['name']
else:
cloud['mcpsites'] += "%s " % s['name']
else:
clouds = None
xurl = extensibleURL(request)
nosorturl = removeParam(xurl, 'sortby',mode='extensible')
if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
sumd = siteSummaryDict(sites)
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'sites': sites,
'clouds' : clouds,
'sumd' : sumd,
'xurl' : xurl,
'nosorturl' : nosorturl,
}
if 'cloud' in request.session['requestParams']: data['mcpsites'] = mcpsites[request.session['requestParams']['cloud']]
#data.update(getContextVariables(request))
##self monitor
endSelfMonitor(request)
response = render_to_response('siteList.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
resp = sites
return HttpResponse(json.dumps(resp), mimetype='text/html')
def siteInfo(request, site=''):
valid, response = initRequest(request)
if not valid: return response
if site == '' and 'site' in request.session['requestParams']: site = request.session['requestParams']['site']
setupView(request)
LAST_N_HOURS_MAX = 12
startdate = timezone.now() - timedelta(hours=LAST_N_HOURS_MAX)
startdate = startdate.strftime(defaultDatetimeFormat)
enddate = timezone.now().strftime(defaultDatetimeFormat)
query = {'siteid__iexact' : site}
sites = Schedconfig.objects.filter(**query)
colnames = []
try:
siterec = sites[0]
colnames = siterec.get_all_fields()
except IndexError:
siterec = None
HPC = False
njobhours = 12
try:
if siterec.catchall.find('HPC') >= 0:
HPC = True
njobhours = 48
except AttributeError:
pass
if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
attrs = []
if siterec:
attrs.append({'name' : 'GOC name', 'value' : siterec.gocname })
if HPC: attrs.append({'name' : 'HPC', 'value' : 'This is a High Performance Computing (HPC) supercomputer queue' })
if siterec.catchall and siterec.catchall.find('log_to_objectstore') >= 0:
attrs.append({'name' : 'Object store logs', 'value' : 'Logging to object store is enabled' })
if siterec.objectstore and len(siterec.objectstore) > 0:
fields = siterec.objectstore.split('|')
nfields = len(fields)
for nf in range(len(fields)):
if nf == 0:
attrs.append({'name' : 'Object store location', 'value' : fields[0] })
else:
fields2 = fields[nf].split('^')
if len(fields2) > 1:
ostype = fields2[0]
ospath = fields2[1]
attrs.append({'name' : 'Object store %s path' % ostype, 'value' : ospath })
if siterec.nickname != site:
attrs.append({'name' : 'Queue (nickname)', 'value' : siterec.nickname })
if len(sites) > 1:
attrs.append({'name' : 'Total queues for this site', 'value' : len(sites) })
attrs.append({'name' : 'Status', 'value' : siterec.status })
if siterec.comment_field and len(siterec.comment_field) > 0:
attrs.append({'name' : 'Comment', 'value' : siterec.comment_field })
attrs.append({'name' : 'Cloud', 'value' : siterec.cloud })
if siterec.multicloud and len(siterec.multicloud) > 0:
attrs.append({'name' : 'Multicloud', 'value' : siterec.multicloud })
attrs.append({'name' : 'Tier', 'value' : siterec.tier })
attrs.append({'name' : 'DDM endpoint', 'value' : siterec.ddm })
attrs.append({'name' : 'Max rss', 'value' : "%.1f GB" % (float(siterec.maxrss)/1000.) })
attrs.append({'name' : 'Min rss', 'value' : "%.1f GB" % (float(siterec.minrss)/1000.) })
if siterec.maxtime > 0:
attrs.append({'name' : 'Maximum time', 'value' : "%.1f hours" % (float(siterec.maxtime)/3600.) })
attrs.append({'name' : 'Space', 'value' : "%d TB as of %s" % ((float(siterec.space)/1000.), siterec.tspace.strftime('%m-%d %H:%M')) })
attrs.append({'name' : 'Last modified', 'value' : "%s" % (siterec.lastmod.strftime('%Y-%m-%d %H:%M')) })
iquery = {}
startdate = timezone.now() - timedelta(hours=24*30)
startdate = startdate.strftime(defaultDatetimeFormat)
enddate = timezone.now().strftime(defaultDatetimeFormat)
iquery['at_time__range'] = [startdate, enddate]
cloudQuery = Q(description__contains='queue=%s' % siterec.nickname) | Q(description__contains='queue=%s' % siterec.siteid)
incidents = Incidents.objects.filter(**iquery).filter(cloudQuery).order_by('at_time').reverse().values()
else:
incidents = []
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'site' : siterec,
'queues' : sites,
'colnames' : colnames,
'attrs' : attrs,
'incidents' : incidents,
'name' : site,
'njobhours' : njobhours,
}
data.update(getContextVariables(request))
##self monitor
endSelfMonitor(request)
response = render_to_response('siteInfo.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
resp = []
for job in jobList:
resp.append({ 'pandaid': job.pandaid, 'status': job.jobstatus, 'prodsourcelabel': job.prodsourcelabel, 'produserid' : job.produserid})
return HttpResponse(json.dumps(resp), mimetype='text/html')
def updateCacheWithListOfMismatchedCloudSites(mismatchedSites):
listOfCloudSitesMismatched = cache.get('mismatched-cloud-sites-list')
if (listOfCloudSitesMismatched is None) or (len(listOfCloudSitesMismatched) == 0):
cache.set('mismatched-cloud-sites-list', mismatchedSites, 31536000)
else:
listOfCloudSitesMismatched.extend(mismatchedSites)
listOfCloudSitesMismatched.sort()
cache.set('mismatched-cloud-sites-list', [key for key, _ in itertools.groupby(listOfCloudSitesMismatched)], 31536000)
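# itertools.groupby only collapses adjacent duplicates, hence the sort() above.
# Minimal illustration (hypothetical site/cloud pairs):
#   [['SITE_A', 'DE'], ['SITE_A', 'DE'], ['SITE_B', 'US']] -> [['SITE_A', 'DE'], ['SITE_B', 'US']]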
def getListOfFailedBeforeSiteAssignedJobs(query, mismatchedSites, notime=True):
jobs = []
querynotime = copy.deepcopy(query)
if notime: del querynotime['modificationtime__range']
if len(mismatchedSites) == 0:
    return ''
siteCondition = ''
for site in mismatchedSites:
siteQuery = Q(computingsite=site[0]) & Q(cloud=site[1])
siteCondition = siteQuery if (siteCondition == '') else (siteCondition | siteQuery)
jobs.extend(Jobsactive4.objects.filter(siteCondition).filter(**querynotime).values('pandaid'))
jobs.extend(Jobsdefined4.objects.filter(siteCondition).filter(**querynotime).values('pandaid'))
jobs.extend(Jobswaiting4.objects.filter(siteCondition).filter(**querynotime).values('pandaid'))
jobs.extend(Jobsarchived4.objects.filter(siteCondition).filter(**query).values('pandaid'))
jobsString=''
if (len(jobs) > 0):
jobsString = '&pandaid='
for job in jobs:
jobsString += str(job['pandaid'])+','
jobsString = jobsString[:-1]
return jobsString
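# Equivalent construction with join (sketch), assuming the same 'pandaid' values:
#   jobsString = '&pandaid=' + ','.join(str(j['pandaid']) for j in jobs) if jobs else ''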
def siteSummary(query, notime=True):
summary = []
querynotime = copy.deepcopy(query)
if notime:
if 'modificationtime__range' in querynotime:
del querynotime['modificationtime__range']
summary.extend(Jobsactive4.objects.filter(**querynotime).values('cloud','computingsite','jobstatus').annotate(Count('jobstatus')).order_by('cloud','computingsite','jobstatus'))
summary.extend(Jobsdefined4.objects.filter(**querynotime).values('cloud','computingsite','jobstatus').annotate(Count('jobstatus')).order_by('cloud','computingsite','jobstatus'))
summary.extend(Jobswaiting4.objects.filter(**querynotime).values('cloud','computingsite','jobstatus').annotate(Count('jobstatus')).order_by('cloud','computingsite','jobstatus'))
summary.extend(Jobsarchived4.objects.filter(**query).values('cloud','computingsite','jobstatus').annotate(Count('jobstatus')).order_by('cloud','computingsite','jobstatus'))
return summary
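# Each summary row is a dict of the grouped fields plus the Count annotation, e.g.
# (hypothetical values):
#   {'cloud': 'DE', 'computingsite': 'SITE_A', 'jobstatus': 'finished', 'jobstatus__count': 42}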
def taskSummaryData(request, query):
summary = []
querynotime = copy.deepcopy(query)
del querynotime['modificationtime__range']
for idfield in ('taskid', 'jeditaskid'):
    for table, tquery in ((Jobsactive4, querynotime), (Jobsdefined4, querynotime), (Jobswaiting4, querynotime), (Jobsarchived4, query)):
        summary.extend(table.objects.filter(**tquery).values(idfield, 'jobstatus').annotate(Count('jobstatus')).order_by(idfield, 'jobstatus')[:request.session['JOB_LIMIT']])
return summary
def voSummary(query):
summary = []
querynotime = copy.deepcopy(query)
del querynotime['modificationtime__range']
summary.extend(Jobsactive4.objects.filter(**querynotime).values('vo','jobstatus').annotate(Count('jobstatus')))
summary.extend(Jobsdefined4.objects.filter(**querynotime).values('vo','jobstatus').annotate(Count('jobstatus')))
summary.extend(Jobswaiting4.objects.filter(**querynotime).values('vo','jobstatus').annotate(Count('jobstatus')))
summary.extend(Jobsarchived4.objects.filter(**query).values('vo','jobstatus').annotate(Count('jobstatus')))
return summary
def wgSummary(query):
summary = []
querynotime = copy.deepcopy(query)
del querynotime['modificationtime__range']
summary.extend(Jobsdefined4.objects.filter(**querynotime).values('workinggroup','jobstatus').annotate(Count('jobstatus')))
summary.extend(Jobsactive4.objects.filter(**querynotime).values('workinggroup','jobstatus').annotate(Count('jobstatus')))
summary.extend(Jobswaiting4.objects.filter(**querynotime).values('workinggroup','jobstatus').annotate(Count('jobstatus')))
summary.extend(Jobsarchived4.objects.filter(**query).values('workinggroup','jobstatus').annotate(Count('jobstatus')))
return summary
def wnSummary(query):
summary = []
querynotime = query
# del querynotime['modificationtime__range'] ### creates inconsistency with job lists. Stick to advertised 12hrs
summary.extend(Jobsactive4.objects.filter(**querynotime).values('modificationhost', 'jobstatus').annotate(Count('jobstatus')).order_by('modificationhost', 'jobstatus'))
summary.extend(Jobsarchived4.objects.filter(**query).values('modificationhost', 'jobstatus').annotate(Count('jobstatus')).order_by('modificationhost', 'jobstatus'))
return summary
@cache_page(60*20)
def wnInfo(request,site,wnname='all'):
""" Give worker node level breakdown of site activity. Spot hot nodes, error prone nodes. """
if 'hours' in request.REQUEST:
hours = int(request.REQUEST['hours'])
else:
hours=12
valid, response = initRequest(request)
if not valid: return response
errthreshold = 15
if wnname != 'all':
query = setupView(request,hours=hours,limit=999999)
query['modificationhost__endswith'] = wnname
else:
query = setupView(request,hours=hours,limit=999999)
query['computingsite'] = site
wnsummarydata = wnSummary(query)
totstates = {}
totjobs = 0
wns = {}
wnPlotFailed = {}
wnPlotFinished = {}
for state in sitestatelist:
totstates[state] = 0
for rec in wnsummarydata:
jobstatus = rec['jobstatus']
count = rec['jobstatus__count']
wnfull = rec['modificationhost']
wnsplit = wnfull.split('@')
if len(wnsplit) == 2:
if wnname == 'all':
wn = wnsplit[1]
else:
wn = wnfull
slot = wnsplit[0]
else:
wn = wnfull
slot = '1'
if wn.startswith('aipanda'): continue
if jobstatus == 'failed':
if not wn in wnPlotFailed: wnPlotFailed[wn] = 0
wnPlotFailed[wn] += count
elif jobstatus == 'finished':
if not wn in wnPlotFinished: wnPlotFinished[wn] = 0
wnPlotFinished[wn] += count
totjobs += count
if jobstatus not in totstates:
totstates[jobstatus] = 0
totstates[jobstatus] += count
if wn not in wns:
wns[wn] = {}
wns[wn]['name'] = wn
wns[wn]['count'] = 0
wns[wn]['states'] = {}
wns[wn]['slotd'] = {}
wns[wn]['statelist'] = []
for state in sitestatelist:
wns[wn]['states'][state] = {}
wns[wn]['states'][state]['name'] = state
wns[wn]['states'][state]['count'] = 0
if slot not in wns[wn]['slotd']: wns[wn]['slotd'][slot] = 0
wns[wn]['slotd'][slot] += 1
wns[wn]['count'] += count
if jobstatus not in wns[wn]['states']:
wns[wn]['states'][jobstatus]={}
wns[wn]['states'][jobstatus]['count']=0
wns[wn]['states'][jobstatus]['count'] += count
## Convert dict to summary list
wnkeys = wns.keys()
wnkeys.sort()
wntot = len(wnkeys)
fullsummary = []
allstated = {}
allstated['finished'] = allstated['failed'] = 0
allwns = {}
allwns['name'] = 'All'
allwns['count'] = totjobs
allwns['states'] = totstates
allwns['statelist'] = []
for state in sitestatelist:
allstate = {}
allstate['name'] = state
allstate['count'] = totstates[state]
allstated[state] = totstates[state]
allwns['statelist'].append(allstate)
if int(allstated['finished']) + int(allstated['failed']) > 0:
allwns['pctfail'] = int(100.*float(allstated['failed'])/(allstated['finished']+allstated['failed']))
else:
allwns['pctfail'] = 0
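# e.g. 25 failed and 75 finished -> pctfail = int(100.*25/(75+25)) = 25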
if wnname == 'all': fullsummary.append(allwns)
avgwns = {}
avgwns['name'] = 'Average'
if wntot > 0:
avgwns['count'] = "%0.2f" % (float(totjobs)/wntot)
else:
avgwns['count'] = ''
avgwns['states'] = totstates
avgwns['statelist'] = []
avgstates = {}
for state in sitestatelist:
if wntot > 0:
avgstates[state] = float(totstates[state])/wntot
else:
avgstates[state] = ''
allstate = {}
allstate['name'] = state
if wntot > 0:
allstate['count'] = "%0.2f" % (float(totstates[state])/wntot)
allstated[state] = "%0.2f" % (float(totstates[state])/wntot)
else:
allstate['count'] = ''
allstated[state] = ''
avgwns['statelist'].append(allstate)
avgwns['pctfail'] = allwns['pctfail']
if wnname == 'all': fullsummary.append(avgwns)
for wn in wnkeys:
outlier = ''
wns[wn]['slotcount'] = len(wns[wn]['slotd'])
wns[wn]['pctfail'] = 0
for state in sitestatelist:
wns[wn]['statelist'].append(wns[wn]['states'][state])
if wns[wn]['states']['finished']['count'] + wns[wn]['states']['failed']['count'] > 0:
wns[wn]['pctfail'] = int(100.*float(wns[wn]['states']['failed']['count'])/(wns[wn]['states']['finished']['count']+wns[wn]['states']['failed']['count']))
if float(wns[wn]['states']['finished']['count']) < float(avgstates['finished'])/5. :
outlier += " LowFinished "
if float(wns[wn]['states']['failed']['count']) > max(float(avgstates['failed'])*3.,5.) :
outlier += " HighFailed "
wns[wn]['outlier'] = outlier
fullsummary.append(wns[wn])
if 'sortby' in request.session['requestParams']:
if request.session['requestParams']['sortby'] in sitestatelist:
fullsummary = sorted(fullsummary, key=lambda x:x['states'][request.session['requestParams']['sortby']],reverse=True)
elif request.session['requestParams']['sortby'] == 'pctfail':
fullsummary = sorted(fullsummary, key=lambda x:x['pctfail'],reverse=True)
kys = wnPlotFailed.keys()
kys.sort()
wnPlotFailedL = []
for k in kys:
wnPlotFailedL.append( [ k, wnPlotFailed[k] ] )
kys = wnPlotFinished.keys()
kys.sort()
wnPlotFinishedL = []
for k in kys:
wnPlotFinishedL.append( [ k, wnPlotFinished[k] ] )
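# wnPlotFailedL / wnPlotFinishedL are sorted [node, count] pairs ready for plotting,
# e.g. (hypothetical): [['wn01.example.org', 12], ['wn05.example.org', 3]]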
if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
xurl = extensibleURL(request)
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'url' : request.path,
'xurl' : xurl,
'site' : site,
'wnname' : wnname,
'user' : None,
'summary' : fullsummary,
'wnPlotFailed' : wnPlotFailedL,
'wnPlotFinished' : wnPlotFinishedL,
'hours' : hours,
'errthreshold' : errthreshold,
}
##self monitor
endSelfMonitor(request)
response = render_to_response('wnInfo.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'url' : request.path,
'site' : site,
'wnname' : wnname,
'user' : None,
'summary' : fullsummary,
'wnPlotFailed' : wnPlotFailedL,
'wnPlotFinished' : wnPlotFinishedL,
'hours' : hours,
'errthreshold' : errthreshold,
}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), mimetype='text/html')
def dashSummary(request, hours, limit=999999, view='all', cloudview='region', notime=True):
pilots = getPilotCounts(view)
query = setupView(request,hours=hours,limit=limit,opmode=view)
if VOMODE == 'atlas' and len(request.session['requestParams']) == 0:
cloudinfol = Cloudconfig.objects.filter().exclude(name='CMS').exclude(name='OSG').values('name','status')
else:
cloudinfol = []
cloudinfo = {}
for c in cloudinfol:
cloudinfo[c['name']] = c['status']
siteinfol = Schedconfig.objects.filter().exclude(cloud='CMS').values('siteid','status')
siteinfo = {}
for s in siteinfol:
siteinfo[s['siteid']] = s['status']
sitesummarydata = siteSummary(query, notime)
mismatchedSites = []
clouds = {}
totstates = {}
totjobs = 0
for state in sitestatelist:
totstates[state] = 0
for rec in sitesummarydata:
if cloudview == 'region':
if rec['computingsite'] in homeCloud:
cloud = homeCloud[rec['computingsite']]
else:
print "ERROR cloud not known", rec
mismatchedSites.append( [rec['computingsite'], rec['cloud']])
cloud = ''
else:
cloud = rec['cloud']
site = rec['computingsite']
if view.find('test') < 0:
if view != 'analysis' and site.startswith('ANALY'): continue
if view == 'analysis' and not site.startswith('ANALY'): continue
jobstatus = rec['jobstatus']
count = rec['jobstatus__count']
if jobstatus not in sitestatelist: continue
totjobs += count
totstates[jobstatus] += count
if cloud not in clouds:
print "Cloud:" + cloud
clouds[cloud] = {}
clouds[cloud]['name'] = cloud
if cloud in cloudinfo: clouds[cloud]['status'] = cloudinfo[cloud]
clouds[cloud]['count'] = 0
clouds[cloud]['pilots'] = 0
clouds[cloud]['sites'] = {}
clouds[cloud]['states'] = {}
clouds[cloud]['statelist'] = []
for state in sitestatelist:
clouds[cloud]['states'][state] = {}
clouds[cloud]['states'][state]['name'] = state
clouds[cloud]['states'][state]['count'] = 0
clouds[cloud]['count'] += count
clouds[cloud]['states'][jobstatus]['count'] += count
if site not in clouds[cloud]['sites']:
clouds[cloud]['sites'][site] = {}
clouds[cloud]['sites'][site]['name'] = site
if site in siteinfo: clouds[cloud]['sites'][site]['status'] = siteinfo[site]
clouds[cloud]['sites'][site]['count'] = 0
if site in pilots:
clouds[cloud]['sites'][site]['pilots'] = pilots[site]['count']
clouds[cloud]['pilots'] += pilots[site]['count']
else:
clouds[cloud]['sites'][site]['pilots'] = 0
clouds[cloud]['sites'][site]['states'] = {}
for state in sitestatelist:
clouds[cloud]['sites'][site]['states'][state] = {}
clouds[cloud]['sites'][site]['states'][state]['name'] = state
clouds[cloud]['sites'][site]['states'][state]['count'] = 0
clouds[cloud]['sites'][site]['count'] += count
clouds[cloud]['sites'][site]['states'][jobstatus]['count'] += count
updateCacheWithListOfMismatchedCloudSites(mismatchedSites)
## Go through the sites, add any that are missing (because they have no jobs in the interval)
if cloudview != 'cloud':
for site in pandaSites:
if view.find('test') < 0:
if view != 'analysis' and site.startswith('ANALY'): continue
if view == 'analysis' and not site.startswith('ANALY'): continue
cloud = pandaSites[site]['cloud']
if cloud not in clouds:
## Bail. Adding sites is one thing; adding clouds is another
continue
if site not in clouds[cloud]['sites']:
clouds[cloud]['sites'][site] = {}
clouds[cloud]['sites'][site]['name'] = site
if site in siteinfo: clouds[cloud]['sites'][site]['status'] = siteinfo[site]
clouds[cloud]['sites'][site]['count'] = 0
clouds[cloud]['sites'][site]['pctfail'] = 0
if site in pilots:
clouds[cloud]['sites'][site]['pilots'] = pilots[site]['count']
clouds[cloud]['pilots'] += pilots[site]['count']
else:
clouds[cloud]['sites'][site]['pilots'] = 0
clouds[cloud]['sites'][site]['states'] = {}
for state in sitestatelist:
clouds[cloud]['sites'][site]['states'][state] = {}
clouds[cloud]['sites'][site]['states'][state]['name'] = state
clouds[cloud]['sites'][site]['states'][state]['count'] = 0
## Convert dict to summary list
cloudkeys = clouds.keys()
cloudkeys.sort()
fullsummary = []
allstated = {}
allstated['finished'] = allstated['failed'] = 0
allclouds = {}
allclouds['name'] = 'All'
allclouds['count'] = totjobs
allclouds['pilots'] = 0
allclouds['sites'] = {}
allclouds['states'] = totstates
allclouds['statelist'] = []
for state in sitestatelist:
allstate = {}
allstate['name'] = state
allstate['count'] = totstates[state]
allstated[state] = totstates[state]
allclouds['statelist'].append(allstate)
if int(allstated['finished']) + int(allstated['failed']) > 0:
allclouds['pctfail'] = int(100.*float(allstated['failed'])/(allstated['finished']+allstated['failed']))
else:
allclouds['pctfail'] = 0
for cloud in cloudkeys:
allclouds['pilots'] += clouds[cloud]['pilots']
fullsummary.append(allclouds)
for cloud in cloudkeys:
for state in sitestatelist:
clouds[cloud]['statelist'].append(clouds[cloud]['states'][state])
sites = clouds[cloud]['sites']
sitekeys = sites.keys()
sitekeys.sort()
cloudsummary = []
for site in sitekeys:
sitesummary = []
for state in sitestatelist:
sitesummary.append(sites[site]['states'][state])
sites[site]['summary'] = sitesummary
if sites[site]['states']['finished']['count'] + sites[site]['states']['failed']['count'] > 0:
sites[site]['pctfail'] = int(100.*float(sites[site]['states']['failed']['count'])/(sites[site]['states']['finished']['count']+sites[site]['states']['failed']['count']))
else:
sites[site]['pctfail'] = 0
cloudsummary.append(sites[site])
clouds[cloud]['summary'] = cloudsummary
if clouds[cloud]['states']['finished']['count'] + clouds[cloud]['states']['failed']['count'] > 0:
clouds[cloud]['pctfail'] = int(100.*float(clouds[cloud]['states']['failed']['count'])/(clouds[cloud]['states']['finished']['count']+clouds[cloud]['states']['failed']['count']))
fullsummary.append(clouds[cloud])
if 'sortby' in request.session['requestParams']:
if request.session['requestParams']['sortby'] in statelist:
fullsummary = sorted(fullsummary, key=lambda x:x['states'][request.session['requestParams']['sortby']],reverse=True)
cloudsummary = sorted(cloudsummary, key=lambda x:x['states'][request.session['requestParams']['sortby']],reverse=True)
for cloud in clouds:
clouds[cloud]['summary'] = sorted(clouds[cloud]['summary'], key=lambda x:x['states'][request.session['requestParams']['sortby']]['count'],reverse=True)
elif request.session['requestParams']['sortby'] == 'pctfail':
fullsummary = sorted(fullsummary, key=lambda x:x['pctfail'],reverse=True)
cloudsummary = sorted(cloudsummary, key=lambda x:x['pctfail'],reverse=True)
for cloud in clouds:
clouds[cloud]['summary'] = sorted(clouds[cloud]['summary'], key=lambda x:x['pctfail'],reverse=True)
return fullsummary
def dashTaskSummary(request, hours, limit=999999, view='all'):
query = setupView(request,hours=hours,limit=limit,opmode=view)
tasksummarydata = taskSummaryData(request, query)
tasks = {}
totstates = {}
totjobs = 0
for state in sitestatelist:
totstates[state] = 0
taskids = []
for rec in tasksummarydata:
if 'jeditaskid' in rec and rec['jeditaskid'] and rec['jeditaskid'] > 0:
taskids.append( { 'jeditaskid' : rec['jeditaskid'] } )
elif 'taskid' in rec and rec['taskid'] and rec['taskid'] > 0 :
taskids.append( { 'taskid' : rec['taskid'] } )
tasknamedict = taskNameDict(taskids)
for rec in tasksummarydata:
if 'jeditaskid' in rec and rec['jeditaskid'] and rec['jeditaskid'] > 0:
taskid = rec['jeditaskid']
tasktype = 'JEDI'
elif 'taskid' in rec and rec['taskid'] and rec['taskid'] > 0 :
taskid = rec['taskid']
tasktype = 'old'
else:
continue
jobstatus = rec['jobstatus']
count = rec['jobstatus__count']
if jobstatus not in sitestatelist: continue
totjobs += count
totstates[jobstatus] += count
if taskid not in tasks:
tasks[taskid] = {}
tasks[taskid]['taskid'] = taskid
if taskid in tasknamedict:
tasks[taskid]['name'] = tasknamedict[taskid]
else:
tasks[taskid]['name'] = str(taskid)
tasks[taskid]['count'] = 0
tasks[taskid]['states'] = {}
tasks[taskid]['statelist'] = []
for state in sitestatelist:
tasks[taskid]['states'][state] = {}
tasks[taskid]['states'][state]['name'] = state
tasks[taskid]['states'][state]['count'] = 0
tasks[taskid]['count'] += count
tasks[taskid]['states'][jobstatus]['count'] += count
if view == 'analysis':
## Show only tasks starting with 'user.'
kys = tasks.keys()
for t in kys:
if not str(tasks[t]['name'].encode('ascii','ignore')).startswith('user.'): del tasks[t]
## Convert dict to summary list
taskkeys = tasks.keys()
taskkeys.sort()
fullsummary = []
for taskid in taskkeys:
for state in sitestatelist:
tasks[taskid]['statelist'].append(tasks[taskid]['states'][state])
if tasks[taskid]['states']['finished']['count'] + tasks[taskid]['states']['failed']['count'] > 0:
tasks[taskid]['pctfail'] = int(100.*float(tasks[taskid]['states']['failed']['count'])/(tasks[taskid]['states']['finished']['count']+tasks[taskid]['states']['failed']['count']))
fullsummary.append(tasks[taskid])
if 'sortby' in request.session['requestParams']:
if request.session['requestParams']['sortby'] in sitestatelist:
fullsummary = sorted(fullsummary, key=lambda x:x['states'][request.session['requestParams']['sortby']],reverse=True)
elif request.session['requestParams']['sortby'] == 'pctfail':
fullsummary = sorted(fullsummary, key=lambda x:x['pctfail'],reverse=True)
return fullsummary
def preProcess(request):
''' todo:
0. Decide the table structure and the approach to parameter aggregates
1. Get the list of jobs modified later than the previously saved last-modified job
2. For each of them, calculate the output variables of the error summary.
   Factor out the set of request parameters that lead to different flows.
3. Save the new variables in a dedicated table in the form jobid ~ variable
4. When a new query comes, select the corresponding ids from the job tables.
5. Select the variables from the transient table.
6. Merge them and display the output.
'''
# data = {}
# dashTaskSummary_preprocess(request)
# response = render_to_response('preprocessLog.html', data, RequestContext(request))
# patch_response_headers(response, cache_timeout=-1)
return None
#class prepDashTaskSummary:
def dashTaskSummary_preprocess(request):
# query = setupView(request,hours=hours,limit=limit,opmode=view)
query = { 'modificationtime__range' : [timezone.now() - timedelta(hours=LAST_N_HOURS_MAX), timezone.now()] }
tasksummarydata = []
querynotime = copy.deepcopy(query)
del querynotime['modificationtime__range']
jobvalues = ('jobstatus', 'computingsite', 'produsername', 'transexitcode', 'piloterrorcode', 'processingtype', 'prodsourcelabel')
counts = [Count(f) for f in jobvalues]
for idfield in ('taskid', 'jeditaskid'):
    for table, tquery in ((Jobsactive4, querynotime), (Jobsdefined4, querynotime), (Jobswaiting4, querynotime), (Jobsarchived4, query)):
        tasksummarydata.extend(table.objects.filter(**tquery).values(idfield, *jobvalues).annotate(*counts).order_by(idfield, 'jobstatus')[:request.session['JOB_LIMIT']])
'''
tasks = {}
totstates = {}
totjobs = 0
for state in sitestatelist:
totstates[state] = 0
taskids = []
for rec in tasksummarydata:
if 'jeditaskid' in rec and rec['jeditaskid'] and rec['jeditaskid'] > 0:
taskids.append( { 'jeditaskid' : rec['jeditaskid'] } )
elif 'taskid' in rec and rec['taskid'] and rec['taskid'] > 0 :
taskids.append( { 'taskid' : rec['taskid'] } )
tasknamedict = taskNameDict(taskids)
for rec in tasksummarydata:
if 'jeditaskid' in rec and rec['jeditaskid'] and rec['jeditaskid'] > 0:
taskid = rec['jeditaskid']
tasktype = 'JEDI'
elif 'taskid' in rec and rec['taskid'] and rec['taskid'] > 0 :
taskid = rec['taskid']
tasktype = 'old'
else:
continue
jobstatus = rec['jobstatus']
count = rec['jobstatus__count']
if jobstatus not in sitestatelist: continue
totjobs += count
totstates[jobstatus] += count
if taskid not in tasks:
tasks[taskid] = {}
tasks[taskid]['taskid'] = taskid
if taskid in tasknamedict:
tasks[taskid]['name'] = tasknamedict[taskid]
else:
tasks[taskid]['name'] = str(taskid)
tasks[taskid]['count'] = 0
tasks[taskid]['states'] = {}
tasks[taskid]['statelist'] = []
for state in sitestatelist:
tasks[taskid]['states'][state] = {}
tasks[taskid]['states'][state]['name'] = state
tasks[taskid]['states'][state]['count'] = 0
tasks[taskid]['count'] += count
tasks[taskid]['states'][jobstatus]['count'] += count
if view == 'analysis':
## Show only tasks starting with 'user.'
kys = tasks.keys()
for t in kys:
if not str(tasks[t]['name'].encode('ascii','ignore')).startswith('user.'): del tasks[t]
## Convert dict to summary list
taskkeys = tasks.keys()
taskkeys.sort()
fullsummary = []
for taskid in taskkeys:
for state in sitestatelist:
tasks[taskid]['statelist'].append(tasks[taskid]['states'][state])
if tasks[taskid]['states']['finished']['count'] + tasks[taskid]['states']['failed']['count'] > 0:
tasks[taskid]['pctfail'] = int(100.*float(tasks[taskid]['states']['failed']['count'])/(tasks[taskid]['states']['finished']['count']+tasks[taskid]['states']['failed']['count']))
fullsummary.append(tasks[taskid])
if 'sortby' in request.session['requestParams']:
if request.session['requestParams']['sortby'] in sitestatelist:
fullsummary = sorted(fullsummary, key=lambda x:x['states'][request.session['requestParams']['sortby']],reverse=True)
elif request.session['requestParams']['sortby'] == 'pctfail':
fullsummary = sorted(fullsummary, key=lambda x:x['pctfail'],reverse=True)
'''
return -1
#https://github.com/PanDAWMS/panda-jedi/blob/master/pandajedi/jedicore/JediCoreUtils.py
def getEffectiveFileSize(fsize,startEvent,endEvent,nEvents):
inMB = 1024 * 1024
if fsize in [None,0]:
# use dummy size for pseudo input
effectiveFsize = inMB
elif nEvents != None and startEvent != None and endEvent != None:
# take event range into account
effectiveFsize = long(float(fsize)*float(endEvent-startEvent+1)/float(nEvents))
else:
effectiveFsize = fsize
# use dummy size if input is too small
if effectiveFsize == 0:
effectiveFsize = inMB
# in MB
effectiveFsize = float(effectiveFsize) / inMB
# return
return effectiveFsize
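# Worked example (hypothetical numbers): a 100 MB file (fsize=104857600) covering
# events 1..10 of nEvents=100 gives long(104857600 * 10/100.) = 10485760 bytes,
# returned as 10485760/(1024*1024) = 10.0 MB.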
def calculateRWwithPrio_JEDI(query):
#query = {}
retRWMap = {}
retNREMJMap = {}
values = [ 'jeditaskid', 'datasetid', 'modificationtime', 'cloud', 'nrem', 'walltime', 'fsize', 'startevent', 'endevent', 'nevents' ]
progressEntries = []
progressEntries.extend(GetRWWithPrioJedi3DAYS.objects.filter(**query).values(*values))
allCloudsRW = 0
allCloudsNREMJ = 0
if len(progressEntries) > 0:
for progrEntry in progressEntries:
if progrEntry['fsize'] != None:
effectiveFsize = getEffectiveFileSize(progrEntry['fsize'], progrEntry['startevent'], progrEntry['endevent'], progrEntry['nevents'])
tmpRW = progrEntry['nrem'] * effectiveFsize * progrEntry['walltime']
if not progrEntry['cloud'] in retRWMap:
retRWMap[progrEntry['cloud']] = 0
retRWMap[progrEntry['cloud']] += tmpRW
allCloudsRW += tmpRW
if not progrEntry['cloud'] in retNREMJMap:
retNREMJMap[progrEntry['cloud']] = 0
retNREMJMap[progrEntry['cloud']] += progrEntry['nrem']
allCloudsNREMJ += progrEntry['nrem']
retRWMap['All'] = allCloudsRW
retNREMJMap['All'] = allCloudsNREMJ
for cloudName, rwValue in retRWMap.iteritems():
retRWMap[cloudName] = int(rwValue/24/3600)
return retRWMap, retNREMJMap
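# RW per cloud is the sum of nrem * effective file size (MB) * walltime over the
# remaining work, rescaled by /24/3600 at the end; e.g. (hypothetical) nrem=100,
# 10 MB files, walltime=864 s contribute int(100*10*864/86400) = 10 RW units.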
@cache_page(60*20)
def worldjobs(request):
valid, response = initRequest(request)
query = {}
values = [ 'nucleus', 'computingsite', 'jobstatus', 'countjobsinstate' ]
worldTasksSummary = []
worldTasksSummary.extend(JobsWorldView.objects.filter(**query).values(*values))
nucleus = {}
statelist1 = statelist
# del statelist1[statelist1.index('jclosed')]
# del statelist1[statelist1.index('pending')]
if len(worldTasksSummary) > 0:
for jobs in worldTasksSummary:
if jobs['nucleus'] in nucleus:
if jobs['computingsite'] in nucleus[jobs['nucleus']]:
nucleus[jobs['nucleus']][jobs['computingsite']][jobs['jobstatus']] = jobs['countjobsinstate']
else:
nucleus[jobs['nucleus']][jobs['computingsite']] = {}
for state in statelist1:
nucleus[jobs['nucleus']][jobs['computingsite']][state] = 0
nucleus[jobs['nucleus']][jobs['computingsite']][jobs['jobstatus']] = jobs['countjobsinstate']
else:
nucleus[jobs['nucleus']]={}
nucleus[jobs['nucleus']][jobs['computingsite']] = {}
for state in statelist1:
nucleus[jobs['nucleus']][jobs['computingsite']][state] = 0
nucleus[jobs['nucleus']][jobs['computingsite']][jobs['jobstatus']] = jobs['countjobsinstate']
nucleusSummary = {}
for nucleusInfo in nucleus:
nucleusSummary[nucleusInfo] = {}
for site in nucleus[nucleusInfo]:
for state in nucleus[nucleusInfo][site]:
if state in nucleusSummary[nucleusInfo]:
nucleusSummary[nucleusInfo][state] += nucleus[nucleusInfo][site][state]
else:
nucleusSummary[nucleusInfo][state] = nucleus[nucleusInfo][site][state]
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
xurl = extensibleURL(request)
nosorturl = removeParam(xurl, 'sortby',mode='extensible')
# del request.session['TFIRST']
# del request.session['TLAST']
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'url' : request.path,
'nucleuses': nucleus,
'nucleussummary': nucleusSummary,
'statelist':statelist1,
'xurl' : xurl,
'nosorturl' : nosorturl,
'user' : None,
}
##self monitor
endSelfMonitor(request)
response = render_to_response('worldjobs.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
# del request.session['TFIRST']
# del request.session['TLAST']
data = {
}
return HttpResponse(json.dumps(data, cls=DateEncoder), mimetype='text/html')
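# HS06s consumption summary per nucleus and satellite site, fetched through the
# GETHS06SSUMMARY/GETHS06STOTSUMMARY PL/SQL helpers; totals are scaled by
# 1/1000/3600/24 (i.e. to kHS06-days) when no reqid/jeditaskid filter is given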
@cache_page(60*20)
def worldhs06s(request):
valid, response = initRequest(request)
roundflag=False
condition=''
    # Build the filter clause in a fixed order so 'AND' is inserted correctly when
    # both reqid and jeditaskid are supplied (dict iteration order is unreliable)
    if 'reqid' in request.session['requestParams']:
        condition += 't.reqid=' + str(request.session['requestParams']['reqid'])
    if 'jeditaskid' in request.session['requestParams']:
        if len(condition) > 1:
            condition += ' AND '
        condition += 't.jeditaskid=' + str(request.session['requestParams']['jeditaskid'])
if len(condition) < 1:
condition = '(1=1)'
roundflag=True
cur = connection.cursor()
cur.execute("SELECT * FROM table(ATLAS_PANDABIGMON.GETHS06SSUMMARY('%s'))" % condition)
hspersite = cur.fetchall()
cur.close()
newcur = connection.cursor()
newcur.execute("SELECT * FROM table(ATLAS_PANDABIGMON.GETHS06STOTSUMMARY('%s'))" % condition)
hspernucleus = newcur.fetchall()
newcur.close()
keys = [ 'nucleus', 'computingsite', 'usedhs06spersite', 'failedhs06spersite' ]
totkeys = [ 'nucleus', 'ntaskspernucleus', 'toths06spernucleus' ]
worldHS06sSummary = [dict(zip(keys,row)) for row in hspersite]
worldHS06sTotSummary = [dict(zip(totkeys,row)) for row in hspernucleus]
worldHS06sSummaryByNucleus = {}
nucleus={}
totnucleus={}
for nucl in worldHS06sTotSummary:
totnucleus[nucl['nucleus']]={}
totnucleus[nucl['nucleus']]['ntaskspernucleus']=nucl['ntaskspernucleus']
if roundflag:
totnucleus[nucl['nucleus']]['toths06spernucleus']=round(nucl['toths06spernucleus']/1000./3600/24,2) if nucl['toths06spernucleus'] is not None else 0
else:
totnucleus[nucl['nucleus']]['toths06spernucleus']=nucl['toths06spernucleus'] if nucl['toths06spernucleus'] is not None else 0
for site in worldHS06sSummary:
if site['nucleus'] not in nucleus:
nucleus[site['nucleus']]=[]
dictsite={}
dictsite['computingsite']=site['computingsite']
dictsite['usedhs06spersite']=site['usedhs06spersite'] if site['usedhs06spersite'] else 0
dictsite['failedhs06spersite']=site['failedhs06spersite'] if site['failedhs06spersite'] else 0
dictsite['failedhs06spersitepct']=100*dictsite['failedhs06spersite']/dictsite['usedhs06spersite'] if (site['usedhs06spersite'] and site['usedhs06spersite']>0) else 0
nucleus[site['nucleus']].append(dictsite)
for nuc in nucleus:
worldHS06sSummaryByNucleus[nuc]={}
worldHS06sSummaryByNucleus[nuc]['usedhs06spernucleus']=sum([site['usedhs06spersite'] for site in nucleus[nuc]])
worldHS06sSummaryByNucleus[nuc]['failedhs06spernucleus']=sum([site['failedhs06spersite'] for site in nucleus[nuc]])
if roundflag:
worldHS06sSummaryByNucleus[nuc]['usedhs06spernucleus'] = round(worldHS06sSummaryByNucleus[nuc]['usedhs06spernucleus']/1000./3600/24,2)
worldHS06sSummaryByNucleus[nuc]['failedhs06spernucleus'] = round(worldHS06sSummaryByNucleus[nuc]['failedhs06spernucleus']/1000./3600/24,2)
worldHS06sSummaryByNucleus[nuc]['failedhs06spernucleuspct']=int(100*worldHS06sSummaryByNucleus[nuc]['failedhs06spernucleus']/worldHS06sSummaryByNucleus[nuc]['usedhs06spernucleus']) if worldHS06sSummaryByNucleus[nuc]['usedhs06spernucleus'] and worldHS06sSummaryByNucleus[nuc]['usedhs06spernucleus']>0 else 0
if nuc in totnucleus:
worldHS06sSummaryByNucleus[nuc]['ntaskspernucleus']=totnucleus[nuc]['ntaskspernucleus']
worldHS06sSummaryByNucleus[nuc]['toths06spernucleus']=totnucleus[nuc]['toths06spernucleus']
if 'sortby' in request.session['requestParams']:
sortby=request.session['requestParams']['sortby']
reverseflag=False
if request.session['requestParams']['sortby']=='used-desc':
sortcol='usedhs06spersite'
reverseflag=True
elif request.session['requestParams']['sortby']=='used-asc':
sortcol='usedhs06spersite'
elif request.session['requestParams']['sortby']=='failed-desc':
sortcol='failedhs06spersite'
reverseflag=True
elif request.session['requestParams']['sortby']=='failed-asc':
sortcol='failedhs06spersite'
elif request.session['requestParams']['sortby']=='failedpct-desc':
sortcol='failedhs06spersitepct'
reverseflag=True
elif request.session['requestParams']['sortby']=='failedpct-asc':
sortcol='failedhs06spersitepct'
elif request.session['requestParams']['sortby']=='satellite-desc':
sortcol='computingsite'
reverseflag=True
else:
sortcol='computingsite'
for nuc in nucleus:
nucleus[nuc]=sorted(nucleus[nuc],key=lambda x:x[sortcol],reverse=reverseflag)
else:
sortby = 'satellite-asc'
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
xurl = extensibleURL(request)
nosorturl = removeParam(xurl, 'sortby',mode='extensible')
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'url' : request.path,
'xurl' : xurl,
'nosorturl' : nosorturl,
'user' : None,
'hssitesum' : nucleus,
'hsnucleussum' : worldHS06sSummaryByNucleus,
'roundflag':roundflag,
'sortby' : sortby,
}
##self monitor
endSelfMonitor(request)
response = render_to_response('worldHS06s.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
data = {
}
return HttpResponse(json.dumps(data, cls=DateEncoder), mimetype='text/html')
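# Main production/analysis dashboard: per-cloud (or per-region) job state
# summaries plus task summaries, remaining work and jobs-left estimates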
@cache_page(60*20)
def dashboard(request, view='production'):
valid, response = initRequest(request)
if not valid: return response
taskdays = 3
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
VOMODE = 'atlas'
else:
VOMODE = ''
if VOMODE != 'atlas':
hours = 24*taskdays
else:
hours = 12
hoursSinceUpdate = 36
if view == 'production':
noldtransjobs, transclouds, transrclouds = stateNotUpdated(request, state='transferring', hoursSinceUpdate=hoursSinceUpdate, count=True)
else:
hours = 3
noldtransjobs = 0
transclouds = []
transrclouds = []
errthreshold = 10
query = setupView(request,hours=hours,limit=999999,opmode=view)
if 'mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'task':
return dashTasks(request, hours, view)
if VOMODE != 'atlas':
vosummarydata = voSummary(query)
vos = {}
for rec in vosummarydata:
vo = rec['vo']
#if vo == None: vo = 'Unassigned'
if vo == None: continue
jobstatus = rec['jobstatus']
count = rec['jobstatus__count']
if vo not in vos:
vos[vo] = {}
vos[vo]['name'] = vo
vos[vo]['count'] = 0
vos[vo]['states'] = {}
vos[vo]['statelist'] = []
for state in sitestatelist:
vos[vo]['states'][state] = {}
vos[vo]['states'][state]['name'] = state
vos[vo]['states'][state]['count'] = 0
vos[vo]['count'] += count
vos[vo]['states'][jobstatus]['count'] += count
## Convert dict to summary list
vokeys = vos.keys()
vokeys.sort()
vosummary = []
for vo in vokeys:
for state in sitestatelist:
vos[vo]['statelist'].append(vos[vo]['states'][state])
if int(vos[vo]['states']['finished']['count']) + int(vos[vo]['states']['failed']['count']) > 0:
vos[vo]['pctfail'] = int(100.*float(vos[vo]['states']['failed']['count'])/(vos[vo]['states']['finished']['count']+vos[vo]['states']['failed']['count']))
vosummary.append(vos[vo])
if 'sortby' in request.session['requestParams']:
if request.session['requestParams']['sortby'] in statelist:
vosummary = sorted(vosummary, key=lambda x:x['states'][request.session['requestParams']['sortby']],reverse=True)
elif request.session['requestParams']['sortby'] == 'pctfail':
vosummary = sorted(vosummary, key=lambda x:x['pctfail'],reverse=True)
else:
if view == 'production':
errthreshold = 5
else:
errthreshold = 15
vosummary = []
cloudview = 'cloud'
if 'cloudview' in request.session['requestParams']:
cloudview = request.session['requestParams']['cloudview']
if view == 'analysis':
cloudview = 'region'
elif view != 'production':
cloudview = 'N/A'
fullsummary = dashSummary(request, hours=hours, view=view, cloudview=cloudview)
cloudTaskSummary = wgTaskSummary(request,fieldname='cloud', view=view, taskdays=taskdays)
jobsLeft = {}
rw = {}
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
rwData, nRemJobs = calculateRWwithPrio_JEDI(query)
for cloud in fullsummary:
if cloud['name'] in nRemJobs.keys():
jobsLeft[cloud['name']] = nRemJobs[cloud['name']]
if cloud['name'] in rwData.keys():
rw[cloud['name']] = rwData[cloud['name']]
request.session['max_age_minutes'] = 6
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
xurl = extensibleURL(request)
nosorturl = removeParam(xurl, 'sortby',mode='extensible')
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'url' : request.path,
'xurl' : xurl,
'nosorturl' : nosorturl,
'user' : None,
'summary' : fullsummary,
'vosummary' : vosummary,
'view' : view,
'mode' : 'site',
'cloudview': cloudview,
'hours' : hours,
'errthreshold' : errthreshold,
'cloudTaskSummary' : cloudTaskSummary ,
'taskstates' : taskstatedict,
'taskdays' : taskdays,
'noldtransjobs' : noldtransjobs,
'transclouds' : transclouds,
'transrclouds' : transrclouds,
'hoursSinceUpdate' : hoursSinceUpdate,
'jobsLeft' : jobsLeft,
'rw': rw
}
##self monitor
endSelfMonitor(request)
response = render_to_response('dashboard.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
data = {
'summary' : fullsummary,
'vosummary' : vosummary,
'view' : view,
'mode' : 'site',
'cloudview': cloudview,
'hours' : hours,
'errthreshold' : errthreshold,
'cloudTaskSummary' : cloudTaskSummary ,
'taskstates' : taskstatedict,
'taskdays' : taskdays,
'noldtransjobs' : noldtransjobs,
'transclouds' : transclouds,
'transrclouds' : transrclouds,
'hoursSinceUpdate' : hoursSinceUpdate,
'jobsLeft' : jobsLeft,
'rw': rw
}
return HttpResponse(json.dumps(data, cls=DateEncoder), mimetype='text/html')
def dashAnalysis(request):
return dashboard(request,view='analysis')
def dashProduction(request):
return dashboard(request,view='production')
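# Task-oriented dashboard (mode=task): cloud task summaries over the last 'days' days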
def dashTasks(request, hours, view='production'):
valid, response = initRequest(request)
if not valid: return response
if view == 'production':
errthreshold = 5
else:
errthreshold = 15
if 'days' in request.session['requestParams']:
taskdays = int(request.session['requestParams']['days'])
else:
taskdays = 7
hours = taskdays*24
query = setupView(request,hours=hours,limit=999999,opmode=view, querytype='task')
cloudTaskSummary = wgTaskSummary(request,fieldname='cloud', view=view, taskdays=taskdays)
    # taskJobSummary = dashTaskSummary(request, hours, view)  # not particularly informative
taskJobSummary = []
if 'display_limit' in request.session['requestParams']:
try:
display_limit = int(request.session['requestParams']['display_limit'])
except:
display_limit = 300
else:
display_limit = 300
cloudview = 'cloud'
if 'cloudview' in request.session['requestParams']:
cloudview = request.session['requestParams']['cloudview']
if view == 'analysis':
cloudview = 'region'
elif view != 'production':
cloudview = 'N/A'
fullsummary = dashSummary(request, hours=hours, view=view, cloudview=cloudview)
jobsLeft = {}
rw = {}
rwData, nRemJobs = calculateRWwithPrio_JEDI(query)
for cloud in fullsummary:
if cloud['name'] in nRemJobs.keys():
jobsLeft[cloud['name']] = nRemJobs[cloud['name']]
if cloud['name'] in rwData.keys():
rw[cloud['name']] = rwData[cloud['name']]
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
xurl = extensibleURL(request)
nosorturl = removeParam(xurl, 'sortby',mode='extensible')
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'url' : request.path,
'xurl' : xurl,
'nosorturl' : nosorturl,
'user' : None,
'view' : view,
'mode' : 'task',
'hours' : hours,
'errthreshold' : errthreshold,
'cloudTaskSummary' : cloudTaskSummary,
'taskstates' : taskstatedict,
'taskdays' : taskdays,
'taskJobSummary' : taskJobSummary[:display_limit],
'display_limit' : display_limit,
'jobsLeft' : jobsLeft,
'rw': rw
}
##self monitor
endSelfMonitor(request)
response = render_to_response('dashboard.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
remainingEvents = RemainedEventsPerCloud3dayswind.objects.values('cloud','nrem')
remainingEventsSet = {}
for remev in remainingEvents:
remainingEventsSet[remev['cloud']] = remev['nrem']
data = {
'jobsLeft' : jobsLeft,
'remainingWeightedEvents':remainingEventsSet,
}
return HttpResponse(json.dumps(data), mimetype='text/html')
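# Return an HTML snippet with JEDI event state counts for one event service task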
@cache_page(60*20)
def taskESExtendedInfo(request):
if 'jeditaskid' in request.REQUEST:
jeditaskid = int(request.REQUEST['jeditaskid'])
else:
return HttpResponse("Not jeditaskid supplied", mimetype='text/html')
jquery = {}
jquery['jeditaskid'] = jeditaskid
jobs = []
jobs.extend(Jobsactive4.objects.filter(**jquery).values('pandaid', 'jeditaskid'))
jobs.extend(Jobsarchived4.objects.filter(**jquery).values('pandaid', 'jeditaskid'))
taskdict = {}
for job in jobs:
taskdict[job['pandaid']] = job['jeditaskid']
estaskdict = {}
esjobs = []
for job in jobs:
esjobs.append(job['pandaid'])
random.seed()
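    # Stage the pandaids in a temp table keyed by a random transaction key rather
    # than a large IN-list (cf. the commented-out ORM variant below); the staged
    # rows are deleted again once the event query has run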
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
else:
tmpTableName = "TMP_IDS1"
transactionKey = random.randrange(1000000)
connection.enter_transaction_management()
new_cur = connection.cursor()
executionData = []
for id in esjobs:
executionData.append((id, transactionKey))
query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
new_cur.executemany(query, executionData)
connection.commit()
new_cur.execute(
"SELECT PANDAID,STATUS FROM ATLAS_PANDA.JEDI_EVENTS WHERE PANDAID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (
tmpTableName, transactionKey))
evtable = dictfetchall(new_cur)
# esquery = {}
# esquery['pandaid__in'] = esjobs
# evtable = JediEvents.objects.filter(**esquery).values('pandaid','status')
new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
connection.commit()
connection.leave_transaction_management()
for ev in evtable:
taskid = taskdict[ev['PANDAID']]
if taskid not in estaskdict:
estaskdict[taskid] = {}
for s in eventservicestatelist:
estaskdict[taskid][s] = 0
evstat = eventservicestatelist[ev['STATUS']]
estaskdict[taskid][evstat] += 1
estaskstr = ''
if jeditaskid in estaskdict:
        for s in estaskdict[jeditaskid]:
            if estaskdict[jeditaskid][s] > 0:
                estaskstr += " %s(%s) " % (s, estaskdict[jeditaskid][s])
return HttpResponse(estaskstr, mimetype='text/html')
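# Task listing page: queries JediTasks (with wildcard support), decorates tasks
# with scouting and event info, and renders taskList.html or taskListES.html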
@csrf_exempt
@cache_page(60*20)
def taskList(request):
valid, response = initRequest(request)
if 'limit' in request.session['requestParams']:
limit = int(request.session['requestParams']['limit'])
else:
limit = 5000
if not valid: return response
if 'tasktype' in request.session['requestParams'] and request.session['requestParams']['tasktype'].startswith('anal'):
hours = 3*24
else:
hours = 7*24
eventservice = False
if 'eventservice' in request.session['requestParams'] and ( request.session['requestParams']['eventservice']=='eventservice' or request.session['requestParams']['eventservice']=='1'): eventservice = True
if eventservice: hours = 7*24
query, wildCardExtension,LAST_N_HOURS_MAX = setupView(request, hours=hours, limit=9999999, querytype='task', wildCardExt=True)
if 'statenotupdated' in request.session['requestParams']:
tasks = taskNotUpdated(request, query, wildCardExtension)
else:
tasks = JediTasks.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values()
tasks = cleanTaskList(request, tasks)
ntasks = len(tasks)
nmax = ntasks
# if 'display_limit' in request.session['requestParams']:
# and int(request.session['requestParams']['display_limit']) < nmax:
# display_limit = int(request.session['requestParams']['display_limit'])
# nmax = display_limit
# url_nolimit = removeParam(request.get_full_path(), 'display_limit')
# else:
# display_limit = 300
# nmax = display_limit
# url_nolimit = request.get_full_path()
if 'display_limit' not in request.session['requestParams']:
display_limit = 300
url_nolimit = request.get_full_path() +"&display_limit="+str(nmax)
else:
display_limit = int(request.session['requestParams']['display_limit'])
nmax = display_limit
url_nolimit = request.get_full_path() +"&display_limit="+str(nmax)
#from django.db import connection
#print 'SQL query:', connection.queries
tasks=getTaskScoutingInfo(tasks,nmax)
## For event service, pull the jobs and event ranges
doESCalc = False
if eventservice and doESCalc:
taskl = []
for task in tasks:
taskl.append(task['jeditaskid'])
jquery = {}
jquery['jeditaskid__in'] = taskl
jobs = []
jobs.extend(Jobsactive4.objects.filter(**jquery).values('pandaid','jeditaskid'))
jobs.extend(Jobsarchived4.objects.filter(**jquery).values('pandaid','jeditaskid'))
taskdict = {}
for job in jobs:
taskdict[job['pandaid']] = job['jeditaskid']
estaskdict = {}
esjobs = []
for job in jobs:
esjobs.append(job['pandaid'])
random.seed()
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
else:
tmpTableName = "TMP_IDS1"
transactionKey = random.randrange(1000000)
connection.enter_transaction_management()
new_cur = connection.cursor()
executionData = []
for id in esjobs:
executionData.append((id,transactionKey))
query = """INSERT INTO """+tmpTableName+"""(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
new_cur.executemany(query, executionData)
connection.commit()
new_cur.execute("SELECT PANDAID,STATUS FROM ATLAS_PANDA.JEDI_EVENTS WHERE PANDAID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey))
evtable = dictfetchall(new_cur)
# esquery = {}
# esquery['pandaid__in'] = esjobs
# evtable = JediEvents.objects.filter(**esquery).values('pandaid','status')
new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
connection.commit()
connection.leave_transaction_management()
for ev in evtable:
taskid = taskdict[ev['PANDAID']]
if taskid not in estaskdict:
estaskdict[taskid] = {}
for s in eventservicestatelist:
estaskdict[taskid][s] = 0
evstat = eventservicestatelist[ev['STATUS']]
estaskdict[taskid][evstat] += 1
for task in tasks:
taskid = task['jeditaskid']
if taskid in estaskdict:
estaskstr = ''
for s in estaskdict[taskid]:
if estaskdict[taskid][s] > 0:
estaskstr += " %s(%s) " % ( s, estaskdict[taskid][s] )
task['estaskstr'] = estaskstr
## set up google flow diagram
flowstruct = buildGoogleFlowDiagram(request, tasks=tasks)
xurl = extensibleURL(request)
nosorturl = removeParam(xurl, 'sortby',mode='extensible')
if (('HTTP_ACCEPT' in request.META) and(request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or ('json' in request.session['requestParams']):
## Add info to the json dump if the request is for a single task
if len(tasks) == 1:
id = tasks[0]['jeditaskid']
dsquery = { 'jeditaskid' : id, 'type__in' : ['input', 'output'] }
dsets = JediDatasets.objects.filter(**dsquery).values()
dslist = []
for ds in dsets:
dslist.append(ds)
tasks[0]['datasets'] = dslist
dump = json.dumps(tasks, cls=DateEncoder)
del request.session['TFIRST']
del request.session['TLAST']
return HttpResponse(dump, mimetype='text/html')
else:
sumd = taskSummaryDict(request,tasks)
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'tasks': tasks[:nmax],
'ntasks' : ntasks,
'sumd' : sumd,
'xurl' : xurl,
'nosorturl' : nosorturl,
'url_nolimit' : url_nolimit,
'display_limit' : display_limit,
'flowstruct' : flowstruct,
}
##self monitor
endSelfMonitor(request)
if eventservice:
response = render_to_response('taskListES.html', data, RequestContext(request))
else:
response = render_to_response('taskList.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
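# Decorate the tasks to be displayed with remaining/total event counts and flags
# for failures during scouting (critical and non-critical)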
def getTaskScoutingInfo(tasks,nmax):
    tasksToBeDisplayed = tasks[:nmax]
    tasksIdToBeDisplayed = [task['jeditaskid'] for task in tasksToBeDisplayed]
tquery = {}
tquery['jeditaskid__in'] = tasksIdToBeDisplayed
tasksEventInfo = GetEventsForTask.objects.filter(**tquery).values('jeditaskid','totevrem', 'totev')
failedInScouting = JediDatasets.objects.filter(**tquery).extra(where=['NFILESFAILED > NFILESTOBEUSED']).values('jeditaskid')
taskStatuses = dict((task['jeditaskid'], task['status']) for task in tasks)
failedInScouting = [ item['jeditaskid'] for item in failedInScouting if (taskStatuses[item['jeditaskid']] in ('failed', 'broken'))]
#scoutingHasCritFailures
tquery['nfilesfailed__gt'] = 0
scoutingHasCritFailures = JediDatasets.objects.filter(**tquery).values('jeditaskid')
scoutingHasCritFailures = [ item['jeditaskid'] for item in scoutingHasCritFailures if (taskStatuses[item['jeditaskid']] in ('scouting'))]
tquery = {}
tquery['nfilesfailed'] = 0
tquery['jeditaskid__in'] = tasksIdToBeDisplayed
scoutingHasNonCritFailures = JediDatasets.objects.filter(**tquery).values('jeditaskid')
scoutingHasNonCritFailures = [ item['jeditaskid'] for item in scoutingHasNonCritFailures if (taskStatuses[item['jeditaskid']] == 'scouting' and item['jeditaskid'] not in scoutingHasCritFailures )]
tquery = {}
tquery['jeditaskid__in'] = scoutingHasNonCritFailures
tquery['relationtype'] = 'retry'
scoutingHasNonCritFailures = JediJobRetryHistory.objects.filter(**tquery).values('jeditaskid')
scoutingHasNonCritFailures = [ item['jeditaskid'] for item in scoutingHasNonCritFailures]
    for task in tasksToBeDisplayed:
        correspondingEventInfo = filter(lambda n: n.get('jeditaskid') == task['jeditaskid'], tasksEventInfo)
        if len(correspondingEventInfo) > 0:
            task['totevrem'] = int(correspondingEventInfo[0]['totevrem'])
            task['totev'] = correspondingEventInfo[0]['totev']
else:
task['totevrem'] = 0
task['totev'] = 0
if (task['jeditaskid'] in failedInScouting):
task['failedscouting'] = True
if (task['jeditaskid'] in scoutingHasCritFailures):
task['scoutinghascritfailures'] = True
if (task['jeditaskid'] in scoutingHasNonCritFailures):
task['scoutinghasnoncritfailures'] = True
return tasks
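# Aggregate total and remaining event counts over the selected tasks for the
# task list summary panel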
def getSummaryForTaskList(request):
valid, response = initRequest(request)
if not valid: return response
data = {}
if 'limit' in request.session['requestParams']:
limit = int(request.session['requestParams']['limit'])
else:
limit = 5000
if not valid: return response
if 'tasktype' in request.session['requestParams'] and request.session['requestParams']['tasktype'].startswith(
'anal'):
hours = 3 * 24
else:
hours = 7 * 24
eventservice = False
if 'eventservice' in request.session['requestParams'] and (
request.session['requestParams']['eventservice'] == 'eventservice' or request.session['requestParams']['eventservice'] == '1'): eventservice = True
if eventservice: hours = 7 * 24
query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, limit=9999999, querytype='task',wildCardExt=True)
if 'statenotupdated' in request.session['requestParams']:
tasks = taskNotUpdated(request, query, wildCardExtension)
else:
tasks = JediTasks.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values('jeditaskid','status','creationdate','modificationtime')
taskl = []
for t in tasks:
taskl.append(t['jeditaskid'])
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
else:
tmpTableName = "TMP_IDS1"
taskEvents=[]
random.seed()
transactionKey = random.randrange(1000000)
connection.enter_transaction_management()
new_cur = connection.cursor()
for id in taskl:
new_cur.execute("INSERT INTO %s(ID,TRANSACTIONKEY) VALUES (%i,%i)" % (tmpTableName, id, transactionKey)) # Backend dependable
connection.commit()
taske = GetEventsForTask.objects.extra(where=["JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)]).values()
for task in taske:
taskEvents.append(task)
new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
connection.commit()
connection.leave_transaction_management()
nevents={'neventstot':0, 'neventsrem':0}
for task in taskEvents:
if 'totev' in task and task['totev'] is not None:
nevents['neventstot']+=task['totev']
if 'totevrem' in task and task['totevrem'] is not None:
nevents['neventsrem']+=task['totevrem']
endSelfMonitor(request)
del request.session['TFIRST']
del request.session['TLAST']
response = render_to_response('taskListSummary.html', {'nevents': nevents}, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
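# Overview of running MC production tasks: per-task progress from input dataset
# bookkeeping, occupied slots, task ages and AFII/FS event breakdowns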
@cache_page(60*20)
def runningProdTasks(request):
valid, response = initRequest(request)
xurl = extensibleURL(request)
nosorturl = removeParam(xurl, 'sortby',mode='extensible')
processingtypelist=[]
tquery={}
extraquery="WORKINGGROUP NOT IN ('AP_REPR', 'AP_VALI', 'GP_PHYS', 'GP_THLT')"
# if 'simtype' in request.session['requestParams']:
# tasks=[task for task in tasks if task['simtype']==request.session['requestParams']['simtype']]
if 'processingtype' in request.session['requestParams']:
tquery['processingtype']=request.session['requestParams']['processingtype']
else:
tquery['processingtype__in']=[ 'evgen' , 'pile', 'simul', 'recon' ]
if 'username' in request.session['requestParams']:
tquery['username']=request.session['requestParams']['username']
if 'campaign' in request.session['requestParams']:
tquery['campaign__contains']=request.session['requestParams']['campaign']
else:
tquery['campaign__contains']='MC'
if 'corecount' in request.session['requestParams']:
tquery['corecount']=request.session['requestParams']['corecount']
if 'status' in request.session['requestParams']:
tquery['status']=request.session['requestParams']['status']
else:
extraquery+=" AND STATUS NOT IN ('cancelled', 'failed','broken','aborted', 'finished', 'done')"
tquery['tasktype'] = 'prod'
tquery['prodsourcelabel']='managed'
# variables = ['campaign','jeditaskid','reqid','datasetname','status','username','workinggroup','currentpriority','processingtype','type','corecount','creationdate','taskname']
tasks = JediTasks.objects.filter(**tquery).extra(where=[extraquery]).values('campaign','jeditaskid','reqid','status','username','workinggroup','currentpriority','processingtype','corecount','creationdate','taskname','splitrule','username')
ntasks = len(tasks)
slots=0
ages=[]
simtypes=[]
datasets=[]
neventsAFIItasksSum={'evgen':0 , 'pile':0, 'simul':0, 'recon':0}
neventsFStasksSum={'evgen':0 , 'pile':0, 'simul':0, 'recon':0}
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
else:
tmpTableName = "TMP_IDS1"
## Get status of input processing as indicator of task progress
dsquery = {}
dsquery['type__in'] = ['input', 'pseudo_input' ]
dsquery['masterid__isnull'] = True
taskl = []
for t in tasks:
taskl.append(t['jeditaskid'])
jquery={'jobstatus':'running'}
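    # Stage the task ids into the temp table so the dataset and running-job
    # queries below can join against them, presumably to avoid large IN-lists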
random.seed()
transactionKey = random.randrange(1000000)
connection.enter_transaction_management()
new_cur = connection.cursor()
for id in taskl:
new_cur.execute("INSERT INTO %s(ID,TRANSACTIONKEY) VALUES (%i,%i)" % (tmpTableName,id,transactionKey)) # Backend dependable
connection.commit()
datasets = JediDatasets.objects.filter(**dsquery).extra(where=["JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)]).values('jeditaskid','nfiles','nfilesfinished','nfilesfailed','nevents', 'neventsused','type', 'masterid','datasetname')
dsinfo = {}
if len(datasets) > 0:
for ds in datasets:
taskid = ds['jeditaskid']
if taskid not in dsinfo:
dsinfo[taskid] = []
dsinfo[taskid].append(ds)
rjobslist = Jobsactive4.objects.filter(**jquery).extra(where=["JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)]).values('jeditaskid').annotate(count=Count('jeditaskid'))
rjobs={}
if len(rjobslist)>0:
for rjob in rjobslist:
taskid = rjob['jeditaskid']
if taskid not in rjobs:
rjobs[taskid] = []
rjobs[taskid].append(rjob['count'])
new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
connection.commit()
connection.leave_transaction_management()
neventsTotSum=0
neventsUsedTotSum=0
rjobs1coreTot=0
rjobs8coreTot=0
for task in tasks:
neventsTot=0
neventsUsedTot=0
nfailed=0
if (task['jeditaskid'] in dsinfo):
for ds in dsinfo[task['jeditaskid']]:
if int(ds['nevents'])>0:
neventsTot += ds['nevents']
neventsUsedTot += ds['neventsused']
if int(ds['nfiles'])>0:
nfailed+=ds['nfilesfailed']
if neventsTot>0:
task['percentage']=round(100.*neventsUsedTot/neventsTot,1)
else:
task['percentage']=0.
neventsTotSum+=neventsTot
neventsUsedTotSum+=neventsUsedTot
task['nevents']=neventsTot
task['neventsused']=neventsUsedTot
task['nfilesfailed']=nfailed
if (task['jeditaskid'] in rjobs):
task['rjobs']=rjobs[task['jeditaskid']][0]
slots+=int(rjobs[task['jeditaskid']][0])*task['corecount']
else:
task['rjobs']=0
if task['corecount']==1:
rjobs1coreTot+=task['rjobs']
if task['corecount']==8:
rjobs8coreTot+=task['rjobs']
task['age']=(datetime.now()-task['creationdate']).days
ages.append(task['age'])
if len(task['campaign'].split(':'))>1:
task['cutcampaign']=task['campaign'].split(':')[1]
else:
task['cutcampaign']=task['campaign'].split(':')[0]
task['datasetname']=task['taskname'].split('.')[1]
ltag = len(task['taskname'].split("_"))
rtag=task['taskname'].split("_")[ltag-1]
if "." in rtag :
rtag = rtag.split(".")[len(rtag.split("."))-1]
if 'a' in rtag:
task['simtype']='AFII'
neventsAFIItasksSum[task['processingtype']]+=neventsTot
else:
task['simtype']='FS'
neventsFStasksSum[task['processingtype']]+=neventsTot
plotageshistogram=1
if sum(ages)==0: plotageshistogram=0
sumd=taskSummaryDict(request, tasks, ['status','processingtype','simtype'])
if 'sortby' in request.session['requestParams']:
sortby = request.session['requestParams']['sortby']
if sortby == 'campaign-asc':
tasks = sorted(tasks, key=lambda x:x['campaign'])
elif sortby == 'campaign-desc':
tasks = sorted(tasks, key=lambda x:x['campaign'],reverse=True)
elif sortby == 'reqid-asc':
tasks = sorted(tasks, key=lambda x:x['reqid'])
elif sortby == 'reqid-desc':
tasks = sorted(tasks, key=lambda x:x['reqid'], reverse=True)
elif sortby == 'jeditaskid-asc':
tasks = sorted(tasks, key=lambda x:x['jeditaskid'])
elif sortby == 'jeditaskid-desc':
tasks = sorted(tasks, key=lambda x:x['jeditaskid'],reverse=True)
elif sortby == 'rjobs-asc':
tasks = sorted(tasks, key=lambda x:x['rjobs'])
elif sortby == 'rjobs-desc':
tasks = sorted(tasks, key=lambda x:x['rjobs'], reverse=True)
elif sortby == 'status-asc':
tasks = sorted(tasks, key=lambda x:x['status'])
elif sortby == 'status-desc':
tasks = sorted(tasks, key=lambda x:x['status'],reverse=True)
elif sortby == 'processingtype-asc':
tasks = sorted(tasks, key=lambda x:x['processingtype'])
elif sortby == 'processingtype-desc':
tasks = sorted(tasks, key=lambda x:x['processingtype'],reverse=True)
elif sortby == 'nevents-asc':
tasks = sorted(tasks, key=lambda x:x['nevents'])
elif sortby == 'nevents-desc':
tasks = sorted(tasks, key=lambda x:x['nevents'], reverse=True)
elif sortby == 'neventsused-asc':
tasks = sorted(tasks, key=lambda x:x['neventsused'])
elif sortby == 'neventsused-desc':
tasks = sorted(tasks, key=lambda x:x['neventsused'], reverse=True)
elif sortby == 'percentage-asc':
tasks = sorted(tasks, key=lambda x:x['percentage'])
elif sortby == 'percentage-desc':
tasks = sorted(tasks, key=lambda x:x['percentage'], reverse=True)
elif sortby == 'nfilesfailed-asc':
tasks = sorted(tasks, key=lambda x:x['nfilesfailed'])
elif sortby == 'nfilesfailed-desc':
tasks = sorted(tasks, key=lambda x:x['nfilesfailed'], reverse=True)
elif sortby == 'priority-asc':
tasks = sorted(tasks, key=lambda x:x['currentpriority'])
elif sortby == 'priority-desc':
tasks = sorted(tasks, key=lambda x:x['currentpriority'], reverse=True)
elif sortby == 'simtype-asc':
tasks = sorted(tasks, key=lambda x:x['simtype'])
elif sortby == 'simtype-desc':
tasks = sorted(tasks, key=lambda x:x['simtype'], reverse=True)
elif sortby == 'age-asc':
tasks = sorted(tasks, key=lambda x:x['age'])
elif sortby == 'age-desc':
tasks = sorted(tasks, key=lambda x:x['age'], reverse=True)
elif sortby == 'corecount-asc':
tasks = sorted(tasks, key=lambda x:x['corecount'])
elif sortby == 'corecount-desc':
tasks = sorted(tasks, key=lambda x:x['corecount'], reverse=True)
elif sortby == 'username-asc':
tasks = sorted(tasks, key=lambda x:x['username'])
elif sortby == 'username-desc':
tasks = sorted(tasks, key=lambda x:x['username'], reverse=True)
elif sortby == 'datasetname-asc':
tasks = sorted(tasks, key=lambda x:x['datasetname'])
elif sortby == 'datasetname-desc':
tasks = sorted(tasks, key=lambda x:x['datasetname'], reverse=True)
else:
sortby = 'age-asc'
tasks = sorted(tasks, key=lambda x:x['age'])
if (('HTTP_ACCEPT' in request.META) and(request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or ('json' in request.session['requestParams']):
dump = json.dumps(tasks, cls=DateEncoder)
return HttpResponse(dump, mimetype='text/html')
else:
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'xurl' : xurl,
'nosorturl' : nosorturl,
'tasks': tasks,
'ntasks' : ntasks,
'sortby' : sortby,
'ages': ages,
'simtypes': simtypes,
'slots': slots,
'sumd': sumd,
'neventsUsedTotSum': round(neventsUsedTotSum/1000000.,1),
'neventsTotSum': round(neventsTotSum/1000000.,1),
'rjobs1coreTot': rjobs1coreTot,
'rjobs8coreTot': rjobs8coreTot,
'neventsAFIItasksSum': neventsAFIItasksSum,
'neventsFStasksSum': neventsFStasksSum,
'plotageshistogram': plotageshistogram,
}
##self monitor
endSelfMonitor(request)
response = render_to_response('runningProdTasks.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
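# Fetch recent JEDI production brokerage records from Pandalog; currently a
# debugging helper that only prints the matching messages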
def getBrokerageLog(request):
iquery = {}
iquery['type']='prod_brokerage'
iquery['name']='panda.mon.jedi'
if 'taskid' in request.session['requestParams']:
iquery['message__startswith'] = request.session['requestParams']['taskid']
if 'jeditaskid' in request.session['requestParams']:
iquery['message__icontains'] = "jeditaskid=%s" % request.session['requestParams']['jeditaskid']
if 'hours' not in request.session['requestParams']:
hours = 72
else:
hours = int(request.session['requestParams']['hours'])
startdate = timezone.now() - timedelta(hours=hours)
startdate = startdate.strftime(defaultDatetimeFormat)
enddate = timezone.now().strftime(defaultDatetimeFormat)
iquery['bintime__range'] = [startdate, enddate]
records = Pandalog.objects.filter(**iquery).order_by('bintime').reverse()[:request.session['JOB_LIMIT']].values()
sites = {}
for record in records:
        message = record['message']
print message
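# Detailed single-task page: task record, task/job parameters, datasets, job
# summaries (with retry dropping) and memory/walltime distributions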
@cache_page(60*20)
def taskInfo(request, jeditaskid=0):
jeditaskid = int(jeditaskid)
valid, response = initRequest(request)
furl = request.get_full_path()
nomodeurl = removeParam(furl, 'mode',mode='extensible')
if not valid: return response
if 'taskname' in request.session['requestParams'] and request.session['requestParams']['taskname'].find('*') >= 0:
return taskList(request)
setupView(request, hours=365*24, limit=999999999, querytype='task')
eventservice = False
query = {}
tasks = []
taskrec = None
    colnames = []
    columns = []
    jobsummary = []
    maxpss = []
    walltime = []
    jobsummaryESMerge = []
    jobsummaryPMERGE = []
    # defaults for the taskname-lookup path, where jobSummary2 is not invoked
    jobcpuTimeScoutID = 0
    hs06sSum = {'finished': 0, 'failed': 0, 'total': 0}
    sitepss, sitewalltime, sitepssf, sitewalltimef = [], [], [], []
    maxpssf, walltimef, maxpsspercore, maxpssfpercore = [], [], [], []
    hs06s, hs06sf, walltimeperevent = [], [], []
if 'jeditaskid' in request.session['requestParams']: jeditaskid = int(request.session['requestParams']['jeditaskid'])
if jeditaskid != 0:
query = {'jeditaskid' : jeditaskid}
tasks = JediTasks.objects.filter(**query).values()
if len(tasks) > 0:
if 'eventservice' in tasks[0] and tasks[0]['eventservice'] == 1: eventservice = True
if eventservice:
mode = 'drop'
if 'mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'drop': mode = 'drop'
if 'mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'nodrop': mode = 'nodrop'
jobsummary,jobcpuTimeScoutID,hs06sSum,maxpss,walltime,sitepss,sitewalltime,maxpssf,walltimef,sitepssf,sitewalltimef, maxpsspercore, maxpssfpercore, hs06s, hs06sf, walltimeperevent = jobSummary2(query, exclude={}, mode=mode, isEventServiceFlag=True, substatusfilter='non_es_merge')
jobsummaryESMerge,jobcpuTimeScoutIDESM,hs06sSumESM,maxpssESM,walltimeESM,sitepssESM,sitewalltimeESM,maxpssfESM,walltimefESM,sitepssfESM,sitewalltimefESM, maxpsspercoreESM, maxpssfpercoreESM, hs06sESM, hs06sfESM, walltimepereventESM = jobSummary2(query, exclude={}, mode=mode, isEventServiceFlag=True, substatusfilter='es_merge')
else:
## Exclude merge jobs. Can be misleading. Can show failures with no downstream successes.
exclude = {'processingtype' : 'pmerge' }
mode='drop'
if 'mode' in request.session['requestParams']:
mode= request.session['requestParams']['mode']
jobsummary,jobcpuTimeScoutID,hs06sSum,maxpss,walltime,sitepss,sitewalltime,maxpssf,walltimef,sitepssf,sitewalltimef, maxpsspercore, maxpssfpercore, hs06s, hs06sf, walltimeperevent = jobSummary2(query, exclude=exclude, mode=mode)
jobsummaryPMERGE, jobcpuTimeScoutIDPMERGE, hs06sSumPMERGE, maxpssPMERGE, walltimePMERGE, sitepssPMERGE, sitewalltimePMERGE, maxpssfPMERGE, walltimefPMERGE, sitepssfPMERGE, sitewalltimefPMERGE, maxpsspercorePMERGE, maxpssfpercorePMERGE, hs06sPMERGE, hs06sfPMERGE, walltimepereventPMERGE = jobSummary2(query, exclude={}, mode=mode, processingtype='pmerge')
elif 'taskname' in request.session['requestParams']:
querybyname = {'taskname' : request.session['requestParams']['taskname'] }
tasks = JediTasks.objects.filter(**querybyname).values()
if len(tasks) > 0:
jeditaskid = tasks[0]['jeditaskid']
query = {'jeditaskid' : jeditaskid}
maxpssave = 0
maxpsscount = 0
for maxpssjob in maxpss:
if maxpssjob > 0:
maxpssave += maxpssjob
maxpsscount += 1
if maxpsscount > 0:
maxpssave = maxpssave/maxpsscount
else:
maxpssave = ''
tasks = cleanTaskList(request,tasks)
try:
taskrec = tasks[0]
if jobcpuTimeScoutID and jobcpuTimeScoutID> 0:
taskrec['cputimescoutjob']=jobcpuTimeScoutID
colnames = taskrec.keys()
colnames.sort()
        for k in colnames:
            val = taskrec[k]
            if val is None:
                continue
            pair = { 'name' : k, 'value' : val }
            columns.append(pair)
except IndexError:
taskrec = None
taskpars = JediTaskparams.objects.filter(**query).extra(where=['ROWNUM <= 1000']).values()
jobparams = None
taskparams = None
taskparaml = None
jobparamstxt = []
if len(taskpars) > 0:
taskparams = taskpars[0]['taskparams']
try:
taskparams = json.loads(taskparams)
tpkeys = taskparams.keys()
tpkeys.sort()
taskparaml = []
for k in tpkeys:
rec = { 'name' : k, 'value' : taskparams[k] }
taskparaml.append(rec)
jobparams = taskparams['jobParameters']
jobparams.append(taskparams['log'])
for p in jobparams:
if p['type'] == 'constant':
ptxt = p['value']
elif p['type'] == 'template':
ptxt = "<i>%s template:</i> value='%s' " % ( p['param_type'], p['value'] )
for v in p:
if v in ['type', 'param_type', 'value' ]: continue
ptxt += " %s='%s'" % ( v, p[v] )
else:
ptxt = '<i>unknown parameter type %s:</i> ' % p['type']
for v in p:
if v in ['type', ]: continue
ptxt += " %s='%s'" % ( v, p[v] )
jobparamstxt.append(ptxt)
jobparamstxt = sorted(jobparamstxt, key=lambda x:x.lower())
except ValueError:
pass
if taskrec and 'ticketsystemtype' in taskrec and taskrec['ticketsystemtype'] == '' and taskparams != None:
if 'ticketID' in taskparams: taskrec['ticketid'] = taskparams['ticketID']
if 'ticketSystemType' in taskparams: taskrec['ticketsystemtype'] = taskparams['ticketSystemType']
if taskrec:
taskname = taskrec['taskname']
elif 'taskname' in request.session['requestParams']:
taskname = request.session['requestParams']['taskname']
else:
taskname = ''
logtxt = None
if taskrec and taskrec['errordialog']:
mat = re.match('^.*"([^"]+)"',taskrec['errordialog'])
if mat:
errurl = mat.group(1)
cmd = "curl -s -f --compressed '%s'" % errurl
logpfx = u"logtxt: %s\n" % cmd
logout = commands.getoutput(cmd)
if len(logout) > 0: logtxt = logout
dsquery = {}
dsquery['jeditaskid'] = jeditaskid
dsets = JediDatasets.objects.filter(**dsquery).values()
dsinfo = None
nfiles = 0
nfinished = 0
nfailed = 0
neventsTot = 0
neventsUsedTot = 0
if len(dsets) > 0:
for ds in dsets:
if ds['type'] not in ['input', 'pseudo_input' ]: continue
if ds['masterid']: continue
if int(ds['nevents']) > 0:
neventsTot += int(ds['nevents'])
neventsUsedTot += int(ds['neventsused'])
if int(ds['nfiles']) > 0:
nfiles += int(ds['nfiles'])
nfinished += int(ds['nfilesfinished'])
nfailed += int(ds['nfilesfailed'])
dsets = sorted(dsets, key=lambda x:x['datasetname'].lower())
if nfiles > 0:
dsinfo = {}
dsinfo['nfiles'] = nfiles
dsinfo['nfilesfinished'] = nfinished
dsinfo['nfilesfailed'] = nfailed
dsinfo['pctfinished'] = int(100.*nfinished/nfiles)
dsinfo['pctfailed'] = int(100.*nfailed/nfiles)
if taskrec: taskrec['dsinfo'] = dsinfo
## get dataset types
dstypesd = {}
for ds in dsets:
dstype = ds['type']
if dstype not in dstypesd: dstypesd[dstype] = 0
dstypesd[dstype] += 1
dstkeys = dstypesd.keys()
dstkeys.sort()
dstypes = []
for dst in dstkeys:
dstd = { 'type' : dst, 'count' : dstypesd[dst] }
dstypes.append(dstd)
## get input containers
inctrs = []
if taskparams and 'dsForIN' in taskparams:
inctrs = [ taskparams['dsForIN'], ]
## get output containers
cquery = {}
cquery['jeditaskid'] = jeditaskid
cquery['type__in'] = ( 'output', 'log' )
outctrs = JediDatasets.objects.filter(**cquery).values_list('containername',flat=True).distinct()
if len(outctrs) == 0 or outctrs[0] == '':
outctrs = None
#getBrokerageLog(request)
## For event service, pull the jobs and event ranges
if eventservice:
jquery = {}
jquery['jeditaskid'] = jeditaskid
jobs = []
jobs.extend(Jobsactive4.objects.filter(**jquery).values('pandaid','jeditaskid', 'transformation', 'jobstatus', 'modificationtime', 'currentpriority'))
jobs.extend(Jobsarchived4.objects.filter(**jquery).values('pandaid','jeditaskid', 'transformation', 'jobstatus', 'modificationtime', 'currentpriority'))
taskdict = {}
for job in jobs:
taskdict[job['pandaid']] = job['jeditaskid']
estaskdict = {}
#esjobs = Set()
jobs = cleanJobList(request, jobs, mode='drop', doAddMeta=False)
esjobs = []
for job in jobs:
esjobs.append(job['pandaid'])
esquery = {}
'''
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
else:
tmpTableName = "TMP_IDS1"
transactionKey = random.randrange(1000000)
connection.enter_transaction_management()
new_cur = connection.cursor()
executionData = []
for id in esjobs:
executionData.append((id,transactionKey))
query = """INSERT INTO """+tmpTableName+"""(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
new_cur.executemany(query, executionData)
connection.commit()
new_cur.execute("SELECT PANDAID,STATUS FROM ATLAS_PANDA.JEDI_EVENTS WHERE PANDAID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey))
evtable = dictfetchall(new_cur)
new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
connection.commit()
connection.leave_transaction_management()
# esquery['pandaid__in'] = esjobs
# evtable = JediEvents.objects.filter(**esquery).values('pandaid','status')
for ev in evtable:
taskid = taskdict[ev['PANDAID']]
if taskid not in estaskdict:
estaskdict[taskid] = {}
for s in eventservicestatelist:
estaskdict[taskid][s] = 0
evstat = eventservicestatelist[ev['STATUS']]
estaskdict[taskid][evstat] += 1
if jeditaskid in estaskdict:
estaskstr = ''
for s in estaskdict[jeditaskid]:
if estaskdict[jeditaskid][s] > 0:
estaskstr += " %s(%s) " % ( s, estaskdict[jeditaskid][s] )
taskrec['estaskstr'] = estaskstr
'''
#neventsTot = 0
#neventsUsedTot = 0
if taskrec:
taskrec['totev'] = neventsTot
taskrec['totevproc'] = neventsUsedTot
taskrec['pctfinished'] = (100*taskrec['totevproc']/taskrec['totev']) if (taskrec['totev'] > 0) else ''
taskrec['totevhs06'] = (neventsTot)*taskrec['cputime'] if (taskrec['cputime'] is not None and neventsTot > 0) else None
# if taskrec['pctfinished']<=20 or hs06sSum['total']==0:
# taskrec['totevhs06'] = (neventsTot)*taskrec['cputime'] if (taskrec['cputime'] is not None and neventsTot > 0) else None
# else:
# taskrec['totevhs06'] = int(hs06sSum['total']*neventsTot)
taskrec['totevprochs06'] = int(hs06sSum['finished'])
taskrec['failedevprochs06'] = int(hs06sSum['failed'])
taskrec['maxpssave'] = maxpssave
taskrec['kibanatimefrom'] = taskrec['creationdate'].strftime("%Y-%m-%dT%H:%M:%SZ")
if taskrec['status'] in ['cancelled', 'failed','broken','aborted', 'finished', 'done']:
taskrec['kibanatimeto']=taskrec['modificationtime'].strftime("%Y-%m-%dT%H:%M:%SZ")
else:
taskrec['kibanatimeto']=datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
tquery = {}
tquery['jeditaskid'] = jeditaskid
tquery['storagetoken__isnull'] = False
storagetoken = JediDatasets.objects.filter(**tquery).values('storagetoken')
    taskbrokerage = 'prod_brokerage' if (taskrec and taskrec['tasktype'] == 'prod') else 'analy_brokerage'
if storagetoken:
if taskrec:
taskrec['destination']=storagetoken[0]['storagetoken']
if (taskrec['cloud'] == 'WORLD'):
taskrec['destination'] = taskrec['nucleus']
if (('HTTP_ACCEPT' in request.META) and(request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or ('json' in request.session['requestParams']):
        del tasks
        del columns
        if len(dsets) > 0: del ds
if taskrec:
taskrec['creationdate'] = taskrec['creationdate'].strftime(defaultDatetimeFormat)
taskrec['modificationtime'] = taskrec['modificationtime'].strftime(defaultDatetimeFormat)
taskrec['starttime'] = taskrec['starttime'].strftime(defaultDatetimeFormat)
taskrec['statechangetime'] = taskrec['statechangetime'].strftime(defaultDatetimeFormat)
for dset in dsets:
dset['creationtime'] = dset['creationtime'].strftime(defaultDatetimeFormat)
dset['modificationtime'] = dset['modificationtime'].strftime(defaultDatetimeFormat)
if dset['statechecktime'] is not None:
dset['statechecktime'] = dset['statechecktime'].strftime(defaultDatetimeFormat)
data = {
'task' : taskrec,
'taskparams' : taskparams,
'datasets' : dsets,
}
del request.session['TFIRST']
del request.session['TLAST']
return HttpResponse(json.dumps(data, cls=DateEncoder), mimetype='text/html')
else:
attrs = []
do_redirect = False
try:
if int(jeditaskid) > 0 and int(jeditaskid) < 4000000:
do_redirect = True
except:
pass
if do_redirect:
del request.session['TFIRST']
del request.session['TLAST']
return redirect('http://panda.cern.ch/?taskname=%s&overview=taskinfo' % jeditaskid)
if taskrec:
attrs.append({'name' : 'Status', 'value' : taskrec['status'] })
del request.session['TFIRST']
del request.session['TLAST']
data = {
'nomodeurl': nomodeurl,
'jobsummaryESMerge': jobsummaryESMerge,
'jobsummaryPMERGE' : jobsummaryPMERGE,
'maxpss' : maxpss,
'taskbrokerage':taskbrokerage,
'walltime' : walltime,
'sitepss': json.dumps(sitepss),
'sitewalltime': json.dumps(sitewalltime),
'maxpssf' : maxpssf,
'walltimef' : walltimef,
'sitepssf': json.dumps(sitepssf),
'sitewalltimef': json.dumps(sitewalltimef),
'maxpsspercore': maxpsspercore,
'maxpssfpercore': maxpssfpercore,
'hs06s': hs06s,
'hs06sf': hs06sf,
'walltimeperevent': walltimeperevent,
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'task' : taskrec,
'taskname' : taskname,
'taskparams' : taskparams,
'taskparaml' : taskparaml,
'jobparams' : jobparamstxt,
'columns' : columns,
'attrs' : attrs,
'jobsummary' : jobsummary,
'jeditaskid' : jeditaskid,
'logtxt' : logtxt,
'datasets' : dsets,
'dstypes' : dstypes,
'inctrs' : inctrs,
'outctrs' : outctrs,
}
data.update(getContextVariables(request))
##self monitor
endSelfMonitor(request)
if eventservice:
response = render_to_response('taskInfoES.html', data, RequestContext(request))
else:
response = render_to_response('taskInfo.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
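# Collect a task's jobs from all job tables, deduplicate, optionally drop jobs
# superseded by retries ('drop' mode), and return per-state counts together with
# maxpss/walltime/HS06s samples for plotting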
def jobSummary2(query, exclude={}, mode='drop', isEventServiceFlag=False, substatusfilter = '', processingtype = ''):
jobs = []
jobcpuTimeScoutID=0
newquery = copy.deepcopy(query)
if substatusfilter != '':
if (substatusfilter == 'es_merge'):
newquery['eventservice'] = 2
else:
exclude['eventservice'] = 2
    if processingtype != '':
        newquery['processingtype'] = processingtype
#newquery['jobstatus'] = 'finished'
    # A job can appear in both Jobsarchived and Jobsarchived4 with 'finished' and
    # 'closed' statuses; the dedup loop below keeps the 'finished' record
jobs.extend(Jobsarchived.objects.filter(**newquery).exclude(**exclude).\
values('eventservice', 'specialhandling', 'modificationtime', 'jobsubstatus','pandaid','jobstatus','jeditaskid','processingtype','maxpss', 'starttime', 'endtime', 'corecount', 'computingsite', 'jobsetid', 'jobmetrics', 'nevents'))
jobs.extend(Jobsdefined4.objects.filter(**newquery).exclude(**exclude).\
values('eventservice', 'specialhandling', 'modificationtime', 'jobsubstatus', 'pandaid','jobstatus','jeditaskid','processingtype','maxpss', 'starttime', 'endtime', 'corecount', 'computingsite', 'jobsetid', 'jobmetrics', 'nevents'))
jobs.extend(Jobswaiting4.objects.filter(**newquery).exclude(**exclude).\
values('eventservice', 'specialhandling', 'modificationtime', 'jobsubstatus','pandaid','jobstatus','jeditaskid','processingtype','maxpss', 'starttime', 'endtime', 'corecount', 'computingsite', 'jobsetid', 'jobmetrics', 'nevents'))
jobs.extend(Jobsactive4.objects.filter(**newquery).exclude(**exclude).\
values('eventservice', 'specialhandling', 'modificationtime', 'jobsubstatus','pandaid','jobstatus','jeditaskid','processingtype','maxpss', 'starttime', 'endtime', 'corecount', 'computingsite', 'jobsetid', 'jobmetrics', 'nevents'))
jobs.extend(Jobsarchived4.objects.filter(**newquery).exclude(**exclude).\
values('eventservice', 'specialhandling', 'modificationtime', 'jobsubstatus','pandaid','jobstatus','jeditaskid','processingtype','maxpss', 'starttime', 'endtime', 'corecount', 'computingsite', 'jobsetid', 'jobmetrics', 'nevents'))
jobsSet = {}
newjobs = []
hs06sSum={'finished':0,'failed':0, 'total':0}
cpuTimeCurrent=[]
    for job in jobs:
        if not job['pandaid'] in jobsSet:
            jobsSet[job['pandaid']] = job['jobstatus']
            newjobs.append(job)
        elif jobsSet[job['pandaid']] == 'closed' and job['jobstatus'] == 'finished':
            # replace the duplicate 'closed' record with the 'finished' one
            jobsSet[job['pandaid']] = job['jobstatus']
            newjobs = [j for j in newjobs if j['pandaid'] != job['pandaid']]
            newjobs.append(job)
        if job['jobmetrics'] and 'scout=cpuTime' in job['jobmetrics']:
jobcpuTimeScoutID=job['pandaid']
if 'corecount' in job and job['corecount'] is None:
job['corecount']=1
if job['jobstatus'] in ['finished','failed'] and 'endtime' in job and 'starttime' in job and job['starttime'] and job['endtime']:
duration=max(job['endtime'] - job['starttime'], timedelta(seconds=0))
job['duration']= duration.days*24*3600+duration.seconds
if job['computingsite'] in pandaSites:
job['hs06s']=(job['duration'])*float(pandaSites[job['computingsite']]['corepower'])*job['corecount']
else:
job['hs06s']=0
if job['nevents'] and job['nevents']>0:
cpuTimeCurrent.append(job['hs06s']/job['nevents'])
job['walltimeperevent']=job['duration']*job['corecount']/job['nevents']
hs06sSum['finished']+=job['hs06s'] if job['jobstatus']=='finished' else 0
hs06sSum['failed']+=job['hs06s'] if job['jobstatus']=='failed' else 0
if len(cpuTimeCurrent)>0:
hs06sSum['total']= sum(cpuTimeCurrent)/len(cpuTimeCurrent)
jobs = newjobs
if mode == 'drop' and len(jobs) < 120000:
print 'filtering retries'
## If the list is for a particular JEDI task, filter out the jobs superseded by retries
taskids = {}
for job in jobs:
if 'jeditaskid' in job: taskids[job['jeditaskid']] = 1
droplist = []
droppedIDs = set()
if len(taskids) == 1:
for task in taskids:
retryquery = {}
retryquery['jeditaskid'] = task
retries = JediJobRetryHistory.objects.filter(**retryquery).extra(where=["OLDPANDAID!=NEWPANDAID AND RELATIONTYPE IN ('', 'retry', 'pmerge', 'merge', 'jobset_retry', 'jobset_id', 'es_merge')"]).order_by('newpandaid').values()
print 'got the retries', len(jobs), len(retries)
hashRetries = {}
for retry in retries:
hashRetries[retry['oldpandaid']] = retry
newjobs = []
for job in jobs:
dropJob = 0
pandaid = job['pandaid']
if not isEventServiceFlag:
#if not isEventService(job):
                        if pandaid in hashRetries:
retry = hashRetries[pandaid]
# if not isEventServiceFlag:
if retry['relationtype'] == '' or retry['relationtype'] == 'retry' or (
job['processingtype'] == 'pmerge' and retry['relationtype'] == 'merge'):
dropJob = retry['newpandaid']
else:
if (job['pandaid'] in hashRetries):
if (hashRetries[job['pandaid']]['relationtype'] == ('retry')):
dropJob = 1
# if (hashRetries[job['pandaid']]['relationtype'] == 'es_merge' and (job['jobsubstatus'] == 'es_merge')):
# dropJob = 1
if (dropJob == 0):
                            if (job['jobsetid'] in hashRetries) and (
                                    hashRetries[job['jobsetid']]['relationtype'] == 'jobset_retry'):
dropJob = 1
if (job['jobstatus'] == 'closed' and (job['jobsubstatus'] in ('es_unused', 'es_inaction'))):
dropJob = 1
if (dropJob == 0):
if not (job['processingtype'] == 'pmerge'):
newjobs.append(job)
elif processingtype == 'pmerge':
newjobs.append(job)
else:
if not pandaid in droppedIDs:
droppedIDs.add(pandaid)
droplist.append( { 'pandaid' : pandaid, 'newpandaid' : dropJob } )
jobs = newjobs
print 'done filtering'
maxpss = []
maxpsspercore = []
walltime = []
sitepss = []
sitewalltime = []
maxpssf = []
maxpssfpercore = []
walltimef = []
sitepssf = []
sitewalltimef = []
hs06s=[]
hs06sf=[]
walltimeperevent = []
for job in jobs:
if job['corecount'] is None:
job['corecount'] = 1
if job['maxpss'] is not None and job['maxpss'] != -1:
if job['jobstatus']== 'finished':
maxpss.append(job['maxpss']/1024)
maxpsspercore.append(job['maxpss']/1024/job['corecount'])
sitepss.append(job['computingsite'])
if job['jobstatus'] == 'failed':
maxpssf.append(job['maxpss']/1024)
maxpssfpercore.append(job['maxpss']/1024/job['corecount'])
sitepssf.append(job['computingsite'])
if 'duration' in job and job['duration']:
if job['jobstatus']== 'finished':
walltime.append(job['duration'])
sitewalltime.append(job['computingsite'])
hs06s.append(job['hs06s'])
if 'walltimeperevent' in job:
walltimeperevent.append(job['walltimeperevent'])
if job['jobstatus']== 'failed':
walltimef.append(job['duration'])
sitewalltimef.append(job['computingsite'])
hs06sf.append(job['hs06s'])
jobstates = []
global statelist
for state in statelist:
statecount = {}
statecount['name'] = state
statecount['count'] = 0
for job in jobs:
#if isEventService and job['jobstatus'] == 'cancelled':
# job['jobstatus'] = 'finished'
            if job['jobstatus'] == state:
                statecount['count'] += 1
jobstates.append(statecount)
return jobstates, jobcpuTimeScoutID, hs06sSum, maxpss, walltime, sitepss, sitewalltime, maxpssf, walltimef, sitepssf, sitewalltimef, maxpsspercore, maxpssfpercore, hs06s, hs06sf, walltimeperevent
def jobStateSummary(jobs):
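    """ Return a dict of job counts keyed by job status, for the states in the global statelist """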
global statelist
statecount = {}
for state in statelist:
statecount[state] = 0
for job in jobs:
statecount[job['jobstatus']] += 1
return statecount
def errorSummaryDict(request,jobs, tasknamedict, testjobs):
""" take a job list and produce error summaries from it """
errsByCount = {}
errsBySite = {}
errsByUser = {}
errsByTask = {}
sumd = {}
## histogram of errors vs. time, for plotting
errHist = {}
flist = [ 'cloud', 'computingsite', 'produsername', 'taskid', 'jeditaskid', 'processingtype', 'prodsourcelabel', 'transformation', 'workinggroup', 'specialhandling', 'jobstatus' ]
print len(jobs)
for job in jobs:
if not testjobs:
if job['jobstatus'] not in [ 'failed', 'holding' ]: continue
site = job['computingsite']
# if 'cloud' in request.session['requestParams']:
# if site in homeCloud and homeCloud[site] != request.session['requestParams']['cloud']: continue
user = job['produsername']
taskname = ''
if job['jeditaskid'] > 0:
taskid = job['jeditaskid']
if taskid in tasknamedict:
taskname = tasknamedict[taskid]
tasktype = 'jeditaskid'
else:
taskid = job['taskid']
if taskid in tasknamedict:
taskname = tasknamedict[taskid]
tasktype = 'taskid'
if 'modificationtime' in job:
tm = job['modificationtime']
if tm is not None:
tm = tm - timedelta(minutes=tm.minute % 30, seconds=tm.second, microseconds=tm.microsecond)
if not tm in errHist: errHist[tm] = 0
errHist[tm] += 1
## Overall summary
for f in flist:
if job[f]:
if f == 'taskid' and job[f] < 1000000 and 'produsername' not in request.session['requestParams']:
pass
else:
if not f in sumd: sumd[f] = {}
if not job[f] in sumd[f]: sumd[f][job[f]] = 0
sumd[f][job[f]] += 1
if job['specialhandling']:
if not 'specialhandling' in sumd: sumd['specialhandling'] = {}
shl = job['specialhandling'].split()
for v in shl:
if not v in sumd['specialhandling']: sumd['specialhandling'][v] = 0
sumd['specialhandling'][v] += 1
for err in errorcodelist:
if job[err['error']] != 0 and job[err['error']] != '' and job[err['error']] != None:
errval = job[err['error']]
## error code of zero is not an error
if errval == 0 or errval == '0' or errval == None: continue
errdiag = ''
try:
errnum = int(errval)
if err['error'] in errorCodes and errnum in errorCodes[err['error']]:
errdiag = errorCodes[err['error']][errnum]
except:
errnum = errval
errcode = "%s:%s" % ( err['name'], errnum )
if err['diag']:
errdiag = job[err['diag']]
if errcode not in errsByCount:
errsByCount[errcode] = {}
errsByCount[errcode]['error'] = errcode
errsByCount[errcode]['codename'] = err['error']
errsByCount[errcode]['codeval'] = errnum
errsByCount[errcode]['diag'] = errdiag
errsByCount[errcode]['count'] = 0
errsByCount[errcode]['count'] += 1
if user not in errsByUser:
errsByUser[user] = {}
errsByUser[user]['name'] = user
errsByUser[user]['errors'] = {}
errsByUser[user]['toterrors'] = 0
if errcode not in errsByUser[user]['errors']:
errsByUser[user]['errors'][errcode] = {}
errsByUser[user]['errors'][errcode]['error'] = errcode
errsByUser[user]['errors'][errcode]['codename'] = err['error']
errsByUser[user]['errors'][errcode]['codeval'] = errnum
errsByUser[user]['errors'][errcode]['diag'] = errdiag
errsByUser[user]['errors'][errcode]['count'] = 0
errsByUser[user]['errors'][errcode]['count'] += 1
errsByUser[user]['toterrors'] += 1
if site not in errsBySite:
errsBySite[site] = {}
errsBySite[site]['name'] = site
errsBySite[site]['errors'] = {}
errsBySite[site]['toterrors'] = 0
errsBySite[site]['toterrjobs'] = 0
if errcode not in errsBySite[site]['errors']:
errsBySite[site]['errors'][errcode] = {}
errsBySite[site]['errors'][errcode]['error'] = errcode
errsBySite[site]['errors'][errcode]['codename'] = err['error']
errsBySite[site]['errors'][errcode]['codeval'] = errnum
errsBySite[site]['errors'][errcode]['diag'] = errdiag
errsBySite[site]['errors'][errcode]['count'] = 0
errsBySite[site]['errors'][errcode]['count'] += 1
errsBySite[site]['toterrors'] += 1
if tasktype == 'jeditaskid' or taskid > 1000000 or 'produsername' in request.session['requestParams']:
if taskid not in errsByTask:
errsByTask[taskid] = {}
errsByTask[taskid]['name'] = taskid
errsByTask[taskid]['longname'] = taskname
errsByTask[taskid]['errors'] = {}
errsByTask[taskid]['toterrors'] = 0
errsByTask[taskid]['toterrjobs'] = 0
errsByTask[taskid]['tasktype'] = tasktype
if errcode not in errsByTask[taskid]['errors']:
errsByTask[taskid]['errors'][errcode] = {}
errsByTask[taskid]['errors'][errcode]['error'] = errcode
errsByTask[taskid]['errors'][errcode]['codename'] = err['error']
errsByTask[taskid]['errors'][errcode]['codeval'] = errnum
errsByTask[taskid]['errors'][errcode]['diag'] = errdiag
errsByTask[taskid]['errors'][errcode]['count'] = 0
errsByTask[taskid]['errors'][errcode]['count'] += 1
errsByTask[taskid]['toterrors'] += 1
if site in errsBySite: errsBySite[site]['toterrjobs'] += 1
if taskid in errsByTask: errsByTask[taskid]['toterrjobs'] += 1
## reorganize as sorted lists
errsByCountL = []
errsBySiteL = []
errsByUserL = []
errsByTaskL = []
kys = errsByCount.keys()
kys.sort()
for err in kys:
errsByCountL.append(errsByCount[err])
if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'count':
errsByCountL = sorted(errsByCountL, key=lambda x:-x['count'])
kys = errsByUser.keys()
kys.sort()
for user in kys:
errsByUser[user]['errorlist'] = []
errkeys = errsByUser[user]['errors'].keys()
errkeys.sort()
for err in errkeys:
errsByUser[user]['errorlist'].append(errsByUser[user]['errors'][err])
errsByUserL.append(errsByUser[user])
if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'count':
errsByUserL = sorted(errsByUserL, key=lambda x:-x['toterrors'])
kys = errsBySite.keys()
kys.sort()
for site in kys:
errsBySite[site]['errorlist'] = []
errkeys = errsBySite[site]['errors'].keys()
errkeys.sort()
for err in errkeys:
errsBySite[site]['errorlist'].append(errsBySite[site]['errors'][err])
errsBySiteL.append(errsBySite[site])
if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'count':
errsBySiteL = sorted(errsBySiteL, key=lambda x:-x['toterrors'])
kys = errsByTask.keys()
kys.sort()
for taskid in kys:
errsByTask[taskid]['errorlist'] = []
errkeys = errsByTask[taskid]['errors'].keys()
errkeys.sort()
for err in errkeys:
errsByTask[taskid]['errorlist'].append(errsByTask[taskid]['errors'][err])
errsByTaskL.append(errsByTask[taskid])
if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'count':
errsByTaskL = sorted(errsByTaskL, key=lambda x:-x['toterrors'])
suml = []
for f in sumd:
itemd = {}
itemd['field'] = f
iteml = []
kys = sumd[f].keys()
kys.sort()
for ky in kys:
iteml.append({ 'kname' : ky, 'kvalue' : sumd[f][ky] })
itemd['list'] = iteml
suml.append(itemd)
suml = sorted(suml, key=lambda x:x['field'])
if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'count':
for item in suml:
item['list'] = sorted(item['list'], key=lambda x:-x['kvalue'])
kys = errHist.keys()
kys.sort()
errHistL = []
for k in kys:
errHistL.append( [ k, errHist[k] ] )
return errsByCountL, errsBySiteL, errsByUserL, errsByTaskL, suml, errHistL
def getTaskName(tasktype,taskid):
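    """ Resolve a task name: JEDI task ids are looked up in JediTasks, old-style taskids are not resolved """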
taskname = ''
if tasktype == 'taskid':
taskname = ''
elif tasktype == 'jeditaskid' and taskid and taskid != 'None' :
tasks = JediTasks.objects.filter(jeditaskid=taskid).values('taskname')
if len(tasks) > 0:
taskname = tasks[0]['taskname']
return taskname
@cache_page(60*20)
def errorSummary(request):
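    """ Error summary page: failed/holding jobs aggregated by error code, site, user and task """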
valid, response = initRequest(request)
if not valid: return response
testjobs = False
if 'prodsourcelabel' in request.session['requestParams'] and request.session['requestParams']['prodsourcelabel'].lower().find('test') >= 0:
testjobs = True
jobtype = ''
if 'jobtype' in request.session['requestParams']:
jobtype = request.session['requestParams']['jobtype']
elif '/analysis' in request.path:
jobtype = 'analysis'
elif '/production' in request.path:
jobtype = 'production'
elif testjobs:
jobtype = 'rc_test'
if jobtype == '':
hours = 3
limit = 6000
elif jobtype.startswith('anal'):
hours = 6
limit = 6000
else:
hours = 12
limit = 6000
if 'hours' in request.session['requestParams']:
hours = int(request.session['requestParams']['hours'])
query,wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, limit=limit, wildCardExt=True)
if not testjobs: query['jobstatus__in'] = [ 'failed', 'holding' ]
jobs = []
values = 'eventservice','produsername', 'pandaid', 'cloud','computingsite','cpuconsumptiontime','jobstatus','transformation','prodsourcelabel','specialhandling','vo','modificationtime', 'atlasrelease', 'jobsetid', 'processingtype', 'workinggroup', 'jeditaskid', 'taskid', 'starttime', 'endtime', 'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag', 'exeerrorcode', 'exeerrordiag', 'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode', 'piloterrordiag', 'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag', 'transexitcode', 'destinationse', 'currentpriority', 'computingelement'
print "step3-1"
print str(datetime.now())
if testjobs:
jobs.extend(Jobsdefined4.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobswaiting4.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobsactive4.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobsarchived4.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(*values))
if (((datetime.now() - datetime.strptime(query['modificationtime__range'][0], "%Y-%m-%d %H:%M:%S" )).days > 1) or \
((datetime.now() - datetime.strptime(query['modificationtime__range'][1], "%Y-%m-%d %H:%M:%S" )).days > 1)):
jobs.extend(Jobsarchived.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(*values))
print "step3-1-0"
print str(datetime.now())
jobs = cleanJobList(request, jobs, mode='nodrop', doAddMeta = False)
njobs = len(jobs)
tasknamedict = taskNameDict(jobs)
print "step3-1-1"
print str(datetime.now())
## Build the error summary.
errsByCount, errsBySite, errsByUser, errsByTask, sumd, errHist = errorSummaryDict(request,jobs, tasknamedict, testjobs)
## Build the state summary and add state info to site error summary
#notime = True
#if testjobs: notime = False
notime = False #### behave as it used to before introducing notime for dashboards. Pull only 12hrs.
statesummary = dashSummary(request, hours, limit=limit, view=jobtype, cloudview='region', notime=notime)
sitestates = {}
savestates = [ 'finished', 'failed', 'cancelled', 'holding', ]
for cloud in statesummary:
for site in cloud['sites']:
sitename = cloud['sites'][site]['name']
sitestates[sitename] = {}
for s in savestates:
sitestates[sitename][s] = cloud['sites'][site]['states'][s]['count']
sitestates[sitename]['pctfail'] = cloud['sites'][site]['pctfail']
for site in errsBySite:
sitename = site['name']
if sitename in sitestates:
for s in savestates:
if s in sitestates[sitename]: site[s] = sitestates[sitename][s]
if 'pctfail' in sitestates[sitename]: site['pctfail'] = sitestates[sitename]['pctfail']
taskname = ''
if not testjobs:
## Build the task state summary and add task state info to task error summary
print "step3-1-2"
print str(datetime.now())
taskstatesummary = dashTaskSummary(request, hours, limit=limit, view=jobtype)
print "step3-2"
print str(datetime.now())
taskstates = {}
for task in taskstatesummary:
taskid = task['taskid']
taskstates[taskid] = {}
for s in savestates:
taskstates[taskid][s] = task['states'][s]['count']
if 'pctfail' in task: taskstates[taskid]['pctfail'] = task['pctfail']
for task in errsByTask:
taskid = task['name']
if taskid in taskstates:
for s in savestates:
if s in taskstates[taskid]: task[s] = taskstates[taskid][s]
if 'pctfail' in taskstates[taskid]: task['pctfail'] = taskstates[taskid]['pctfail']
if 'jeditaskid' in request.session['requestParams']:
taskname = getTaskName('jeditaskid',request.session['requestParams']['jeditaskid'])
if 'sortby' in request.session['requestParams']:
sortby = request.session['requestParams']['sortby']
else:
sortby = 'alpha'
flowstruct = buildGoogleFlowDiagram(request, jobs=jobs)
print "step3-3"
print str(datetime.now())
request.session['max_age_minutes'] = 6
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
nosorturl = removeParam(request.get_full_path(), 'sortby')
xurl = extensibleURL(request)
jobsurl = xurl.replace('/errors/','/jobs/')
TFIRST = request.session['TFIRST']
TLAST = request.session['TLAST']
del request.session['TFIRST']
del request.session['TLAST']
data = {
'prefix': getPrefix(request),
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'requestString' : request.META['QUERY_STRING'],
'jobtype' : jobtype,
'njobs' : njobs,
'hours' : LAST_N_HOURS_MAX,
'limit' : request.session['JOB_LIMIT'],
'user' : None,
'xurl' : xurl,
'jobsurl' : jobsurl,
'nosorturl' : nosorturl,
'errsByCount' : errsByCount,
'errsBySite' : errsBySite,
'errsByUser' : errsByUser,
'errsByTask' : errsByTask,
'sumd' : sumd,
'errHist' : errHist,
'tfirst' : TFIRST,
'tlast' : TLAST,
'sortby' : sortby,
'taskname' : taskname,
'flowstruct' : flowstruct,
}
data.update(getContextVariables(request))
##self monitor
endSelfMonitor(request)
response = render_to_response('errorSummary.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
resp = []
for job in jobs:
            resp.append({ 'pandaid': job['pandaid'], 'status': job['jobstatus'], 'prodsourcelabel': job['prodsourcelabel'], 'produserid' : job['produserid']})
return HttpResponse(json.dumps(resp), mimetype='text/html')
def removeParam(urlquery, parname, mode='complete'):
"""Remove a parameter from current query"""
urlquery = urlquery.replace('&&','&')
urlquery = urlquery.replace('?&','?')
pstr = '.*(%s=[a-zA-Z0-9\.\-]*).*' % parname
pat = re.compile(pstr)
mat = pat.match(urlquery)
if mat:
pstr = mat.group(1)
urlquery = urlquery.replace(pstr,'')
urlquery = urlquery.replace('&&','&')
urlquery = urlquery.replace('?&','?')
if mode != 'extensible':
if urlquery.endswith('?') or urlquery.endswith('&'): urlquery = urlquery[:len(urlquery)-1]
return urlquery
@cache_page(60*20)
def incidentList(request):
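    """ List recorded incidents over the requested time window, with parameter summaries and a half-hour histogram """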
valid, response = initRequest(request)
if not valid: return response
if 'days' in request.session['requestParams']:
hours = int(request.session['requestParams']['days'])*24
else:
if 'hours' not in request.session['requestParams']:
hours = 24*3
else:
hours = int(request.session['requestParams']['hours'])
setupView(request, hours=hours, limit=9999999)
iquery = {}
cloudQuery = Q()
startdate = timezone.now() - timedelta(hours=hours)
startdate = startdate.strftime(defaultDatetimeFormat)
enddate = timezone.now().strftime(defaultDatetimeFormat)
iquery['at_time__range'] = [startdate, enddate]
if 'site' in request.session['requestParams']:
iquery['description__contains'] = 'queue=%s' % request.session['requestParams']['site']
if 'category' in request.session['requestParams']:
iquery['description__startswith'] = '%s:' % request.session['requestParams']['category']
if 'comment' in request.session['requestParams']:
iquery['description__contains'] = '%s' % request.session['requestParams']['comment']
if 'notifier' in request.session['requestParams']:
iquery['description__contains'] = 'DN=%s' % request.session['requestParams']['notifier']
if 'cloud' in request.session['requestParams']:
sites = [site for site, cloud in homeCloud.items() if cloud == request.session['requestParams']['cloud']]
for site in sites:
cloudQuery = cloudQuery | Q(description__contains='queue=%s' % site)
incidents = Incidents.objects.filter(**iquery).filter(cloudQuery).order_by('at_time').reverse().values()
sumd = {}
pars = {}
incHist = {}
for inc in incidents:
desc = inc['description']
desc = desc.replace(' ',' ')
parsmat = re.match('^([a-z\s]+):\s+queue=([^\s]+)\s+DN=(.*)\s\s\s*([A-Za-z^ \.0-9]*)$',desc)
tm = inc['at_time']
tm = tm - timedelta(minutes=tm.minute % 30, seconds=tm.second, microseconds=tm.microsecond)
if not tm in incHist: incHist[tm] = 0
incHist[tm] += 1
if parsmat:
pars['category'] = parsmat.group(1)
pars['site'] = parsmat.group(2)
pars['notifier'] = parsmat.group(3)
pars['type'] = inc['typekey']
if homeCloud.has_key(pars['site']):
pars['cloud'] = homeCloud[pars['site']]
if parsmat.group(4): pars['comment'] = parsmat.group(4)
else:
parsmat = re.match('^([A-Za-z\s]+):.*$',desc)
if parsmat:
pars['category'] = parsmat.group(1)
else:
pars['category'] = desc[:10]
for p in pars:
if p not in sumd:
sumd[p] = {}
sumd[p]['param'] = p
sumd[p]['vals'] = {}
if pars[p] not in sumd[p]['vals']:
sumd[p]['vals'][pars[p]] = {}
sumd[p]['vals'][pars[p]]['name'] = pars[p]
sumd[p]['vals'][pars[p]]['count'] = 0
sumd[p]['vals'][pars[p]]['count'] += 1
## convert incident components to URLs. Easier here than in the template.
if 'site' in pars:
inc['description'] = re.sub('queue=[^\s]+','queue=<a href="%ssite=%s">%s</a>' % (extensibleURL(request), pars['site'], pars['site']), inc['description'])
## convert to ordered lists
suml = []
for p in sumd:
itemd = {}
itemd['param'] = p
iteml = []
kys = sumd[p]['vals'].keys()
kys.sort(key=lambda y: y.lower())
for ky in kys:
iteml.append({ 'kname' : ky, 'kvalue' : sumd[p]['vals'][ky]['count'] })
itemd['list'] = iteml
suml.append(itemd)
suml = sorted(suml, key=lambda x:x['param'].lower())
kys = incHist.keys()
kys.sort()
incHistL = []
for k in kys:
incHistL.append( [ k, incHist[k] ] )
del request.session['TFIRST']
del request.session['TLAST']
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'user' : None,
'incidents': incidents,
'sumd' : suml,
'incHist' : incHistL,
'xurl' : extensibleURL(request),
'hours' : hours,
'ninc' : len(incidents),
}
##self monitor
endSelfMonitor(request)
response = render_to_response('incidents.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
clearedInc = []
for inc in incidents:
entry = {}
entry['at_time'] = inc['at_time'].isoformat()
entry['typekey'] = inc['typekey']
entry['description'] = inc['description']
clearedInc.append(entry)
jsonResp = json.dumps(clearedInc)
return HttpResponse(jsonResp, mimetype='text/html')
@cache_page(60*20)
def esPandaLogger(request):
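    """ Summarize PanDA logger records from Elasticsearch, aggregated by name, type and level """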
valid, response = initRequest(request)
if not valid: return response
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search, Q
es = Elasticsearch(
hosts=[{'host': 'aianalytics01.cern.ch', 'port': 9200}],
use_ssl=False,
retry_on_timeout=True,
max_retries=3
)
today = time.strftime("%Y-%m-%d")
logindex = 'pandalogger-'+str(today)
logindexdev = 'pandaloggerdev-'+str(today)
#check if dev index exists
indexdev = es.indices.exists(index=logindexdev)
if indexdev:
indices = [logindex,logindexdev]
else:
indices = [logindex]
res = es.search(index=indices, fields=['@message.name', '@message.Type', '@message.levelname'], body={
"aggs": {
"name": {
"terms": {"field": "@message.name"},
"aggs": {
"type": {
"terms": {"field": "@message.Type"},
"aggs": {
"levelname":{
"terms": {"field": "@message.levelname"}
}
}
}
}
}
}
}
)
log={}
for agg in res['aggregations']['name']['buckets']:
name = agg['key']
log[name] = {}
for types in agg['type']['buckets']:
type = types['key']
log[name][type]={}
for levelnames in types['levelname']['buckets']:
levelname = levelnames['key']
log[name][type][levelname]={}
log[name][type][levelname]['levelname'] = levelname
log[name][type][levelname]['lcount'] = str(levelnames['doc_count'])
#print log
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'user' : None,
'log' : log,
}
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
        response = render_to_response('esPandaLogger.html', data, RequestContext(request))
        return response
    else:
        return HttpResponse(json.dumps(log), mimetype='text/html')
@cache_page(60*20)
def pandaLogger(request):
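    """ PanDA logger browser: message counts by category/type/level, plus individual records when a specific filter is given """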
valid, response = initRequest(request)
if not valid: return response
getrecs = False
iquery = {}
if 'category' in request.session['requestParams']:
iquery['name'] = request.session['requestParams']['category']
getrecs = True
if 'type' in request.session['requestParams']:
val = escapeInput(request.session['requestParams']['type'])
iquery['type__in'] = val.split('|')
getrecs = True
if 'level' in request.session['requestParams']:
iquery['levelname'] = request.session['requestParams']['level'].upper()
getrecs = True
if 'taskid' in request.session['requestParams']:
iquery['message__startswith'] = request.session['requestParams']['taskid']
getrecs = True
if 'jeditaskid' in request.session['requestParams']:
iquery['message__icontains'] = "jeditaskid=%s" % request.session['requestParams']['jeditaskid']
getrecs = True
if 'site' in request.session['requestParams']:
iquery['message__icontains'] = "site=%s " %request.session['requestParams']['site']
getrecs = True
if 'pandaid' in request.session['requestParams']:
iquery['pid'] = request.session['requestParams']['pandaid']
getrecs = True
if 'hours' not in request.session['requestParams']:
if getrecs:
hours = 72
else:
hours = 24
else:
hours = int(request.session['requestParams']['hours'])
setupView(request, hours=hours, limit=9999999)
if 'startdate' in request.session['requestParams']:
startdate = request.session['requestParams']['startdate']
else:
startdate = timezone.now() - timedelta(hours=hours)
startdate = startdate.strftime(defaultDatetimeFormat)
if 'enddate' in request.session['requestParams']:
enddate = request.session['requestParams']['enddate']
else:
enddate = timezone.now().strftime(defaultDatetimeFormat)
iquery['bintime__range'] = [startdate, enddate]
print iquery
counts = Pandalog.objects.filter(**iquery).values('name','type','levelname').annotate(Count('levelname')).order_by('name','type','levelname')
if getrecs:
records = Pandalog.objects.filter(**iquery).order_by('bintime').reverse()[:request.session['JOB_LIMIT']].values()
## histogram of logs vs. time, for plotting
logHist = {}
for r in records:
r['message'] = r['message'].replace('<','')
r['message'] = r['message'].replace('>','')
r['levelname'] = r['levelname'].lower()
tm = r['bintime']
tm = tm - timedelta(minutes=tm.minute % 30, seconds=tm.second, microseconds=tm.microsecond)
if not tm in logHist: logHist[tm] = 0
logHist[tm] += 1
kys = logHist.keys()
kys.sort()
logHistL = []
for k in kys:
logHistL.append( [ k, logHist[k] ] )
else:
records = None
logHistL = None
logs = {}
totcount = 0
for inc in counts:
name = inc['name']
type = inc['type']
level = inc['levelname']
count = inc['levelname__count']
totcount += count
if name not in logs:
logs[name] = {}
logs[name]['name'] = name
logs[name]['count'] = 0
logs[name]['types'] = {}
logs[name]['count'] += count
if type not in logs[name]['types']:
logs[name]['types'][type] = {}
logs[name]['types'][type]['name'] = type
logs[name]['types'][type]['count'] = 0
logs[name]['types'][type]['levels'] = {}
logs[name]['types'][type]['count'] += count
if level not in logs[name]['types'][type]['levels']:
logs[name]['types'][type]['levels'][level] = {}
logs[name]['types'][type]['levels'][level]['name'] = level.lower()
logs[name]['types'][type]['levels'][level]['count'] = 0
logs[name]['types'][type]['levels'][level]['count'] += count
## convert to ordered lists
logl = []
for l in logs:
itemd = {}
itemd['name'] = logs[l]['name']
itemd['types'] = []
for t in logs[l]['types']:
logs[l]['types'][t]['levellist'] = []
for v in logs[l]['types'][t]['levels']:
logs[l]['types'][t]['levellist'].append(logs[l]['types'][t]['levels'][v])
logs[l]['types'][t]['levellist'] = sorted(logs[l]['types'][t]['levellist'], key=lambda x:x['name'])
            itemd['types'].append(logs[l]['types'][t])
itemd['types'] = sorted(itemd['types'], key=lambda x:x['name'])
logl.append(itemd)
logl = sorted(logl, key=lambda x:x['name'])
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'user' : None,
'logl' : logl,
'records' : records,
'ninc' : totcount,
'logHist' : logHistL,
'xurl' : extensibleURL(request),
'hours' : hours,
'getrecs' : getrecs,
}
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
##self monitor
endSelfMonitor(request)
response = render_to_response('pandaLogger.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or ('json' in request.session['requestParams']):
resp = data
return HttpResponse(json.dumps(resp, cls=DateEncoder), mimetype='text/html')
# def percentile(N, percent, key=lambda x:x):
# """
# Find the percentile of a list of values.
#
# @parameter N - is a list of values. Note N MUST BE already sorted.
# @parameter percent - a float value from 0.0 to 1.0.
# @parameter key - optional key function to compute value from each element of N.
#
# @return - the percentile of the values
# """
# if not N:
# return None
# k = (len(N)-1) * percent
# f = math.floor(k)
# c = math.ceil(k)
# if f == c:
# return key(N[int(k)])
# d0 = key(N[int(f)]) * (c-k)
# d1 = key(N[int(c)]) * (k-f)
# return d0+d1
def ttc(request):
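    """ Time-to-completion (TTC) progress report for a JEDI production task """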
valid, response = initRequest(request)
if not valid: return response
data = {}
jeditaskid = -1
if 'jeditaskid' in request.session['requestParams']:
jeditaskid = int(request.session['requestParams']['jeditaskid'])
if jeditaskid==-1:
data = {"error":"no jeditaskid supplied"}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), mimetype='text/html')
query = {'jeditaskid':jeditaskid}
task=JediTasks.objects.filter(**query).values('jeditaskid', 'taskname','workinggroup', 'tasktype', 'processingtype', 'ttcrequested', 'starttime', 'endtime', 'creationdate', 'status')
if len(task)==0:
data = {"error": ("jeditaskid " + str(jeditaskid) + " does not exist") }
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), mimetype='text/html')
taskrec=task[0]
    if taskrec['tasktype'] != 'prod' or taskrec['ttcrequested'] is None:
        data = {"error":"TTC for this type of task has not been implemented yet"}
        return HttpResponse(json.dumps(data, cls=DateTimeEncoder), mimetype='text/html')
    ttcdelta = taskrec['ttcrequested'] - taskrec['creationdate']
    taskrec['ttc'] = taskrec['starttime'] + timedelta(seconds=(ttcdelta.days * 24 * 3600 + ttcdelta.seconds))
progressForBar=[]
taskev = GetEventsForTask.objects.filter(**query).values('jeditaskid', 'totev', 'totevrem')
if len(taskev)>0:
taskev = taskev[0]
taskrec['percentage']=((taskev['totev']-taskev['totevrem'])*100/taskev['totev'])
taskrec['percentageok']=taskrec['percentage']-5
        if taskrec['status'] == 'running':
            elapsed = datetime.now() - taskrec['starttime']
            requested = taskrec['ttcrequested'] - taskrec['creationdate']
            elapsedsec = elapsed.days * 24 * 3600 + elapsed.seconds
            requestedsec = requested.days * 24 * 3600 + requested.seconds
            taskrec['ttcbasedpercentage'] = elapsedsec * 100 / requestedsec if datetime.now() < taskrec['ttc'] else 100
            progressForBar = [100, taskrec['percentage'], taskrec['ttcbasedpercentage']]
# tasksetquery={}
# tasksetquery['workinggroup__startswith']='AP' if str(taskrec['workinggroup']).startswith('AP') else 'GP'
# tasksetquery['taskname__startswith']=taskrec['taskname'].split('.')[0]
# tasksetquery['processingtype']=taskrec['processingtype']
# tasksetquery['tasktype']='prod'
# tasksetquery['status__in']=( 'done' , 'finished' )
#
# values=['jeditaskid','starttime']
# taskset = JediTasks.objects.filter(**tasksetquery)[:50].values(*values)
#
# newcur = connection.cursor()
# for task in taskset:
# newcur.execute("select endtime from (SELECT endtime, njobs/totnjobs*100 as percentage from (select endtime, njobs, MAX(njobs) OVER (PARTITION BY jeditaskid) as totnjobs from (select jeditaskid, endtime, row_number() over (PARTITION BY jeditaskid order by endtime ) as njobs from ATLAS_PANDAARCH.JOBSARCHIVED WHERE JEDITASKID=%s and JOBSTATUS='finished'))) where percentage>=%s and rownum<=1", [task['jeditaskid'],taskrec['percentage']])
# tasktime = newcur.fetchone()
# task['percentagetime']=tasktime[0]
# newcur.close()
# duration=[]
# for task in taskset:
# task['durationh']=(task['percentagetime']-task['starttime']).days*24+(task['percentagetime']-task['starttime']).seconds//3600
# duration.append(task['durationh'])
#
# flag='good' if (taskrec['starttime']+ timedelta(hours=round(percentile(sorted(duration),0.95))))>=datetime.now() else 'bad'
# ttcPredicted=taskrec['starttime']+timedelta(hours=((taskrec['ttcrequested']-taskrec['creationdate']).days*24+(taskrec['ttcrequested']-taskrec['creationdate']).seconds//3600))
data = {
'request': request,
'task': taskrec,
'progressForBar': progressForBar,
}
response = render_to_response('ttc.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
@cache_page(60*20)
def workingGroups(request):
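    """ Working group summary: per-group job state counts plus a task summary """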
valid, response = initRequest(request)
if not valid: return response
taskdays = 3
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
VOMODE = 'atlas'
else:
VOMODE = ''
if VOMODE != 'atlas':
days = 30
else:
days = taskdays
hours = days*24
query = setupView(request,hours=hours,limit=999999)
query['workinggroup__isnull'] = False
## WG task summary
tasksummary = wgTaskSummary(request, view='working group', taskdays=taskdays)
## WG job summary
wgsummarydata = wgSummary(query)
wgs = {}
for rec in wgsummarydata:
wg = rec['workinggroup']
if wg == None: continue
jobstatus = rec['jobstatus']
count = rec['jobstatus__count']
if wg not in wgs:
wgs[wg] = {}
wgs[wg]['name'] = wg
wgs[wg]['count'] = 0
wgs[wg]['states'] = {}
wgs[wg]['statelist'] = []
for state in statelist:
wgs[wg]['states'][state] = {}
wgs[wg]['states'][state]['name'] = state
wgs[wg]['states'][state]['count'] = 0
wgs[wg]['count'] += count
wgs[wg]['states'][jobstatus]['count'] += count
errthreshold = 15
## Convert dict to summary list
wgkeys = wgs.keys()
wgkeys.sort()
wgsummary = []
for wg in wgkeys:
for state in statelist:
wgs[wg]['statelist'].append(wgs[wg]['states'][state])
if int(wgs[wg]['states']['finished']['count']) + int(wgs[wg]['states']['failed']['count']) > 0:
wgs[wg]['pctfail'] = int(100.*float(wgs[wg]['states']['failed']['count'])/(wgs[wg]['states']['finished']['count']+wgs[wg]['states']['failed']['count']))
wgsummary.append(wgs[wg])
if len(wgsummary) == 0: wgsummary = None
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
xurl = extensibleURL(request)
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'url' : request.path,
'xurl' : xurl,
'user' : None,
'wgsummary' : wgsummary,
'taskstates' : taskstatedict,
'tasksummary' : tasksummary,
'hours' : hours,
'days' : days,
'errthreshold' : errthreshold,
}
##self monitor
endSelfMonitor(request)
response = render_to_response('workingGroups.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
resp = []
return HttpResponse(json.dumps(resp), mimetype='text/html')
def datasetInfo(request):
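    """ Show details of one dataset, looked up in JediDatasets with a fallback to the legacy Datasets table """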
valid, response = initRequest(request)
if not valid: return response
setupView(request, hours=365*24, limit=999999999)
query = {}
dsets = []
dsrec = None
colnames = []
columns = []
if 'datasetname' in request.session['requestParams']:
dataset = request.session['requestParams']['datasetname']
query['datasetname'] = request.session['requestParams']['datasetname']
elif 'datasetid' in request.session['requestParams']:
dataset = request.session['requestParams']['datasetid']
query['datasetid'] = request.session['requestParams']['datasetid']
else:
dataset = None
if 'jeditaskid' in request.session['requestParams']:
query['jeditaskid'] = int(request.session['requestParams']['jeditaskid'])
if dataset:
dsets = JediDatasets.objects.filter(**query).values()
if len(dsets) == 0:
startdate = timezone.now() - timedelta(hours=30*24)
startdate = startdate.strftime(defaultDatetimeFormat)
enddate = timezone.now().strftime(defaultDatetimeFormat)
query = { 'modificationdate__range' : [startdate, enddate] }
if 'datasetname' in request.session['requestParams']:
query['name'] = request.session['requestParams']['datasetname']
elif 'datasetid' in request.session['requestParams']:
query['vuid'] = request.session['requestParams']['datasetid']
moredsets = Datasets.objects.filter(**query).values()
if len(moredsets) > 0:
dsets = moredsets
for ds in dsets:
ds['datasetname'] = ds['name']
ds['creationtime'] = ds['creationdate']
ds['modificationtime'] = ds['modificationdate']
ds['nfiles'] = ds['numberfiles']
ds['datasetid'] = ds['vuid']
if len(dsets) > 0:
dsrec = dsets[0]
dataset = dsrec['datasetname']
colnames = dsrec.keys()
colnames.sort()
for k in colnames:
val = dsrec[k]
            if dsrec[k] is None:
                continue
pair = { 'name' : k, 'value' : val }
columns.append(pair)
del request.session['TFIRST']
del request.session['TLAST']
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'dsrec' : dsrec,
'datasetname' : dataset,
'columns' : columns,
}
data.update(getContextVariables(request))
##self monitor
endSelfMonitor(request)
response = render_to_response('datasetInfo.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
        return HttpResponse(json.dumps(dsrec, cls=DateEncoder), mimetype='text/html')
def datasetList(request):
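    """ List JEDI datasets selected by jeditaskid and/or containername """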
valid, response = initRequest(request)
if not valid: return response
setupView(request, hours=365*24, limit=999999999)
query = {}
dsets = []
for par in ( 'jeditaskid', 'containername' ):
if par in request.session['requestParams']:
query[par] = request.session['requestParams'][par]
if len(query) > 0:
dsets = JediDatasets.objects.filter(**query).values()
dsets = sorted(dsets, key=lambda x:x['datasetname'].lower())
del request.session['TFIRST']
del request.session['TLAST']
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
data = {
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'datasets' : dsets,
}
data.update(getContextVariables(request))
##self monitor
endSelfMonitor(request)
response = render_to_response('datasetList.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
        return HttpResponse(json.dumps(dsets, cls=DateEncoder), mimetype='text/html')
def fileInfo(request):
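    """ Show details of a file found by lfn, fileid or guid, searched in JEDI dataset contents with a fallback to the files tables """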
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
JediDatasetsTableName = "ATLAS_PANDA.JEDI_DATASETS"
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
else:
JediDatasetsTableName = "JEDI_DATASETS"
tmpTableName = "TMP_IDS1"
random.seed()
transactionKey = random.randrange(1000000)
valid, response = initRequest(request)
if not valid: return response
setupView(request, hours=365*24, limit=999999999)
query = {}
files = []
frec = None
colnames = []
columns = []
if 'filename' in request.session['requestParams']:
file = request.session['requestParams']['filename']
query['lfn'] = request.session['requestParams']['filename']
elif 'lfn' in request.session['requestParams']:
file = request.session['requestParams']['lfn']
query['lfn'] = request.session['requestParams']['lfn']
elif 'fileid' in request.session['requestParams']:
file = request.session['requestParams']['fileid']
query['fileid'] = request.session['requestParams']['fileid']
elif 'guid' in request.session['requestParams']:
file = request.session['requestParams']['guid']
query['guid'] = request.session['requestParams']['guid']
else:
file = None
startdate = None
if 'date_from' in request.session['requestParams']:
time_from_struct = time.strptime(request.session['requestParams']['date_from'],'%Y-%m-%d')
startdate = datetime.utcfromtimestamp(time.mktime(time_from_struct))
if not startdate:
startdate = timezone.now() - timedelta(hours=365*24)
# startdate = startdate.strftime(defaultDatetimeFormat)
enddate = None
if 'date_to' in request.session['requestParams']:
time_from_struct = time.strptime(request.session['requestParams']['date_to'],'%Y-%m-%d')
enddate = datetime.utcfromtimestamp(time.mktime(time_from_struct))
if enddate == None:
enddate = timezone.now() # .strftime(defaultDatetimeFormat)
query['creationdate__range'] = [startdate.strftime(defaultDatetimeFormat), enddate.strftime(defaultDatetimeFormat)]
if 'pandaid' in request.session['requestParams'] and request.session['requestParams']['pandaid'] != '':
query['pandaid'] = request.session['requestParams']['pandaid']
if 'jeditaskid' in request.session['requestParams'] and request.session['requestParams']['jeditaskid'] != '':
query['jeditaskid'] = request.session['requestParams']['jeditaskid']
if 'scope' in request.session['requestParams']:
query['scope'] = request.session['requestParams']['scope']
    if file or ('pandaid' in query) or ('jeditaskid' in query):
files = JediDatasetContents.objects.filter(**query).values()
if len(files) == 0:
del query['creationdate__range']
query['modificationtime__range'] = [startdate.strftime(defaultDatetimeFormat),
enddate.strftime(defaultDatetimeFormat)]
morefiles = Filestable4.objects.filter(**query).values()
if len(morefiles) == 0:
morefiles = FilestableArch.objects.filter(**query).values()
if len(morefiles) > 0:
files = morefiles
for f in files:
f['creationdate'] = f['modificationtime']
f['fileid'] = f['row_id']
f['datasetname'] = f['dataset']
f['oldfiletable'] = 1
connection.enter_transaction_management()
new_cur = connection.cursor()
executionData = []
for id in files:
executionData.append((id['datasetid'],transactionKey))
query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
new_cur.executemany(query, executionData)
connection.commit()
new_cur.execute(
"SELECT DATASETNAME,DATASETID FROM %s WHERE DATASETID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (
JediDatasetsTableName, tmpTableName, transactionKey))
mrecs = dictfetchall(new_cur)
mrecsDict = {}
for mrec in mrecs:
mrecsDict[mrec['DATASETID']] = mrec['DATASETNAME']
for f in files:
f['fsizemb'] = "%0.2f" % (f['fsize'] / 1000000.)
        if f['datasetid'] in mrecsDict:
            f['datasetname'] = mrecsDict[f['datasetid']]
if len(files) > 0:
files = sorted(files, key=lambda x:x['pandaid'], reverse=True)
frec = files[0]
file = frec['lfn']
colnames = frec.keys()
colnames.sort()
for k in colnames:
val = frec[k]
            if frec[k] is None:
                continue
pair = { 'name' : k, 'value' : val }
columns.append(pair)
del request.session['TFIRST']
del request.session['TLAST']
for file_ in files:
if 'startevent' in file_:
if (file_['startevent'] != None):
file_['startevent'] += 1
if 'endevent' in file_:
if (file_['endevent'] != None):
file_['endevent'] += 1
if ((len(files) > 0) and ('jeditaskid' in files[0]) and ('startevent' in files[0]) and (files[0]['jeditaskid'] != None)):
files = sorted(files, key=lambda k: (-k['jeditaskid'], k['startevent']))
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'frec' : frec,
'files' : files,
'filename' : file,
'columns' : columns,
}
data.update(getContextVariables(request))
##self monitor
endSelfMonitor(request)
response = render_to_response('fileInfo.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
data = {
'frec' : frec,
'files' : files,
'filename' : file,
'columns' : columns,
}
return HttpResponse(json.dumps(data, cls=DateEncoder), mimetype='text/html')
def fileList(request):
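    """ List the files of a dataset given by datasetname or datasetid, sortable and limited (default 100 rows) """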
valid, response = initRequest(request)
if not valid: return response
setupView(request, hours=365*24, limit=999999999)
query = {}
files = []
frec = None
colnames = []
columns = []
datasetname = ''
datasetid = 0
#### It's dangerous when dataset name is not unique over table
if 'datasetname' in request.session['requestParams']:
datasetname = request.session['requestParams']['datasetname']
dsets = JediDatasets.objects.filter(datasetname=datasetname).values()
if len(dsets) > 0:
datasetid = dsets[0]['datasetid']
elif 'datasetid' in request.session['requestParams']:
datasetid = request.session['requestParams']['datasetid']
dsets = JediDatasets.objects.filter(datasetid=datasetid).values()
if len(dsets) > 0:
datasetname = dsets[0]['datasetname']
files = []
limit = 100
if 'limit' in request.session['requestParams']:
limit = int(request.session['requestParams']['limit'])
    sortOrder = None
    reverse = None
    sortby = ''
    if 'sortby' in request.session['requestParams']:
        sortby = request.session['requestParams']['sortby']
    ## Map 'field-asc'/'field-desc' sortby values onto model fields; anything else sorts by lfn
    sortmap = { 'lfn' : 'lfn', 'scope' : 'scope', 'type' : 'type', 'fsizemb' : 'fsize',
                'nevents' : 'nevents', 'jeditaskid' : 'jeditaskid', 'fileid' : 'fileid',
                'attemptnr' : 'attemptnr', 'status' : 'status', 'creationdate' : 'creationdate',
                'pandaid' : 'pandaid' }
    field, sep, direction = sortby.partition('-')
    if field in sortmap:
        sortOrder = sortmap[field]
        if direction == 'desc':
            reverse = True
    else:
        sortOrder = 'lfn'
if datasetid > 0:
query['datasetid'] = datasetid
if (reverse):
files = JediDatasetContents.objects.filter(**query).values().order_by(sortOrder).reverse()[:limit+1]
else:
files = JediDatasetContents.objects.filter(**query).values().order_by(sortOrder)[:limit+1]
if len(files) > limit:
limitexceeded = True
else:
limitexceeded = False
files = files[:limit]
for f in files:
f['fsizemb'] = "%0.2f" % (f['fsize']/1000000.)
## Count the number of distinct files
filed = {}
for f in files:
filed[f['lfn']] = 1
nfiles = len(filed)
del request.session['TFIRST']
del request.session['TLAST']
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
xurl = extensibleURL(request)
nosorturl = removeParam(xurl, 'sortby',mode='extensible')
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'files' : files,
'nfiles' : nfiles,
'nosorturl' : nosorturl,
'sortby' : sortby,
'limitexceeded':limitexceeded
}
##self monitor
endSelfMonitor(request)
data.update(getContextVariables(request))
response = render_to_response('fileList.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
return HttpResponse(json.dumps(files), mimetype='text/html')
@cache_page(60*20)
def workQueues(request):
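    """ List JEDI work queues, filtered by any work queue model field present in the request """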
valid, response = initRequest(request)
if not valid: return response
setupView(request, hours=180*24, limit=9999999)
query = {}
for param in request.session['requestParams']:
for field in JediWorkQueue._meta.get_all_field_names():
if param == field:
query[param] = request.session['requestParams'][param]
queues = JediWorkQueue.objects.filter(**query).order_by('queue_type','queue_order').values()
#queues = sorted(queues, key=lambda x:x['queue_name'],reverse=True)
del request.session['TFIRST']
del request.session['TLAST']
    if ( not ( ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and ('json' not in request.session['requestParams'])):
data = {
'request' : request,
'viewParams' : request.session['viewParams'],
'requestParams' : request.session['requestParams'],
'queues': queues,
'xurl' : extensibleURL(request),
}
##self monitor
endSelfMonitor(request)
response = render_to_response('workQueues.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes']*60)
return response
else:
return HttpResponse(json.dumps(queues), mimetype='text/html')
def stateNotUpdated(request, state='transferring', hoursSinceUpdate=36, values = standard_fields, count = False, wildCardExtension='(1=1)'):
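    """ Find jobs stuck in a state (default transferring) with no state change for hoursSinceUpdate hours; with count=True return per-cloud counts instead of the job list """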
valid, response = initRequest(request)
if not valid: return response
query = setupView(request, opmode='notime', limit=99999999)
if 'jobstatus' in request.session['requestParams']: state = request.session['requestParams']['jobstatus']
if 'transferringnotupdated' in request.session['requestParams']: hoursSinceUpdate = int(request.session['requestParams']['transferringnotupdated'])
if 'statenotupdated' in request.session['requestParams']: hoursSinceUpdate = int(request.session['requestParams']['statenotupdated'])
moddate = timezone.now() - timedelta(hours=hoursSinceUpdate)
moddate = moddate.strftime(defaultDatetimeFormat)
mindate = timezone.now() - timedelta(hours=24*30)
mindate = mindate.strftime(defaultDatetimeFormat)
query['statechangetime__lte'] = moddate
#query['statechangetime__gte'] = mindate
query['jobstatus'] = state
if count:
jobs = []
jobs.extend(Jobsactive4.objects.filter(**query).extra(where=[wildCardExtension]).values('cloud','computingsite','jobstatus').annotate(Count('jobstatus')))
jobs.extend(Jobsdefined4.objects.filter(**query).extra(where=[wildCardExtension]).values('cloud','computingsite','jobstatus').annotate(Count('jobstatus')))
jobs.extend(Jobswaiting4.objects.filter(**query).extra(where=[wildCardExtension]).values('cloud','computingsite','jobstatus').annotate(Count('jobstatus')))
ncount = 0
perCloud = {}
perRCloud = {}
for cloud in cloudList:
perCloud[cloud] = 0
perRCloud[cloud] = 0
for job in jobs:
site = job['computingsite']
if site in homeCloud:
cloud = homeCloud[site]
if not cloud in perCloud:
perCloud[cloud] = 0
perCloud[cloud] += job['jobstatus__count']
cloud = job['cloud']
if not cloud in perRCloud:
perRCloud[cloud] = 0
perRCloud[cloud] += job['jobstatus__count']
ncount += job['jobstatus__count']
perCloudl = []
for c in perCloud:
pcd = { 'name' : c, 'count' : perCloud[c] }
perCloudl.append(pcd)
perCloudl = sorted(perCloudl, key=lambda x:x['name'])
perRCloudl = []
for c in perRCloud:
pcd = { 'name' : c, 'count' : perRCloud[c] }
perRCloudl.append(pcd)
perRCloudl = sorted(perRCloudl, key=lambda x:x['name'])
return ncount, perCloudl, perRCloudl
else:
jobs = []
jobs.extend(Jobsactive4.objects.filter(**query).extra(where=[wildCardExtension]).values(*values))
jobs.extend(Jobsdefined4.objects.filter(**query).extra(where=[wildCardExtension]).values(*values))
jobs.extend(Jobswaiting4.objects.filter(**query).extra(where=[wildCardExtension]).values(*values))
return jobs
def taskNotUpdated(request, query, state='submitted', hoursSinceUpdate=36, values = [], count = False, wildCardExtension='(1=1)'):
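    """ Find tasks stuck in a state (default submitted) with no state change for hoursSinceUpdate hours; with count=True return per-state counts instead of the task list """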
valid, response = initRequest(request)
if not valid: return response
#query = setupView(request, opmode='notime', limit=99999999)
if 'status' in request.session['requestParams']: state = request.session['requestParams']['status']
if 'statenotupdated' in request.session['requestParams']: hoursSinceUpdate = int(request.session['requestParams']['statenotupdated'])
moddate = timezone.now() - timedelta(hours=hoursSinceUpdate)
moddate = moddate.strftime(defaultDatetimeFormat)
mindate = timezone.now() - timedelta(hours=24*30)
mindate = mindate.strftime(defaultDatetimeFormat)
query['statechangetime__lte'] = moddate
#query['statechangetime__gte'] = mindate
query['status'] = state
if count:
tasks = JediTasks.objects.filter(**query).extra(where=[wildCardExtension]).values('name','status').annotate(Count('status'))
statecounts = {}
for s in taskstatelist:
statecounts[s] = {}
statecounts[s]['count'] = 0
statecounts[s]['name'] = s
ncount = 0
for task in tasks:
state = task['status']
statecounts[state]['count'] += task['status__count']
            ncount += task['status__count']
return ncount, statecounts
else:
tasks = JediTasks.objects.filter(**query).extra(where=[wildCardExtension]).values()
return tasks
def getErrorDescription(job, mode='html'):
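    """ Compose a human-readable error description for a job from its error codes and diagnostics """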
txt = ''
if 'metastruct' in job and job['metastruct']['exitCode'] != 0:
meta = job['metastruct']
txt += "%s: %s" % (meta['exitAcronym'], meta['exitMsg'])
return txt
for errcode in errorCodes.keys():
errval = 0
if job.has_key(errcode):
errval = job[errcode]
if errval != 0 and errval != '0' and errval != None and errval != '':
try:
errval = int(errval)
except:
pass # errval = -1
errdiag = errcode.replace('errorcode','errordiag')
            if errcode.find('errorcode') > 0:
                diagtxt = job[errdiag] if job[errdiag] is not None else ''
            else:
                diagtxt = ''
if len(diagtxt) > 0:
desc = diagtxt
elif errval in errorCodes[errcode]:
desc = errorCodes[errcode][errval]
else:
desc = "Unknown %s error code %s" % ( errcode, errval )
errname = errcode.replace('errorcode','')
errname = errname.replace('exitcode','')
if mode == 'html':
txt += " <b>%s:</b> %s" % ( errname, desc )
else:
txt = "%s: %s" % ( errname, desc )
return txt
def getPilotCounts(view):
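    """ Per-site pilot activity (getjob + updatejob counts) from the last 3 hours of site data """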
query = {}
query['flag'] = view
query['hours'] = 3
rows = Sitedata.objects.filter(**query).values()
pilotd = {}
for r in rows:
site = r['site']
if not site in pilotd: pilotd[site] = {}
pilotd[site]['count'] = r['getjob'] + r['updatejob']
pilotd[site]['time'] = r['lastmod']
return pilotd
def taskNameDict(jobs):
## Translate IDs to names. Awkward because models don't provide foreign keys to task records.
taskids = {}
jeditaskids = {}
for job in jobs:
if 'taskid' in job and job['taskid'] and job['taskid'] > 0:
taskids[job['taskid']] = 1
if 'jeditaskid' in job and job['jeditaskid'] and job['jeditaskid'] > 0: jeditaskids[job['jeditaskid']] = 1
taskidl = taskids.keys()
jeditaskidl = jeditaskids.keys()
tasknamedict = {}
if len(jeditaskidl) > 0:
tq = { 'jeditaskid__in' : jeditaskidl }
jeditasks = JediTasks.objects.filter(**tq).values('taskname', 'jeditaskid')
for t in jeditasks:
tasknamedict[t['jeditaskid']] = t['taskname']
#if len(taskidl) > 0:
# tq = { 'taskid__in' : taskidl }
# oldtasks = Etask.objects.filter(**tq).values('taskname', 'taskid')
# for t in oldtasks:
# tasknamedict[t['taskid']] = t['taskname']
return tasknamedict
class DateEncoder(json.JSONEncoder):
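    """ JSON encoder that renders datetime-like objects via isoformat() and anything else via str() """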
    def default(self, obj):
        if hasattr(obj, 'isoformat'):
            return obj.isoformat()
        return str(obj)
def getFilePathForObjectStore(objectstore, filetype="logs"):
""" Return a proper file path in the object store """
# For single object stores
# root://atlas-objectstore.cern.ch/|eventservice^/atlas/eventservice|logs^/atlas/logs
# For multiple object stores
# eventservice^root://atlas-objectstore.cern.ch//atlas/eventservice|logs^root://atlas-objectstore.bnl.gov//atlas/logs
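    # e.g. for the multiple-store form above and filetype="logs" the result is
    # root://atlas-objectstore.bnl.gov//atlas/logs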
basepath = ""
# Which form of the schedconfig.objectstore field do we currently have?
if objectstore != "":
_objectstore = objectstore.split("|")
if "^" in _objectstore[0]:
for obj in _objectstore:
if obj[:len(filetype)] == filetype:
basepath = obj.split("^")[1]
break
else:
_objectstore = objectstore.split("|")
url = _objectstore[0]
for obj in _objectstore:
if obj[:len(filetype)] == filetype:
basepath = obj.split("^")[1]
break
if basepath != "":
if url.endswith('/') and basepath.startswith('/'):
basepath = url + basepath[1:]
else:
basepath = url + basepath
if basepath == "":
print "Object store path could not be extracted using file type \'%s\' from objectstore=\'%s\'" % (filetype, objectstore)
else:
print "Object store not defined in queuedata"
return basepath
def buildGoogleFlowDiagram(request, jobs=[], tasks=[]):
## set up google flow diagram
if 'flow' not in request.session['requestParams']: return None
flowstruct = {}
if len(jobs) > 0:
flowstruct['maxweight'] = len(jobs)
flowrows = buildGoogleJobFlow(jobs)
elif len(tasks) > 0:
flowstruct['maxweight'] = len(tasks)
flowrows = buildGoogleTaskFlow(request, tasks)
else:
return None
flowstruct['columns'] = [ ['string', 'From'], ['string', 'To'], ['number', 'Weight'] ]
flowstruct['rows'] = flowrows[:3000]
return flowstruct
def buildGoogleJobFlow(jobs):
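    """ Build flow diagram rows for jobs: MCP cloud -> cloud -> site -> error -> processing type """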
cloudd = {}
mcpcloudd = {}
mcpshownd = {}
errd = {}
errshownd = {}
sited = {}
statd = {}
errcountd = {}
sitecountd = {}
siteshownd = {}
ptyped = {}
ptypecountd = {}
ptypeshownd = {}
for job in jobs:
errinfo = errorInfo(job,nchars=40,mode='string')
jobstatus = job['jobstatus']
for js in ( 'finished', 'holding', 'merging', 'running', 'cancelled', 'transferring', 'starting' ):
if jobstatus == js: errinfo = js
if errinfo not in errcountd: errcountd[errinfo] = 0
errcountd[errinfo] += 1
cloud = job['homecloud']
mcpcloud = job['cloud']
ptype = job['processingtype']
if ptype not in ptypecountd: ptypecountd[ptype] = 0
ptypecountd[ptype] += 1
site = job['computingsite']
if site not in sitecountd: sitecountd[site] = 0
sitecountd[site] += 1
if cloud not in cloudd: cloudd[cloud] = {}
if site not in cloudd[cloud]: cloudd[cloud][site] = 0
cloudd[cloud][site] += 1
if mcpcloud not in mcpcloudd: mcpcloudd[mcpcloud] = {}
if cloud not in mcpcloudd[mcpcloud]: mcpcloudd[mcpcloud][cloud] = 0
mcpcloudd[mcpcloud][cloud] += 1
if jobstatus not in errd: errd[jobstatus] = {}
if errinfo not in errd[jobstatus]: errd[jobstatus][errinfo] = 0
errd[jobstatus][errinfo] += 1
if site not in sited: sited[site] = {}
if errinfo not in sited[site]: sited[site][errinfo] = 0
sited[site][errinfo] += 1
if jobstatus not in statd: statd[jobstatus] = {}
if errinfo not in statd[jobstatus]: statd[jobstatus][errinfo] = 0
statd[jobstatus][errinfo] += 1
if ptype not in ptyped: ptyped[ptype] = {}
if errinfo not in ptyped[ptype]: ptyped[ptype][errinfo] = 0
ptyped[ptype][errinfo] += 1
flowrows = []
for mcpcloud in mcpcloudd:
for cloud in mcpcloudd[mcpcloud]:
n = mcpcloudd[mcpcloud][cloud]
if float(n)/len(jobs)>0.0:
mcpshownd[mcpcloud] = 1
flowrows.append( [ "%s MCP" % mcpcloud, cloud, n ] )
othersited = {}
othersiteErrd = {}
for cloud in cloudd:
if cloud not in mcpshownd: continue
for e in cloudd[cloud]:
n = cloudd[cloud][e]
if float(sitecountd[e])/len(jobs)>.01:
siteshownd[e] = 1
flowrows.append( [ cloud, e, n ] )
else:
flowrows.append( [ cloud, 'Other sites', n ] )
othersited[e] = n
#for jobstatus in errd:
# for errinfo in errd[jobstatus]:
# flowrows.append( [ errinfo, jobstatus, errd[jobstatus][errinfo] ] )
for e in errcountd:
if float(errcountd[e])/len(jobs)>.01:
errshownd[e] = 1
for site in sited:
for e in sited[site]:
n = sited[site][e]
if site in siteshownd:
sitename = site
else:
sitename = "Other sites"
if e in errshownd:
errname = e
else:
errname = 'Other errors'
flowrows.append( [ sitename, errname, n ] )
if errname not in othersiteErrd: othersiteErrd[errname] = 0
othersiteErrd[errname] += n
#for e in othersiteErrd:
# if e in errshownd:
# flowrows.append( [ 'Other sites', e, othersiteErrd[e] ] )
for ptype in ptyped:
if float(ptypecountd[ptype])/len(jobs)>.05:
ptypeshownd[ptype] = 1
ptname = ptype
else:
ptname = "Other processing types"
for e in ptyped[ptype]:
n = ptyped[ptype][e]
if e in errshownd:
flowrows.append( [ e, ptname, n ] )
else:
flowrows.append( [ 'Other errors', ptname, n ] )
return flowrows
def buildGoogleTaskFlow(request, tasks):
analysis = 'tasktype' in request.session['requestParams'] and request.session['requestParams']['tasktype'].startswith('anal')
ptyped = {}
reqd = {}
statd = {}
substatd = {}
trfd = {}
filestatd = {}
cloudd = {}
reqsized = {}
reqokd = {}
## count the reqid's. Use only the biggest (in file count) if too many.
for task in tasks:
if not analysis and 'deftreqid' not in task: continue
req = int(task['reqid'])
dsinfo = task['dsinfo']
nfiles = dsinfo['nfiles']
if req not in reqsized: reqsized[req] = 0
reqsized[req] += nfiles
## Veto requests that are all done etc.
if task['superstatus'] != 'done': reqokd[req] = 1
if not analysis:
for req in reqsized:
# de-prioritize requests not specifically OK'd for inclusion
if req not in reqokd: reqsized[req] = 0
nmaxreq = 10
if len(reqsized) > nmaxreq:
reqkeys = reqsized.keys()
reqsortl = sorted(reqkeys, key=reqsized.__getitem__, reverse=True)
reqsortl = reqsortl[:nmaxreq-1]
else:
reqsortl = reqsized.keys()
for task in tasks:
ptype = task['processingtype']
#if 'jedireqid' not in task: continue
req = int(task['reqid'])
if not analysis and req not in reqsortl: continue
stat = task['superstatus']
substat = task['status']
#trf = task['transpath']
trf = task['taskname']
cloud = task['cloud']
if cloud == '': cloud = 'No cloud assigned'
dsinfo = task['dsinfo']
nfailed = dsinfo['nfilesfailed']
nfinished = dsinfo['nfilesfinished']
nfiles = dsinfo['nfiles']
npending = nfiles - nfailed - nfinished
if ptype not in ptyped: ptyped[ptype] = {}
if req not in ptyped[ptype]: ptyped[ptype][req] = 0
ptyped[ptype][req] += nfiles
if req not in reqd: reqd[req] = {}
if stat not in reqd[req]: reqd[req][stat] = 0
reqd[req][stat] += nfiles
if trf not in trfd: trfd[trf] = {}
if stat not in trfd[trf]: trfd[trf][stat] = 0
trfd[trf][stat] += nfiles
if stat not in statd: statd[stat] = {}
if substat not in statd[stat]: statd[stat][substat] = 0
statd[stat][substat] += nfiles
if substat not in substatd: substatd[substat] = {}
if 'finished' not in substatd[substat]:
for filestat in ('finished', 'failed', 'pending'):
substatd[substat][filestat] = 0
substatd[substat]['finished'] += nfinished
substatd[substat]['failed'] += nfailed
substatd[substat]['pending'] += npending
if cloud not in cloudd: cloudd[cloud] = {}
if 'finished' not in cloudd[cloud]:
for filestat in ('finished', 'failed', 'pending'):
cloudd[cloud][filestat] = 0
cloudd[cloud]['finished'] += nfinished
cloudd[cloud]['failed'] += nfailed
cloudd[cloud]['pending'] += npending
flowrows = []
if analysis:
## Don't include request, task for analysis
for trf in trfd:
for stat in trfd[trf]:
n = trfd[trf][stat]
flowrows.append( [ trf, 'Task %s' % stat, n ] )
else:
for ptype in ptyped:
for req in ptyped[ptype]:
n = ptyped[ptype][req]
flowrows.append( [ ptype, 'Request %s' % req, n ] )
for req in reqd:
for stat in reqd[req]:
n = reqd[req][stat]
flowrows.append( [ 'Request %s' % req, 'Task %s' % stat, n ] )
for stat in statd:
for substat in statd[stat]:
n = statd[stat][substat]
flowrows.append( [ 'Task %s' % stat, 'Substatus %s' % substat, n ] )
for substat in substatd:
for filestat in substatd[substat]:
if filestat not in substatd[substat]: continue
n = substatd[substat][filestat]
flowrows.append( [ 'Substatus %s' % substat, 'File status %s' % filestat, n ] )
for cloud in cloudd:
for filestat in cloudd[cloud]:
if filestat not in cloudd[cloud]: continue
n = cloudd[cloud][filestat]
flowrows.append( [ 'File status %s' % filestat, cloud, n ] )
return flowrows
def dictfetchall(cursor):
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
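# Usage sketch for dictfetchall (the query and column names below are
# hypothetical, shown for documentation only):
#
# cursor = connection.cursor()
# cursor.execute("SELECT PANDAID, JOBSTATUS FROM SOME_JOBS_TABLE")
# rows = dictfetchall(cursor)
# # -> [{'PANDAID': 1234567, 'JOBSTATUS': 'finished'}, ...]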
# This function was made backend-dependent to avoid passing numerous arguments in the metadata query.
# Transactions and explicit cursors are used due to possible issues with django connection pooling.
def addJobMetadata(jobs, require = False):
print 'adding metadata'
pids = []
for job in jobs:
if (job['jobstatus'] == 'failed' or require): pids.append(job['pandaid'])
query = {}
query['pandaid__in'] = pids
mdict = {}
## Get job metadata
random.seed()
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
metaTableName = "ATLAS_PANDA.METATABLE"
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
else:
metaTableName = "METATABLE"
tmpTableName = "TMP_IDS1"
transactionKey = random.randrange(1000000)
connection.enter_transaction_management()
new_cur = connection.cursor()
for id in pids:
new_cur.execute("INSERT INTO %s(ID,TRANSACTIONKEY) VALUES (%i,%i)" % (tmpTableName,id,transactionKey)) # Backend dependable
connection.commit()
new_cur.execute("SELECT METADATA,MODIFICATIONTIME,PANDAID FROM %s WHERE PANDAID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (metaTableName, tmpTableName, transactionKey))
mrecs = dictfetchall(new_cur)
for m in mrecs:
try:
mdict[m['PANDAID']] = m['METADATA']
except:
pass
for job in jobs:
if job['pandaid'] in mdict:
try:
job['metastruct'] = json.loads(mdict[job['pandaid']].read())
except:
pass
#job['metadata'] = mdict[job['pandaid']]
print 'added metadata'
new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
connection.commit()
connection.leave_transaction_management()
return jobs
##self monitor
def g4exceptions(request):
valid, response = initRequest(request)
setupView(request, hours=365*24, limit=999999999)
if 'hours' in request.session['requestParams']:
hours = int(request.session['requestParams']['hours'])
else:
hours = 3
query,wildCardExtension,LAST_N_HOURS_MAX = setupView(request, hours=hours, wildCardExt=True)
query['jobstatus__in'] = [ 'failed', 'holding' ]
query['exeerrorcode'] = 68
query['exeerrordiag__icontains'] = 'G4 exception'
values = 'pandaid', 'atlasrelease', 'exeerrorcode', 'exeerrordiag', 'jobstatus', 'transformation'
jobs = []
jobs.extend(Jobsactive4.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobsarchived4.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(*values))
if (((datetime.now() - datetime.strptime(query['modificationtime__range'][0], "%Y-%m-%d %H:%M:%S" )).days > 1) or \
((datetime.now() - datetime.strptime(query['modificationtime__range'][1], "%Y-%m-%d %H:%M:%S" )).days > 1)):
jobs.extend(Jobsarchived.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(*values))
if 'amitag' in request.session['requestParams']:
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
else:
tmpTableName = "TMP_IDS1"
transactionKey = random.randrange(1000000)
connection.enter_transaction_management()
new_cur = connection.cursor()
for job in jobs:
new_cur.execute("INSERT INTO %s(ID,TRANSACTIONKEY) VALUES (%i,%i)" % (tmpTableName,job['pandaid'],transactionKey)) # Backend dependable
connection.commit()
new_cur.execute("SELECT JOBPARAMETERS, PANDAID FROM ATLAS_PANDA.JOBPARAMSTABLE WHERE PANDAID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey))
mrecs = dictfetchall(new_cur)
connection.commit()
connection.leave_transaction_management()
jobsToRemove = set()
for rec in mrecs:
acceptJob = True
parameters = rec['JOBPARAMETERS'].read()
tagName = "--AMITag"
startPos = parameters.find(tagName)
if startPos == -1:
acceptJob = False
else:
endPos = parameters.find(" ", startPos)
AMITag = parameters[startPos+len(tagName)+1:endPos]
if AMITag != request.session['requestParams']['amitag']:
acceptJob = False
if acceptJob == False:
jobsToRemove.add(rec['PANDAID'])
jobs = filter(lambda x: not x['pandaid'] in jobsToRemove, jobs)
jobs = addJobMetadata(jobs, True)
errorFrequency = {}
errorJobs = {}
for job in jobs:
if (job['metastruct']['executor'][0]['logfileReport']['countSummary']['FATAL'] > 0):
message = job['metastruct']['executor'][0]['logfileReport']['details']['FATAL'][0]['message']
exceptMess = message[message.find("G4Exception :") + 14 : message.find("issued by :") -1 ]
if exceptMess not in errorFrequency:
errorFrequency[exceptMess] = 1
else:
errorFrequency[exceptMess] += 1
if exceptMess not in errorJobs:
errorJobs[exceptMess] = []
errorJobs[exceptMess].append(job['pandaid'])
else:
errorJobs[exceptMess].append(job['pandaid'])
resp = {'errorFrequency': errorFrequency, 'errorJobs':errorJobs}
del request.session['TFIRST']
del request.session['TLAST']
return HttpResponse(json.dumps(resp), content_type='text/plain')
def initSelfMonitor(request):
import psutil
server = request.session['hostname']
if 'HTTP_X_FORWARDED_FOR' in request.META:
remote=request.META['HTTP_X_FORWARDED_FOR']
else:
remote=request.META['REMOTE_ADDR']
urlProto=request.META['wsgi.url_scheme']
if 'HTTP_X_FORWARDED_PROTO' in request.META:
urlProto=request.META['HTTP_X_FORWARDED_PROTO']
urlProto=str(urlProto)+"://"
try:
urls=urlProto+request.META['SERVER_NAME']+request.META['REQUEST_URI']
except:
urls='localhost'
qtime = str(timezone.now())
load = psutil.cpu_percent(interval=1)
mem = psutil.virtual_memory().percent
request.session["qtime"] = qtime
request.session["load"] = load
request.session["remote"] = remote
request.session["mem"] = mem
request.session["urls"] = urls
def endSelfMonitor(request):
qduration = str(timezone.now())
request.session['qduration'] = qduration
try:
duration = (datetime.strptime(request.session['qduration'], "%Y-%m-%d %H:%M:%S.%f") - datetime.strptime(request.session['qtime'], "%Y-%m-%d %H:%M:%S.%f")).seconds
except:
duration = 0
reqs = RequestStat(
server = request.session['hostname'],
qtime = request.session['qtime'],
load = request.session['load'],
mem = request.session['mem'],
qduration = request.session['qduration'],
duration = duration,
remote = request.session['remote'],
urls = request.session['urls'],
description=' '
)
reqs.save()
|
tkorchug/panda-bigmon-core
|
core/views.py
|
Python
|
apache-2.0
| 340,870
|
[
"VisIt"
] |
df51f7258c046fa48374ecc2faf90461f15d8ce88ba773ee5323d5a9765b7dd3
|
import numpy
import mlpy
import time
import scipy
import os
import audioFeatureExtraction as aF
import audioTrainTest as aT
import audioBasicIO
import matplotlib.pyplot as plt
from scipy.spatial import distance
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.lda import LDA
import csv
import os.path
import sklearn
import sklearn.hmm
import cPickle
import glob
""" General utility functions """
def smoothMovingAvg(inputSignal, windowLen=11):
windowLen = int(windowLen)
if inputSignal.ndim != 1:
raise ValueError("")
if inputSignal.size < windowLen:
raise ValueError("Input vector needs to be bigger than window size.")
if windowLen < 3:
return inputSignal
s = numpy.r_[2*inputSignal[0] - inputSignal[windowLen-1::-1], inputSignal, 2*inputSignal[-1]-inputSignal[-1:-windowLen:-1]]
w = numpy.ones(windowLen, 'd')
y = numpy.convolve(w/w.sum(), s, mode='same')
return y[windowLen:-windowLen+1]
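# Usage sketch (values are illustrative; the function returns a signal of the
# same length as its input):
#
# >>> t = numpy.linspace(0, 6, 100)
# >>> noisy = numpy.sin(t) + 0.2 * numpy.random.randn(100)
# >>> smooth = smoothMovingAvg(noisy, windowLen=11)
# >>> smooth.shape == noisy.shape
# True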
def selfSimilarityMatrix(featureVectors):
'''
This function computes the self-similarity matrix for a sequence of feature vectors.
ARGUMENTS:
- featureVectors: a numpy matrix (nDims x nVectors) whose i-th column corresponds to the i-th feature vector
RETURNS:
- S: the self-similarity matrix (nVectors x nVectors)
'''
[nDims, nVectors] = featureVectors.shape
[featureVectors2, MEAN, STD] = aT.normalizeFeatures([featureVectors.T])
featureVectors2 = featureVectors2[0].T
S = 1.0 - distance.squareform(distance.pdist(featureVectors2.T, 'cosine'))
return S
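# Usage sketch (random features, shown for shape documentation only):
#
# >>> feats = numpy.random.rand(34, 200)          # nDims x nVectors
# >>> S = selfSimilarityMatrix(feats)
# >>> S.shape
# (200, 200)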
def flags2segs(Flags, window):
'''
ARGUMENTS:
- Flags: a sequence of class flags (per time window)
- window: window duration (in seconds)
RETURNS:
- segs: a sequence of segment limits: segs[i, 0] and segs[i, 1] are the start and end points of the i-th segment (in seconds)
- classes: a sequence of class flags: classes[i] is the class ID of the i-th segment
'''
preFlag = 0
curFlag = 0
numOfSegments = 0
curVal = Flags[curFlag]
segsList = []
classes = []
while (curFlag < len(Flags) - 1):
stop = 0
preFlag = curFlag
preVal = curVal
while (stop == 0):
curFlag = curFlag + 1
tempVal = Flags[curFlag]
if ((tempVal != curVal) or (curFlag == len(Flags) - 1)): # stop
numOfSegments = numOfSegments + 1
stop = 1
curSegment = curVal
curVal = Flags[curFlag]
segsList.append((curFlag * window))
classes.append(preVal)
segs = numpy.zeros((len(segsList), 2))
for i in range(len(segsList)):
if i > 0:
segs[i, 0] = segsList[i-1]
segs[i, 1] = segsList[i]
return (segs, classes)
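# Example (hypothetical flags, 1-second windows). Note that the endpoint of the
# last segment is the index of the last window; callers such as
# mtFileClassification patch segs[-1] to the true signal duration afterwards:
#
# >>> segs, classes = flags2segs(['speech', 'speech', 'music', 'music'], 1.0)
# >>> # segs ~ [[0., 2.], [2., 3.]], classes ~ ['speech', 'music']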
def segs2flags(segStart, segEnd, segLabel, winSize):
'''
This function converts segment endpoints and respective segment labels to fix-sized class labels.
ARGUMENTS:
- segStart: segment start points (in seconds)
- segEnd: segment endpoints (in seconds)
- segLabel: segment labels
- winSize: fix-sized window (in seconds)
RETURNS:
- flags: numpy array of class indices
- classNames: list of classnames (strings)
'''
flags = []
classNames = list(set(segLabel))
curPos = winSize / 2.0
while curPos < segEnd[-1]:
for i in range(len(segStart)):
if curPos > segStart[i] and curPos <= segEnd[i]:
break
flags.append(classNames.index(segLabel[i]))
curPos += winSize
return numpy.array(flags), classNames
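# Example (hypothetical segments, 1-second windows): five window centres fall
# at 0.5, 1.5, ..., 4.5 s, so five flags are produced; classNames is the set of
# unique labels and flags holds indices into it:
#
# >>> flags, classNames = segs2flags([0.0, 2.5], [2.5, 5.0],
# ...                                ['speech', 'music'], 1.0)
# >>> len(flags)
# 5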
def readSegmentGT(gtFile):
'''
This function reads a segmentation ground truth file, following a simple CSV format with the following columns:
<segment start>,<segment end>,<class label>
ARGUMENTS:
- gtFile: the path of the CSV segment file
RETURNS:
- segStart: a numpy array of segments' start positions
- segEnd: a numpy array of segments' ending positions
- segLabel: a list of respective class labels (strings)
'''
f = open(gtFile, "rb")
reader = csv.reader(f, delimiter=',')
segStart = []
segEnd = []
segLabel = []
for row in reader:
if len(row) == 3:
segStart.append(float(row[0]))
segEnd.append(float(row[1]))
#if row[2]!="other":
# segLabel.append((row[2]))
#else:
# segLabel.append("silence")
segLabel.append((row[2]))
return numpy.array(segStart), numpy.array(segEnd), segLabel
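# The expected .segments layout, e.g. (values are illustrative):
#
#     0.0,2.5,speech
#     2.5,7.0,music
#
# >>> segStart, segEnd, segLabels = readSegmentGT("example.segments")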
def plotSegmentationResults(flagsInd, flagsIndGT, classNames, mtStep, ONLY_EVALUATE=False):
'''
This function plots statistics on the classification-segmentation results produced either by the fix-sized supervised method or the HMM method.
It also computes the overall accuracy achieved by the respective method if ground-truth is available.
'''
flags = [classNames[int(f)] for f in flagsInd]
(segs, classes) = flags2segs(flags, mtStep)
minLength = min(flagsInd.shape[0], flagsIndGT.shape[0])
if minLength > 0:
accuracy = numpy.count_nonzero(flagsInd[0:minLength] == flagsIndGT[0:minLength]) / float(minLength)
else:
accuracy = -1
if not ONLY_EVALUATE:
Duration = segs[-1, 1]
SPercentages = numpy.zeros((len(classNames), 1))
Percentages = numpy.zeros((len(classNames), 1))
AvDurations = numpy.zeros((len(classNames), 1))
for iSeg in range(segs.shape[0]):
SPercentages[classNames.index(classes[iSeg])] += (segs[iSeg, 1]-segs[iSeg, 0])
for i in range(SPercentages.shape[0]):
Percentages[i] = 100.0 * SPercentages[i] / Duration
S = sum(1 for c in classes if c == classNames[i])
if S > 0:
AvDurations[i] = SPercentages[i] / S
else:
AvDurations[i] = 0.0
for i in range(Percentages.shape[0]):
print classNames[i], Percentages[i], AvDurations[i]
font = {'family': 'fantasy', 'size': 10}
plt.rc('font', **font)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.set_yticks(numpy.array(range(len(classNames))))
ax1.axis((0, Duration, -1, len(classNames)))
ax1.set_yticklabels(classNames)
ax1.plot(numpy.array(range(len(flagsInd))) * mtStep + mtStep / 2.0, flagsInd)
if flagsIndGT.shape[0] > 0:
ax1.plot(numpy.array(range(len(flagsIndGT))) * mtStep + mtStep / 2.0, flagsIndGT + 0.05, '--r')
plt.xlabel("time (seconds)")
if accuracy >= 0:
plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy))
ax2 = fig.add_subplot(223)
plt.title("Classes percentage durations")
ax2.axis((0, len(classNames) + 1, 0, 100))
ax2.set_xticks(numpy.array(range(len(classNames) + 1)))
ax2.set_xticklabels([" "] + classNames)
ax2.bar(numpy.array(range(len(classNames))) + 0.5, Percentages)
ax3 = fig.add_subplot(224)
plt.title("Segment average duration per class")
ax3.axis((0, len(classNames)+1, 0, AvDurations.max()))
ax3.set_xticks(numpy.array(range(len(classNames) + 1)))
ax3.set_xticklabels([" "] + classNames)
ax3.bar(numpy.array(range(len(classNames))) + 0.5, AvDurations)
fig.tight_layout()
plt.show()
return accuracy
def evaluateSpeakerDiarization(flags, flagsGT):
minLength = min(flags.shape[0], flagsGT.shape[0])
flags = flags[0:minLength]
flagsGT = flagsGT[0:minLength]
uFlags = numpy.unique(flags)
uFlagsGT = numpy.unique(flagsGT)
# compute contigency table:
cMatrix = numpy.zeros((uFlags.shape[0], uFlagsGT.shape[0]))
for i in range(minLength):
cMatrix[int(numpy.nonzero(uFlags == flags[i])[0]), int(numpy.nonzero(uFlagsGT == flagsGT[i])[0])] += 1.0
Nc, Ns = cMatrix.shape
N_s = numpy.sum(cMatrix, axis=0)
N_c = numpy.sum(cMatrix, axis=1)
N = numpy.sum(cMatrix)
purityCluster = numpy.zeros((Nc, ))
puritySpeaker = numpy.zeros((Ns, ))
# compute cluster purity:
for i in range(Nc):
purityCluster[i] = numpy.max((cMatrix[i, :])) / (N_c[i])
for j in range(Ns):
puritySpeaker[j] = numpy.max((cMatrix[:, j])) / (N_s[j])
purityClusterMean = numpy.sum(purityCluster * N_c) / N
puritySpeakerMean = numpy.sum(puritySpeaker * N_s) / N
return purityClusterMean, puritySpeakerMean
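# Example (hypothetical flags): the cluster labels are permuted with respect to
# the ground truth, but every cluster maps to exactly one speaker, so both
# purities are 1.0:
#
# >>> flags = numpy.array([0, 0, 1, 1, 1, 0])
# >>> flagsGT = numpy.array([1, 1, 0, 0, 0, 1])
# >>> evaluateSpeakerDiarization(flags, flagsGT)
# (1.0, 1.0)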
def trainHMM_computeStatistics(features, labels):
'''
This function computes the statistics used to train an HMM joint segmentation-classification model
using a sequence of sequential features and respective labels
ARGUMENTS:
- features: a numpy matrix of feature vectors (numOfDimensions x numOfWindows)
- labels: a numpy array of class indices (numOfWindows x 1)
RETURNS:
- startprob: matrix of prior class probabilities (numOfClasses x 1)
- transmat: transition matrix (numOfClasses x numOfClasses)
- means: means matrix (numOfDimensions x 1)
- cov: deviation matrix (numOfDimensions x 1)
'''
uLabels = numpy.unique(labels)
nComps = len(uLabels)
nFeatures = features.shape[0]
if features.shape[1] < labels.shape[0]:
print "trainHMM warning: number of short-term feature vectors must be greater or equal to the labels length!"
labels = labels[0:features.shape[1]]
# compute prior probabilities:
startprob = numpy.zeros((nComps,))
for i, u in enumerate(uLabels):
startprob[i] = numpy.count_nonzero(labels == u)
startprob = startprob / startprob.sum() # normalize prior probabilities
# compute transition matrix:
transmat = numpy.zeros((nComps, nComps))
for i in range(labels.shape[0]-1):
transmat[int(labels[i]), int(labels[i + 1])] += 1
for i in range(nComps): # normalize rows of transition matrix:
transmat[i, :] /= transmat[i, :].sum()
means = numpy.zeros((nComps, nFeatures))
for i in range(nComps):
means[i, :] = numpy.matrix(features[:, numpy.nonzero(labels == uLabels[i])[0]].mean(axis=1))
cov = numpy.zeros((nComps, nFeatures))
for i in range(nComps):
#cov[i,:,:] = numpy.cov(features[:,numpy.nonzero(labels==uLabels[i])[0]]) # use this line if an HMM with full Gaussian distributions is to be used!
cov[i, :] = numpy.std(features[:, numpy.nonzero(labels == uLabels[i])[0]], axis=1)
return startprob, transmat, means, cov
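# Shape sketch (random toy data, for documentation only): with 2 classes and 10
# feature dimensions, startprob is (2,), transmat is (2, 2), and means/cov are
# each (2, 10):
#
# >>> feats = numpy.random.rand(10, 6)   # numOfDimensions x numOfWindows
# >>> labels = numpy.array([0, 0, 1, 1, 0, 1])
# >>> sp, tm, mu, sd = trainHMM_computeStatistics(feats, labels)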
def trainHMM_fromFile(wavFile, gtFile, hmmModelName, mtWin, mtStep):
'''
This function trains a HMM model for segmentation-classification using a single annotated audio file
ARGUMENTS:
- wavFile: the path of the audio filename
- gtFile: the path of the ground truth filename
(a csv file of the form <segment start in seconds>,<segment end in seconds>,<segment label> in each row)
- hmmModelName: the name of the HMM model to be stored
- mtWin: mid-term window size
- mtStep: mid-term window step
RETURNS:
- hmm: an object to the resulting HMM
- classNames: a list of classNames
After training, hmm, classNames, along with the mtWin and mtStep values are stored in the hmmModelName file
'''
[segStart, segEnd, segLabels] = readSegmentGT(gtFile) # read ground truth data
flags, classNames = segs2flags(segStart, segEnd, segLabels, mtStep) # convert to fix-sized sequence of flags
[Fs, x] = audioBasicIO.readAudioFile(wavFile) # read audio data
#F = aF.stFeatureExtraction(x, Fs, 0.050*Fs, 0.050*Fs);
[F, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * 0.050), round(Fs * 0.050)) # feature extraction
startprob, transmat, means, cov = trainHMM_computeStatistics(F, flags) # compute HMM statistics (priors, transition matrix, etc)
hmm = sklearn.hmm.GaussianHMM(startprob.shape[0], "diag", startprob, transmat) # hmm training
hmm.means_ = means
hmm.covars_ = cov
fo = open(hmmModelName, "wb") # output to file
cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(classNames, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
fo.close()
return hmm, classNames
def trainHMM_fromDir(dirPath, hmmModelName, mtWin, mtStep):
'''
This function trains a HMM model for segmentation-classification using a directory where WAV files and .segments (ground-truth) files are stored
ARGUMENTS:
- dirPath: the path of the data directory
- hmmModelName: the name of the HMM model to be stored
- mtWin: mid-term window size
- mtStep: mid-term window step
RETURNS:
- hmm: an object to the resulting HMM
- classNames: a list of classNames
After training, hmm, classNames, along with the mtWin and mtStep values are stored in the hmmModelName file
'''
flagsAll = numpy.array([])
classesAll = []
for i, f in enumerate(glob.glob(dirPath + os.sep + '*.wav')): # for each WAV file
wavFile = f
gtFile = f.replace('.wav', '.segments') # path of the corresponding annotation file
if not os.path.isfile(gtFile): # if current WAV file does not have annotation -> skip
continue
[segStart, segEnd, segLabels] = readSegmentGT(gtFile) # read GT data
flags, classNames = segs2flags(segStart, segEnd, segLabels, mtStep) # convert to flags
for c in classNames: # update classnames:
if c not in classesAll:
classesAll.append(c)
[Fs, x] = audioBasicIO.readAudioFile(wavFile) # read audio data
[F, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * 0.050), round(Fs * 0.050)) # feature extraction
lenF = F.shape[1]
lenL = len(flags)
MIN = min(lenF, lenL)
F = F[:, 0:MIN]
flags = flags[0:MIN]
flagsNew = []
for j, fl in enumerate(flags): # append features and labels
flagsNew.append(classesAll.index(classNames[flags[j]]))
flagsAll = numpy.append(flagsAll, numpy.array(flagsNew))
if i == 0:
Fall = F
else:
Fall = numpy.concatenate((Fall, F), axis=1)
startprob, transmat, means, cov = trainHMM_computeStatistics(Fall, flagsAll) # compute HMM statistics
hmm = sklearn.hmm.GaussianHMM(startprob.shape[0], "diag", startprob, transmat) # train HMM
hmm.means_ = means
hmm.covars_ = cov
fo = open(hmmModelName, "wb") # save HMM model
cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(classesAll, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
fo.close()
return hmm, classesAll
def hmmSegmentation(wavFileName, hmmModelName, PLOT=False, gtFileName=""):
[Fs, x] = audioBasicIO.readAudioFile(wavFileName) # read audio data
try:
fo = open(hmmModelName, "rb")
except IOError:
print "hmmSegmentation: HMM model file not found: " + hmmModelName
return
try:
hmm = cPickle.load(fo)
classesAll = cPickle.load(fo)
mtWin = cPickle.load(fo)
mtStep = cPickle.load(fo)
except:
fo.close()
return
fo.close()
#Features = audioFeatureExtraction.stFeatureExtraction(x, Fs, 0.050*Fs, 0.050*Fs); # feature extraction
[Features, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * 0.050), round(Fs * 0.050))
flagsInd = hmm.predict(Features.T) # apply model
#for i in range(len(flagsInd)):
# if classesAll[flagsInd[i]]=="silence":
# flagsInd[i]=classesAll.index("speech")
# plot results
if os.path.isfile(gtFileName):
[segStart, segEnd, segLabels] = readSegmentGT(gtFileName)
flagsGT, classNamesGT = segs2flags(segStart, segEnd, segLabels, mtStep)
flagsGTNew = []
for j, fl in enumerate(flagsGT): # "align" labels with GT
if classNamesGT[flagsGT[j]] in classesAll:
flagsGTNew.append(classesAll.index(classNamesGT[flagsGT[j]]))
else:
flagsGTNew.append(-1)
flagsIndGT = numpy.array(flagsGTNew)
else:
flagsIndGT = numpy.array([])
acc = plotSegmentationResults(flagsInd, flagsIndGT, classesAll, mtStep, not PLOT)
if acc >= 0:
print "Overall Accuracy: {0:.2f}".format(acc)
return flagsInd, classesAll, acc
def mtFileClassification(inputFile, modelName, modelType, plotResults=False, gtFile=""):
'''
This function performs mid-term classification of an audio stream.
Towards this end, supervised knowledge is used, i.e. a pre-trained classifier.
ARGUMENTS:
- inputFile: path of the input WAV file
- modelName: name of the classification model
- modelType: svm or knn depending on the classifier type
- plotResults: True if results are to be plotted using matplotlib along with a set of statistics
- gtFile: path of a ground-truth .segments file (optional; used to compute accuracy)
RETURNS:
- flagsInd: numpy array of class indices (one per mid-term window)
- classNames: a list of class names
- acc: overall accuracy (-1 if no ground truth is available)
if not os.path.isfile(modelName):
print "mtFileClassificationError: input modelType not found!"
return (-1, -1, -1)
# Load classifier:
if modelType == 'svm':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadSVModel(modelName)
elif modelType == 'knn':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadKNNModel(modelName)
else:
print "mtFileClassificationError: unknown modelType: " + modelType
return (-1, -1, -1)
if computeBEAT:
print "Model " + modelName + " contains long-term music features (beat etc) and cannot be used in segmentation"
return (-1, -1, -1)
[Fs, x] = audioBasicIO.readAudioFile(inputFile) # load input file
if Fs == -1: # could not read file
return (-1, -1, -1)
x = audioBasicIO.stereo2mono(x) # convert stereo (if) to mono
Duration = len(x) / Fs
# mid-term feature extraction:
[MidTermFeatures, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * stWin), round(Fs * stStep))
flags = []
Ps = []
flagsInd = []
for i in range(MidTermFeatures.shape[1]): # for each feature vector (i.e. for each fix-sized segment):
curFV = (MidTermFeatures[:, i] - MEAN) / STD # normalize current feature vector
[Result, P] = aT.classifierWrapper(Classifier, modelType, curFV) # classify vector
flagsInd.append(Result)
flags.append(classNames[int(Result)]) # update class label matrix
Ps.append(numpy.max(P)) # update probability matrix
flagsInd = numpy.array(flagsInd)
# 1-window smoothing
for i in range(1, len(flagsInd) - 1):
if flagsInd[i-1] == flagsInd[i + 1]:
flagsInd[i] = flagsInd[i + 1]
(segs, classes) = flags2segs(flags, mtStep) # convert fix-sized flags to segments and classes
segs[-1] = len(x) / float(Fs)
# Load ground-truth:
if os.path.isfile(gtFile):
[segStartGT, segEndGT, segLabelsGT] = readSegmentGT(gtFile)
flagsGT, classNamesGT = segs2flags(segStartGT, segEndGT, segLabelsGT, mtStep)
flagsIndGT = []
for j, fl in enumerate(flagsGT): # "align" labels with GT
if classNamesGT[flagsGT[j]] in classNames:
flagsIndGT.append(classNames.index(classNamesGT[flagsGT[j]]))
else:
flagsIndGT.append(-1)
flagsIndGT = numpy.array(flagsIndGT)
else:
flagsIndGT = numpy.array([])
acc = plotSegmentationResults(flagsInd, flagsIndGT, classNames, mtStep, not plotResults)
if acc >= 0:
print "Overall Accuracy: {0:.3f}".format(acc)
return (flagsInd, classNames, acc)
def evaluateSegmentationClassificationDir(dirName, modelName, methodName):
flagsAll = numpy.array([])
classesAll = []
accuracys = []
for i, f in enumerate(glob.glob(dirName + os.sep + '*.wav')): # for each WAV file
wavFile = f
print wavFile
gtFile = f.replace('.wav', '.segments') # path of the corresponding annotation file
if methodName.lower() in ["svm", "knn"]:
flagsInd, classNames, acc = mtFileClassification(wavFile, modelName, methodName, False, gtFile)
else:
flagsInd, classNames, acc = hmmSegmentation(wavFile, modelName, False, gtFile)
if acc > -1:
accuracys.append(acc)
print " - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "
print "Average Accuracy: {0:.1f}".format(100.0*numpy.array(accuracys).mean())
print "Median Accuracy: {0:.1f}".format(100.0*numpy.median(numpy.array(accuracys)))
print "Min Accuracy: {0:.1f}".format(100.0*numpy.array(accuracys).min())
print "Max Accuracy: {0:.1f}".format(100.0*numpy.array(accuracys).max())
def silenceRemoval(x, Fs, stWin, stStep, smoothWindow=0.5, Weight=0.5, plot=False):
'''
Event Detection (silence removal)
ARGUMENTS:
- x: the input audio signal
- Fs: sampling freq
- stWin, stStep: window size and step in seconds
- smoothWindow: (optional) smoothing window (in seconds)
- Weight: (optional) weight factor (0 < Weight < 1); the higher, the more strict
- plot: (optional) True if results are to be plotted
RETURNS:
- segmentLimits: list of segment limits in seconds (e.g. [[0.1, 0.9], [1.4, 3.0]] means that
the resulting segments are (0.1 - 0.9) seconds and (1.4 - 3.0) seconds)
'''
if Weight >= 1:
Weight = 0.99
if Weight <= 0:
Weight = 0.01
# Step 1: feature extraction
x = audioBasicIO.stereo2mono(x) # convert to mono
ShortTermFeatures = aF.stFeatureExtraction(x, Fs, stWin * Fs, stStep * Fs) # extract short-term features
# Step 2: train binary SVM classifier of low vs high energy frames
EnergySt = ShortTermFeatures[1, :] # keep only the energy short-term sequence (2nd feature)
E = numpy.sort(EnergySt) # sort the energy feature values:
L1 = int(len(E) / 10) # number of short-term windows that correspond to 10% of the total
T1 = numpy.mean(E[0:L1]) # compute "lower" 10% energy threshold
T2 = numpy.mean(E[-L1:-1]) # compute "higher" 10% energy threshold
Class1 = ShortTermFeatures[:, numpy.where(EnergySt < T1)[0]] # get all features that correspond to low energy
Class2 = ShortTermFeatures[:, numpy.where(EnergySt > T2)[0]] # get all features that correspond to high energy
featuresSS = [Class1.T, Class2.T] # form the binary classification task and ...
[featuresNormSS, MEANSS, STDSS] = aT.normalizeFeatures(featuresSS) # normalize and ...
SVM = aT.trainSVM(featuresNormSS, 1.0) # train the respective SVM probabilistic model (ONSET vs SILENCE)
# Step 3: compute onset probability based on the trained SVM
ProbOnset = []
for i in range(ShortTermFeatures.shape[1]): # for each frame
curFV = (ShortTermFeatures[:, i] - MEANSS) / STDSS # normalize feature vector
ProbOnset.append(SVM.pred_probability(curFV)[1]) # get SVM probability (that it belongs to the ONSET class)
ProbOnset = numpy.array(ProbOnset)
ProbOnset = smoothMovingAvg(ProbOnset, smoothWindow / stStep) # smooth probability
# Step 4A: detect onset frame indices:
ProbOnsetSorted = numpy.sort(ProbOnset) # find probability Threshold as a weighted average of top 10% and lower 10% of the values
Nt = ProbOnsetSorted.shape[0] / 10
T = (numpy.mean((1 - Weight) * ProbOnsetSorted[0:Nt]) + Weight * numpy.mean(ProbOnsetSorted[-Nt::]))
MaxIdx = numpy.where(ProbOnset > T)[0] # get the indices of the frames that satisfy the thresholding
i = 0
timeClusters = []
segmentLimits = []
# Step 4B: group frame indices to onset segments
while i < len(MaxIdx): # for each of the detected onset indices
curCluster = [MaxIdx[i]]
if i == len(MaxIdx)-1:
break
while MaxIdx[i+1] - curCluster[-1] <= 2:
curCluster.append(MaxIdx[i+1])
i += 1
if i == len(MaxIdx)-1:
break
i += 1
timeClusters.append(curCluster)
segmentLimits.append([curCluster[0] * stStep, curCluster[-1] * stStep])
# Step 5: Post process: remove very small segments:
minDuration = 0.2
segmentLimits2 = []
for s in segmentLimits:
if s[1] - s[0] > minDuration:
segmentLimits2.append(s)
segmentLimits = segmentLimits2
if plot:
timeX = numpy.arange(0, x.shape[0] / float(Fs), 1.0 / Fs)
plt.subplot(2, 1, 1)
plt.plot(timeX, x)
for s in segmentLimits:
plt.axvline(x=s[0])
plt.axvline(x=s[1])
plt.subplot(2, 1, 2)
plt.plot(numpy.arange(0, ProbOnset.shape[0] * stStep, stStep), ProbOnset)
plt.title('Signal')
for s in segmentLimits:
plt.axvline(x=s[0])
plt.axvline(x=s[1])
plt.title('SVM Probability')
# plt.show()
return segmentLimits
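# Typical invocation (sketch; "example.wav" is a placeholder path):
#
# >>> [Fs, x] = audioBasicIO.readAudioFile("example.wav")
# >>> segments = silenceRemoval(x, Fs, 0.050, 0.050, smoothWindow=1.0, Weight=0.3)
# >>> # segments is e.g. [[0.4, 3.1], [4.2, 7.9]] -- active regions in seconds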
def speakerDiarization(fileName, numOfSpeakers, mtSize=2.0, mtStep=0.2, stWin=0.05, LDAdim=35, PLOT=False):
'''
ARGUMENTS:
- fileName: the name of the WAV file to be analyzed
- numOfSpeakers: the number of speakers (clusters) in the recording (<=0 for unknown)
- mtSize: (opt) mid-term window size
- mtStep: (opt) mid-term window step
- stWin: (opt) short-term window size
- LDAdim: (opt) LDA dimension (0 for no LDA)
- PLOT: (opt) 0 for not plotting the results, 1 for plotting
'''
[Fs, x] = audioBasicIO.readAudioFile(fileName)
x = audioBasicIO.stereo2mono(x)
Duration = len(x) / Fs
[Classifier1, MEAN1, STD1, classNames1, mtWin1, mtStep1, stWin1, stStep1, computeBEAT1] = aT.loadKNNModel("data/knnSpeakerAll")
[Classifier2, MEAN2, STD2, classNames2, mtWin2, mtStep2, stWin2, stStep2, computeBEAT2] = aT.loadKNNModel("data/knnSpeakerFemaleMale")
[MidTermFeatures, ShortTermFeatures] = aF.mtFeatureExtraction(x, Fs, mtSize * Fs, mtStep * Fs, round(Fs * stWin), round(Fs*stWin * 0.5))
MidTermFeatures2 = numpy.zeros((MidTermFeatures.shape[0] + len(classNames1) + len(classNames2), MidTermFeatures.shape[1]))
for i in range(MidTermFeatures.shape[1]):
curF1 = (MidTermFeatures[:, i] - MEAN1) / STD1
curF2 = (MidTermFeatures[:, i] - MEAN2) / STD2
[Result, P1] = aT.classifierWrapper(Classifier1, "knn", curF1)
[Result, P2] = aT.classifierWrapper(Classifier2, "knn", curF2)
MidTermFeatures2[0:MidTermFeatures.shape[0], i] = MidTermFeatures[:, i]
MidTermFeatures2[MidTermFeatures.shape[0]:MidTermFeatures.shape[0]+len(classNames1), i] = P1 + 0.0001
MidTermFeatures2[MidTermFeatures.shape[0] + len(classNames1)::, i] = P2 + 0.0001
MidTermFeatures = MidTermFeatures2 # TODO
# SELECT FEATURES:
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20]; # SET 0A
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20, 99,100]; # SET 0B
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,
# 97,98, 99,100]; # SET 0C
iFeaturesSelect = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53] # SET 1A
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20,41,42,43,44,45,46,47,48,49,50,51,52,53, 99,100]; # SET 1B
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20,41,42,43,44,45,46,47,48,49,50,51,52,53, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100]; # SET 1C
#iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53]; # SET 2A
#iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53, 99,100]; # SET 2B
#iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100]; # SET 2C
#iFeaturesSelect = range(100); # SET 3
#MidTermFeatures += numpy.random.rand(MidTermFeatures.shape[0], MidTermFeatures.shape[1]) * 0.000000010
MidTermFeatures = MidTermFeatures[iFeaturesSelect, :]
(MidTermFeaturesNorm, MEAN, STD) = aT.normalizeFeatures([MidTermFeatures.T])
MidTermFeaturesNorm = MidTermFeaturesNorm[0].T
numOfWindows = MidTermFeatures.shape[1]
# remove outliers:
DistancesAll = numpy.sum(distance.squareform(distance.pdist(MidTermFeaturesNorm.T)), axis=0)
MDistancesAll = numpy.mean(DistancesAll)
iNonOutLiers = numpy.nonzero(DistancesAll < 1.2 * MDistancesAll)[0]
# TODO: Combine energy threshold for outlier removal:
#EnergyMin = numpy.min(MidTermFeatures[1,:])
#EnergyMean = numpy.mean(MidTermFeatures[1,:])
#Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0
#iNonOutLiers = numpy.nonzero(MidTermFeatures[1,:] > Thres)[0]
#print iNonOutLiers
perOutLier = (100.0 * (numOfWindows - iNonOutLiers.shape[0])) / numOfWindows
MidTermFeaturesNormOr = MidTermFeaturesNorm
MidTermFeaturesNorm = MidTermFeaturesNorm[:, iNonOutLiers]
# LDA dimensionality reduction:
if LDAdim > 0:
#[mtFeaturesToReduce, _] = aF.mtFeatureExtraction(x, Fs, mtSize * Fs, stWin * Fs, round(Fs*stWin), round(Fs*stWin));
# extract mid-term features with minimum step:
mtWinRatio = int(round(mtSize / stWin))
mtStepRatio = int(round(stWin / stWin))  # note: stWin / stWin is always 1, i.e. the minimum possible step (one short-term window)
mtFeaturesToReduce = []
numOfFeatures = len(ShortTermFeatures)
numOfStatistics = 2
#for i in range(numOfStatistics * numOfFeatures + 1):
for i in range(numOfStatistics * numOfFeatures):
mtFeaturesToReduce.append([])
for i in range(numOfFeatures): # for each of the short-term features:
curPos = 0
N = len(ShortTermFeatures[i])
while (curPos < N):
N1 = curPos
N2 = curPos + mtWinRatio
if N2 > N:
N2 = N
curStFeatures = ShortTermFeatures[i][N1:N2]
mtFeaturesToReduce[i].append(numpy.mean(curStFeatures))
mtFeaturesToReduce[i+numOfFeatures].append(numpy.std(curStFeatures))
curPos += mtStepRatio
mtFeaturesToReduce = numpy.array(mtFeaturesToReduce)
mtFeaturesToReduce2 = numpy.zeros((mtFeaturesToReduce.shape[0] + len(classNames1) + len(classNames2), mtFeaturesToReduce.shape[1]))
for i in range(mtFeaturesToReduce.shape[1]):
curF1 = (mtFeaturesToReduce[:, i] - MEAN1) / STD1
curF2 = (mtFeaturesToReduce[:, i] - MEAN2) / STD2
[Result, P1] = aT.classifierWrapper(Classifier1, "knn", curF1)
[Result, P2] = aT.classifierWrapper(Classifier2, "knn", curF2)
mtFeaturesToReduce2[0:mtFeaturesToReduce.shape[0], i] = mtFeaturesToReduce[:, i]
mtFeaturesToReduce2[mtFeaturesToReduce.shape[0]:mtFeaturesToReduce.shape[0] + len(classNames1), i] = P1 + 0.0001
mtFeaturesToReduce2[mtFeaturesToReduce.shape[0]+len(classNames1)::, i] = P2 + 0.0001
mtFeaturesToReduce = mtFeaturesToReduce2
mtFeaturesToReduce = mtFeaturesToReduce[iFeaturesSelect, :]
#mtFeaturesToReduce += numpy.random.rand(mtFeaturesToReduce.shape[0], mtFeaturesToReduce.shape[1]) * 0.0000010
(mtFeaturesToReduce, MEAN, STD) = aT.normalizeFeatures([mtFeaturesToReduce.T])
mtFeaturesToReduce = mtFeaturesToReduce[0].T
#DistancesAll = numpy.sum(distance.squareform(distance.pdist(mtFeaturesToReduce.T)), axis=0)
#MDistancesAll = numpy.mean(DistancesAll)
#iNonOutLiers2 = numpy.nonzero(DistancesAll < 3.0*MDistancesAll)[0]
#mtFeaturesToReduce = mtFeaturesToReduce[:, iNonOutLiers2]
Labels = numpy.zeros((mtFeaturesToReduce.shape[1], ))
LDAstep = 1.0
LDAstepRatio = LDAstep / stWin
#print LDAstep, LDAstepRatio
for i in range(Labels.shape[0]):
Labels[i] = int(i*stWin/LDAstepRatio)
clf = LDA(n_components=LDAdim)
clf.fit(mtFeaturesToReduce.T, Labels, tol=0.000001)
MidTermFeaturesNorm = (clf.transform(MidTermFeaturesNorm.T)).T
if numOfSpeakers <= 0:
sRange = range(2, 10)
else:
sRange = [numOfSpeakers]
clsAll = []
silAll = []
centersAll = []
for iSpeakers in sRange:
cls, means, steps = mlpy.kmeans(MidTermFeaturesNorm.T, k=iSpeakers, plus=True) # perform k-means clustering
#YDist = distance.pdist(MidTermFeaturesNorm.T, metric='euclidean')
#print distance.squareform(YDist).shape
#hc = mlpy.HCluster()
#hc.linkage(YDist)
#cls = hc.cut(14.5)
#print cls
# Y = distance.squareform(distance.pdist(MidTermFeaturesNorm.T))
clsAll.append(cls)
centersAll.append(means)
silA = []; silB = []
for c in range(iSpeakers): # for each speaker (i.e. for each extracted cluster)
clusterPerCent = numpy.nonzero(cls==c)[0].shape[0] / float(len(cls))
if clusterPerCent < 0.020:
silA.append(0.0)
silB.append(0.0)
else:
MidTermFeaturesNormTemp = MidTermFeaturesNorm[:,cls==c] # get subset of feature vectors
Yt = distance.pdist(MidTermFeaturesNormTemp.T) # compute average distance between samples that belong to the cluster (a values)
silA.append(numpy.mean(Yt)*clusterPerCent)
silBs = []
for c2 in range(iSpeakers): # compute distances from samples of other clusters
if c2!=c:
clusterPerCent2 = numpy.nonzero(cls==c2)[0].shape[0] / float(len(cls))
MidTermFeaturesNormTemp2 = MidTermFeaturesNorm[:,cls==c2]
Yt = distance.cdist(MidTermFeaturesNormTemp.T, MidTermFeaturesNormTemp2.T)
silBs.append(numpy.mean(Yt)*(clusterPerCent+clusterPerCent2)/2.0)
silBs = numpy.array(silBs)
silB.append(min(silBs)) # ... and keep the minimum value (i.e. the distance from the "nearest" cluster)
silA = numpy.array(silA)
silB = numpy.array(silB)
sil = []
for c in range(iSpeakers): # for each cluster (speaker)
sil.append( ( silB[c] - silA[c]) / (max(silB[c], silA[c])+0.00001) ) # compute silhouette
silAll.append(numpy.mean(sil)) # keep the AVERAGE SILHOUETTE
#silAll = silAll * (1.0/(numpy.power(numpy.array(sRange),0.5)))
imax = numpy.argmax(silAll) # position of the maximum silhouette value
nSpeakersFinal = sRange[imax] # optimal number of clusters
# generate the final set of cluster labels
# (important: need to retrieve the outlier windows: this is achieved by giving them the value of their nearest non-outlier window)
cls = numpy.zeros((numOfWindows,))
for i in range(numOfWindows):
j = numpy.argmin(numpy.abs(i-iNonOutLiers))
cls[i] = clsAll[imax][j]
# Post-process method 1: hmm smoothing
for i in range(1):
startprob, transmat, means, cov = trainHMM_computeStatistics(MidTermFeaturesNormOr, cls)
hmm = sklearn.hmm.GaussianHMM(startprob.shape[0], "diag", startprob, transmat) # hmm training
hmm.means_ = means; hmm.covars_ = cov
cls = hmm.predict(MidTermFeaturesNormOr.T)
# Post-process method 2: median filtering:
cls = scipy.signal.medfilt(cls, 13)
cls = scipy.signal.medfilt(cls, 11)
sil = silAll[imax] # final silhouette value
classNames = ["speaker{0:d}".format(c) for c in range(nSpeakersFinal)]
# load ground-truth if available
gtFile = fileName.replace('.wav', '.segments') # path of the corresponding annotation file
if os.path.isfile(gtFile): # if ground truth exists
[segStart, segEnd, segLabels] = readSegmentGT(gtFile) # read GT data
flagsGT, classNamesGT = segs2flags(segStart, segEnd, segLabels, mtStep) # convert to flags
if PLOT:
fig = plt.figure()
if numOfSpeakers>0:
ax1 = fig.add_subplot(111)
else:
ax1 = fig.add_subplot(211)
ax1.set_yticks(numpy.array(range(len(classNames))))
ax1.axis((0, Duration, -1, len(classNames)))
ax1.set_yticklabels(classNames)
ax1.plot(numpy.array(range(len(cls)))*mtStep+mtStep/2.0, cls)
if os.path.isfile(gtFile):
if PLOT:
ax1.plot(numpy.array(range(len(flagsGT)))*mtStep+mtStep/2.0, flagsGT, 'r')
purityClusterMean, puritySpeakerMean = evaluateSpeakerDiarization(cls, flagsGT)
print "{0:.1f}\t{1:.1f}".format(100*purityClusterMean, 100*puritySpeakerMean)
if PLOT:
plt.title("Cluster purity: {0:.1f}% - Speaker purity: {1:.1f}%".format(100*purityClusterMean, 100*puritySpeakerMean) )
if PLOT:
plt.xlabel("time (seconds)")
#print sRange, silAll
if numOfSpeakers<=0:
plt.subplot(212)
plt.plot(sRange, silAll)
plt.xlabel("number of clusters");
plt.ylabel("average clustering's sillouette");
plt.show()
def speakerDiarizationEvaluateScript(folderName, LDAs):
'''
This function prints the cluster purity and speaker purity for each WAV file stored in a provided directory (.SEGMENT files are needed as ground-truth)
ARGUMENTS:
- folderName: the full path of the folder where the WAV and SEGMENT (ground-truth) files are stored
- LDAs: a list of LDA dimensions (0 for no LDA)
'''
types = ('*.wav', )
wavFilesList = []
for files in types:
wavFilesList.extend(glob.glob(os.path.join(folderName, files)))
wavFilesList = sorted(wavFilesList)
# get number of unique speakers per file (from ground-truth)
N = []
for wavFile in wavFilesList:
gtFile = wavFile.replace('.wav', '.segments')
if os.path.isfile(gtFile):
[segStart, segEnd, segLabels] = readSegmentGT(gtFile) # read GT data
N.append(len(list(set(segLabels))))
else:
N.append(-1)
for l in LDAs:
print "LDA = {0:d}".format(l)
for i, wavFile in enumerate(wavFilesList):
speakerDiarization(wavFile, N[i], 2.0, 0.2, 0.05, l, PLOT = False)
print
def musicThumbnailing(x, Fs, shortTermSize=1.0, shortTermStep=0.5, thumbnailSize=10.0):
'''
This function detects instances of the most representative part of a music recording, also called "music thumbnails".
A technique similar to the one proposed in [1] is used; however, a wider set of audio features is used instead of chroma features.
In particular the following steps are followed:
- Extract short-term audio features. Typical short-term window size: 1 second
- Compute the self-similarity matrix, i.e. all pairwise similarities between feature vectors
- Apply a diagonal mask as a moving average filter on the values of the self-similarity matrix.
The size of the mask is equal to the desired thumbnail length.
- Find the position of the maximum value of the new (filtered) self-similarity matrix.
The audio segments that correspond to the diagonal around that position are the selected thumbnails
ARGUMENTS:
- x: input signal
- Fs: sampling frequency
- shortTermSize: window size (in seconds)
- shortTermStep: window step (in seconds)
- thumbnailSize: desired thumbnail size (in seconds)
RETURNS:
- A1: beginning of 1st thumbnail (in seconds)
- A2: ending of 1st thumbnail (in seconds)
- B1: beginning of 2nd thumbnail (in seconds)
- B2: ending of 2nd thumbnail (in seconds)
- S: the (filtered) self-similarity matrix
USAGE EXAMPLE:
import audioFeatureExtraction as aF
[Fs, x] = basicIO.readAudioFile(inputFile)
[A1, A2, B1, B2] = musicThumbnailing(x, Fs)
[1] Bartsch, M. A., & Wakefield, G. H. (2005). Audio thumbnailing of popular music using chroma-based representations.
Multimedia, IEEE Transactions on, 7(1), 96-104.
'''
x = audioBasicIO.stereo2mono(x)
# feature extraction:
stFeatures = aF.stFeatureExtraction(x, Fs, Fs*shortTermSize, Fs*shortTermStep)
# self-similarity matrix
S = selfSimilarityMatrix(stFeatures)
# moving filter:
M = int(round(thumbnailSize / shortTermStep))
B = numpy.eye(M,M)
S = scipy.signal.convolve2d(S, B, 'valid')
# post-processing (remove main diagonal elements)
MIN = numpy.min(S)
for i in range(S.shape[0]):
for j in range(S.shape[1]):
if abs(i-j) < 5.0 / shortTermStep or i > j:
S[i, j] = MIN
# find max position:
[I, J] = numpy.unravel_index(S.argmax(), S.shape)
# expand:
i1 = I; i2 = I
j1 = J; j2 = J
while i2-i1<M:
if S[i1-1, j1-1] > S[i2+1,j2+1]:
i1 -= 1
j1 -= 1
else:
i2 += 1
j2 += 1
return (shortTermStep*i1, shortTermStep*i2, shortTermStep*j1, shortTermStep*j2, S)
|
DynaLite/DynaLite_1.0
|
Sources/SpeechProcessing/pyAudioAnalysis/myAudioSegmentation.py
|
Python
|
mit
| 43,897
|
[
"Gaussian"
] |
9c8e03246ab6c71dcffc416732cd3750180981afa6d3fa0f2c27a85e2db2e01c
|
# Copyright 2008-2015 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SeqIO support for the "tab" (simple tab separated) file format.
You are expected to use this module via the Bio.SeqIO functions.
The "tab" format is an ad-hoc plain text file format where each sequence is
on one (long) line. Each line contains the identifier/description, followed
by a tab, followed by the sequence. For example, consider the following
short FASTA format file::
>ID123456 possible binding site?
CATCNAGATGACACTACGACTACGACTCAGACTAC
>ID123457 random sequence
ACACTACGACTACGACTCAGACTACAAN
Apart from the descriptions, this can be represented in the simple two column
tab separated format as follows::
ID123456(tab)CATCNAGATGACACTACGACTACGACTCAGACTAC
ID123457(tab)ACACTACGACTACGACTCAGACTACAAN
When reading this file, "ID123456" or "ID123457" will be taken as the record's
.id and .name property. There is no other information to record.
Similarly, when writing to this format, Biopython will ONLY record the record's
.id and .seq (and not the description or any other information) as in the
example above.
"""
from __future__ import print_function
from Bio.Alphabet import single_letter_alphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqIO.Interfaces import SequentialSequenceWriter
def TabIterator(handle, alphabet=single_letter_alphabet):
"""Iterates over tab separated lines (as SeqRecord objects).
Each line of the file should contain one tab only, dividing the line
into an identifier and the full sequence.
Arguments:
- handle - input file
- alphabet - optional alphabet
The first field is taken as the record's .id and .name (regardless of
any spaces within the text) and the second field is the sequence.
Any blank lines are ignored.
Example:
>>> with open("GenBank/NC_005816.tsv") as handle:
... for record in TabIterator(handle):
... print("%s length %i" % (record.id, len(record)))
gi|45478712|ref|NP_995567.1| length 340
gi|45478713|ref|NP_995568.1| length 260
gi|45478714|ref|NP_995569.1| length 64
gi|45478715|ref|NP_995570.1| length 123
gi|45478716|ref|NP_995571.1| length 145
gi|45478717|ref|NP_995572.1| length 357
gi|45478718|ref|NP_995573.1| length 138
gi|45478719|ref|NP_995574.1| length 312
gi|45478720|ref|NP_995575.1| length 99
gi|45478721|ref|NP_995576.1| length 90
"""
for line in handle:
try:
title, seq = line.split("\t") # will fail if more than one tab!
except ValueError:
if line.strip() == "":
# It's a blank line, ignore it
continue
raise ValueError("Each line should have one tab separating the" +
" title and sequence, this line has %i tabs: %r"
% (line.count("\t"), line))
title = title.strip()
seq = seq.strip() # removes the trailing new line
yield SeqRecord(Seq(seq, alphabet),
id=title, name=title,
description="")
class TabWriter(SequentialSequenceWriter):
"""Class to write simple tab separated format files.
Each line consists of "id(tab)sequence" only.
Any description, name or other annotation is not recorded.
"""
def write_record(self, record):
"""Write a single tab line to the file."""
assert self._header_written
assert not self._footer_written
self._record_written = True
title = self.clean(record.id)
seq = self._get_seq_string(record) # Catches sequence being None
assert "\t" not in title
assert "\n" not in title
assert "\r" not in title
assert "\t" not in seq
assert "\n" not in seq
assert "\r" not in seq
self.handle.write("%s\t%s\n" % (title, seq))
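# Example usage via Bio.SeqIO, the recommended entry point (the file names
# below are placeholders):
#
#     from Bio import SeqIO
#     with open("input.fasta") as in_handle, open("output.tab", "w") as out_handle:
#         count = SeqIO.write(SeqIO.parse(in_handle, "fasta"), out_handle, "tab")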
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest(verbose=0)
|
zjuchenyuan/BioWeb
|
Lib/Bio/SeqIO/TabIO.py
|
Python
|
mit
| 4,174
|
[
"Biopython"
] |
3b39bba564e80ca76d6e62a33dc57d255c946059bbd4a5ce74d3405546532ac5
|
"""Makes figure with saliency maps."""
import os
import argparse
import numpy
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot
from PIL import Image
from gewittergefahr.gg_utils import general_utils
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import saliency_maps
from gewittergefahr.deep_learning import training_validation_io as trainval_io
from gewittergefahr.plotting import plotting_utils
from gewittergefahr.plotting import saliency_plotting
from gewittergefahr.plotting import imagemagick_utils
from gewittergefahr.scripts import plot_input_examples as plot_examples
RADAR_HEIGHTS_M_AGL = numpy.array([2000, 6000, 10000], dtype=int)
RADAR_FIELD_NAMES = [
radar_utils.REFL_NAME, radar_utils.VORTICITY_NAME,
radar_utils.SPECTRUM_WIDTH_NAME
]
MAX_COLOUR_PERCENTILE = 99.
COLOUR_BAR_LENGTH = 0.25
PANEL_NAME_FONT_SIZE = 30
COLOUR_BAR_FONT_SIZE = 25
CONVERT_EXE_NAME = '/usr/bin/convert'
TITLE_FONT_SIZE = 150
TITLE_FONT_NAME = 'DejaVu-Sans-Bold'
FIGURE_RESOLUTION_DPI = 300
CONCAT_FIGURE_SIZE_PX = int(1e7)
INPUT_FILES_ARG_NAME = 'input_saliency_file_names'
COMPOSITE_NAMES_ARG_NAME = 'composite_names'
COLOUR_MAP_ARG_NAME = 'colour_map_name'
MAX_VALUES_ARG_NAME = 'max_colour_values'
HALF_NUM_CONTOURS_ARG_NAME = 'half_num_contours'
SMOOTHING_RADIUS_ARG_NAME = 'smoothing_radius_grid_cells'
OUTPUT_DIR_ARG_NAME = 'output_dir_name'
INPUT_FILES_HELP_STRING = (
'List of saliency files (each will be read by `saliency.read_file`).'
)
COMPOSITE_NAMES_HELP_STRING = (
'List of composite names (one for each saliency file). This list must be '
'space-separated, but after reading the list, underscores within each item '
'will be replaced by spaces.'
)
COLOUR_MAP_HELP_STRING = (
'Colour scheme for saliency. Must be accepted by '
'`matplotlib.pyplot.get_cmap`.'
)
MAX_VALUES_HELP_STRING = (
'Max absolute saliency in each colour scheme (one per file). Use -1 to let'
' the max value be determined on the fly.'
)
HALF_NUM_CONTOURS_HELP_STRING = (
'Number of saliency contours on either side of zero (positive and '
'negative).'
)
SMOOTHING_RADIUS_HELP_STRING = (
'e-folding radius for Gaussian smoother (num grid cells). If you do not '
'want to smooth saliency maps, make this negative.'
)
OUTPUT_DIR_HELP_STRING = (
'Name of output directory (figures will be saved here).'
)
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + INPUT_FILES_ARG_NAME, type=str, nargs='+', required=True,
help=INPUT_FILES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + COMPOSITE_NAMES_ARG_NAME, type=str, nargs='+', required=True,
help=COMPOSITE_NAMES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + COLOUR_MAP_ARG_NAME, type=str, required=False, default='binary',
help=COLOUR_MAP_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + MAX_VALUES_ARG_NAME, type=float, nargs='+', required=True,
help=MAX_VALUES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + HALF_NUM_CONTOURS_ARG_NAME, type=int, required=False,
default=10, help=HALF_NUM_CONTOURS_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + SMOOTHING_RADIUS_ARG_NAME, type=float, required=False,
default=1., help=SMOOTHING_RADIUS_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
help=OUTPUT_DIR_HELP_STRING
)
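# Example invocation (sketch; the script name and file names are placeholders,
# and the flags correspond to the argument names defined above):
#
# python plot_saliency_maps.py \
# --input_saliency_file_names composite1.p composite2.p \
# --composite_names best_hits worst_misses \
# --max_colour_values 0.01 -1 \
# --output_dir_name saliency_figures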
def _read_one_composite(saliency_file_name, smoothing_radius_grid_cells):
"""Reads saliency map for one composite.
E = number of examples
M = number of rows in grid
N = number of columns in grid
H = number of heights in grid
F = number of radar fields
:param saliency_file_name: Path to input file (will be read by
`saliency.read_file`).
:param smoothing_radius_grid_cells: Radius for Gaussian smoother, used only
for saliency map.
:return: mean_radar_matrix: E-by-M-by-N-by-H-by-F numpy array with mean
radar fields.
:return: mean_saliency_matrix: E-by-M-by-N-by-H-by-F numpy array with mean
saliency fields.
:return: model_metadata_dict: Dictionary returned by
`cnn.read_model_metadata`.
"""
print('Reading data from: "{0:s}"...'.format(saliency_file_name))
saliency_dict = saliency_maps.read_file(saliency_file_name)[0]
mean_radar_matrix = numpy.expand_dims(
saliency_dict[saliency_maps.MEAN_PREDICTOR_MATRICES_KEY][0], axis=0
)
mean_saliency_matrix = numpy.expand_dims(
saliency_dict[saliency_maps.MEAN_SALIENCY_MATRICES_KEY][0], axis=0
)
if smoothing_radius_grid_cells is not None:
print((
'Smoothing saliency maps with Gaussian filter (e-folding radius of '
'{0:.1f} grid cells)...'
).format(
smoothing_radius_grid_cells
))
num_fields = mean_radar_matrix.shape[-1]
for k in range(num_fields):
mean_saliency_matrix[0, ..., k] = (
general_utils.apply_gaussian_filter(
input_matrix=mean_saliency_matrix[0, ..., k],
e_folding_radius_grid_cells=smoothing_radius_grid_cells
)
)
model_file_name = saliency_dict[saliency_maps.MODEL_FILE_KEY]
model_metafile_name = cnn.find_metafile(model_file_name)
print('Reading CNN metadata from: "{0:s}"...'.format(model_metafile_name))
model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
good_indices = numpy.array([
numpy.where(
training_option_dict[trainval_io.RADAR_HEIGHTS_KEY] == h
)[0][0]
for h in RADAR_HEIGHTS_M_AGL
], dtype=int)
mean_radar_matrix = mean_radar_matrix[..., good_indices, :]
mean_saliency_matrix = mean_saliency_matrix[..., good_indices, :]
good_indices = numpy.array([
training_option_dict[trainval_io.RADAR_FIELDS_KEY].index(f)
for f in RADAR_FIELD_NAMES
], dtype=int)
mean_radar_matrix = mean_radar_matrix[..., good_indices]
mean_saliency_matrix = mean_saliency_matrix[..., good_indices]
training_option_dict[trainval_io.RADAR_HEIGHTS_KEY] = RADAR_HEIGHTS_M_AGL
training_option_dict[trainval_io.RADAR_FIELDS_KEY] = RADAR_FIELD_NAMES
training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] = None
model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY] = training_option_dict
return mean_radar_matrix, mean_saliency_matrix, model_metadata_dict
def _overlay_text(
image_file_name, x_offset_from_center_px, y_offset_from_top_px,
text_string):
"""Overlays text on image.
:param image_file_name: Path to image file.
:param x_offset_from_center_px: Center-relative x-coordinate (pixels).
:param y_offset_from_top_px: Top-relative y-coordinate (pixels).
:param text_string: String to overlay.
:raises: ValueError: if ImageMagick command (which is ultimately a Unix
command) fails.
"""
command_string = (
'"{0:s}" "{1:s}" -gravity north -pointsize {2:d} -font "{3:s}" '
'-fill "rgb(0, 0, 0)" -annotate {4:+d}{5:+d} "{6:s}" "{1:s}"'
).format(
CONVERT_EXE_NAME, image_file_name, TITLE_FONT_SIZE, TITLE_FONT_NAME,
x_offset_from_center_px, y_offset_from_top_px, text_string
)
exit_code = os.system(command_string)
if exit_code == 0:
return
raise ValueError(imagemagick_utils.ERROR_STRING)
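# Hypothetical example of the command assembled above (paths, font, and text
# are placeholders; CONVERT_EXE_NAME and TITLE_FONT_* are defined earlier in
# the file):
#
# "/usr/bin/convert" "fig.jpg" -gravity north -pointsize 50 \
#     -font "DejaVu-Sans" -fill "rgb(0, 0, 0)" -annotate +0+0 "(a) Composite" \
#     "fig.jpg"
#
# The input file doubles as the output file, so the title is burned into the
# image in place; any non-zero exit code from os.system is raised as an error.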
def _plot_one_composite(
saliency_file_name, composite_name_abbrev, composite_name_verbose,
colour_map_object, max_colour_value, half_num_contours,
smoothing_radius_grid_cells, output_dir_name):
"""Plots saliency map for one composite.
    :param saliency_file_name: Path to input file (will be read by
        `saliency_maps.read_file`).
    :param composite_name_abbrev: Abbreviated composite name (will be used in
        file names).
:param composite_name_verbose: Verbose composite name (will be used in
figure title).
:param colour_map_object: See documentation at top of file.
:param max_colour_value: Same.
:param half_num_contours: Same.
:param smoothing_radius_grid_cells: Same.
:param output_dir_name: Name of output directory (figures will be saved
here).
:return: main_figure_file_name: Path to main image file created by this
method.
:return: max_colour_value: See input doc.
"""
mean_radar_matrix, mean_saliency_matrix, model_metadata_dict = (
_read_one_composite(
saliency_file_name=saliency_file_name,
smoothing_radius_grid_cells=smoothing_radius_grid_cells)
)
if numpy.isnan(max_colour_value):
max_colour_value = numpy.percentile(
mean_saliency_matrix, MAX_COLOUR_PERCENTILE
)
training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
field_names = training_option_dict[trainval_io.RADAR_FIELDS_KEY]
num_fields = mean_radar_matrix.shape[-1]
num_heights = mean_radar_matrix.shape[-2]
handle_dict = plot_examples.plot_one_example(
list_of_predictor_matrices=[mean_radar_matrix],
model_metadata_dict=model_metadata_dict, pmm_flag=True,
allow_whitespace=True, plot_panel_names=True,
panel_name_font_size=PANEL_NAME_FONT_SIZE,
add_titles=False, label_colour_bars=True,
colour_bar_length=COLOUR_BAR_LENGTH,
colour_bar_font_size=COLOUR_BAR_FONT_SIZE,
num_panel_rows=num_heights)
figure_objects = handle_dict[plot_examples.RADAR_FIGURES_KEY]
axes_object_matrices = handle_dict[plot_examples.RADAR_AXES_KEY]
for k in range(num_fields):
this_saliency_matrix = mean_saliency_matrix[0, ..., k]
saliency_plotting.plot_many_2d_grids_with_contours(
saliency_matrix_3d=numpy.flip(this_saliency_matrix, axis=0),
axes_object_matrix=axes_object_matrices[k],
colour_map_object=colour_map_object,
max_absolute_contour_level=max_colour_value,
contour_interval=max_colour_value / half_num_contours
)
panel_file_names = [None] * num_fields
for k in range(num_fields):
panel_file_names[k] = '{0:s}/{1:s}_{2:s}.jpg'.format(
output_dir_name, composite_name_abbrev,
field_names[k].replace('_', '-')
)
print('Saving figure to: "{0:s}"...'.format(panel_file_names[k]))
figure_objects[k].savefig(
panel_file_names[k], dpi=FIGURE_RESOLUTION_DPI,
pad_inches=0, bbox_inches='tight'
)
pyplot.close(figure_objects[k])
main_figure_file_name = '{0:s}/{1:s}_saliency.jpg'.format(
output_dir_name, composite_name_abbrev)
print('Concatenating panels to: "{0:s}"...'.format(main_figure_file_name))
imagemagick_utils.concatenate_images(
input_file_names=panel_file_names,
output_file_name=main_figure_file_name,
num_panel_rows=1, num_panel_columns=num_fields, border_width_pixels=50)
imagemagick_utils.resize_image(
input_file_name=main_figure_file_name,
output_file_name=main_figure_file_name,
output_size_pixels=CONCAT_FIGURE_SIZE_PX)
imagemagick_utils.trim_whitespace(
input_file_name=main_figure_file_name,
output_file_name=main_figure_file_name,
border_width_pixels=TITLE_FONT_SIZE + 25)
_overlay_text(
image_file_name=main_figure_file_name,
x_offset_from_center_px=0, y_offset_from_top_px=0,
text_string=composite_name_verbose)
imagemagick_utils.trim_whitespace(
input_file_name=main_figure_file_name,
output_file_name=main_figure_file_name,
border_width_pixels=10)
return main_figure_file_name, max_colour_value
def _add_colour_bar(figure_file_name, colour_map_object, max_colour_value,
temporary_dir_name):
"""Adds colour bar to saved image file.
:param figure_file_name: Path to saved image file. Colour bar will be added
to this image.
:param colour_map_object: Colour scheme (instance of `matplotlib.pyplot.cm`
or similar).
:param max_colour_value: Max value in colour scheme.
:param temporary_dir_name: Name of temporary output directory.
"""
this_image_matrix = Image.open(figure_file_name)
figure_width_px, figure_height_px = this_image_matrix.size
figure_width_inches = float(figure_width_px) / FIGURE_RESOLUTION_DPI
figure_height_inches = float(figure_height_px) / FIGURE_RESOLUTION_DPI
extra_figure_object, extra_axes_object = pyplot.subplots(
1, 1, figsize=(figure_width_inches, figure_height_inches)
)
extra_axes_object.axis('off')
dummy_values = numpy.array([0., max_colour_value])
colour_bar_object = plotting_utils.plot_linear_colour_bar(
axes_object_or_matrix=extra_axes_object, data_matrix=dummy_values,
colour_map_object=colour_map_object,
min_value=0., max_value=max_colour_value,
orientation_string='vertical', fraction_of_axis_length=1.25,
extend_min=False, extend_max=True, font_size=COLOUR_BAR_FONT_SIZE,
aspect_ratio=50.
)
tick_values = colour_bar_object.get_ticks()
if max_colour_value <= 0.005:
tick_strings = ['{0:.4f}'.format(v) for v in tick_values]
elif max_colour_value <= 0.05:
tick_strings = ['{0:.3f}'.format(v) for v in tick_values]
else:
tick_strings = ['{0:.2f}'.format(v) for v in tick_values]
colour_bar_object.set_ticks(tick_values)
colour_bar_object.set_ticklabels(tick_strings)
extra_file_name = '{0:s}/saliency_colour-bar.jpg'.format(temporary_dir_name)
print('Saving colour bar to: "{0:s}"...'.format(extra_file_name))
extra_figure_object.savefig(
extra_file_name, dpi=FIGURE_RESOLUTION_DPI,
pad_inches=0, bbox_inches='tight'
)
pyplot.close(extra_figure_object)
print('Concatenating colour bar to: "{0:s}"...'.format(figure_file_name))
imagemagick_utils.concatenate_images(
input_file_names=[figure_file_name, extra_file_name],
output_file_name=figure_file_name,
num_panel_rows=1, num_panel_columns=2,
extra_args_string='-gravity Center'
)
os.remove(extra_file_name)
imagemagick_utils.trim_whitespace(
input_file_name=figure_file_name, output_file_name=figure_file_name
)
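# Minimal sketch (hypothetical helper, not used elsewhere): the tick-label
# precision rule used in _add_colour_bar, factored out for clarity. Smaller
# colour ranges get more decimal places so adjacent ticks stay distinguishable.
def _demo_tick_strings(max_colour_value, tick_values):
    if max_colour_value <= 0.005:
        return ['{0:.4f}'.format(v) for v in tick_values]
    if max_colour_value <= 0.05:
        return ['{0:.3f}'.format(v) for v in tick_values]
    return ['{0:.2f}'.format(v) for v in tick_values]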
def _run(saliency_file_names, composite_names, colour_map_name,
max_colour_values, half_num_contours, smoothing_radius_grid_cells,
output_dir_name):
"""Makes figure with saliency maps for MYRORSS model.
This is effectively the main method.
:param saliency_file_names: See documentation at top of file.
:param composite_names: Same.
:param colour_map_name: Same.
:param max_colour_values: Same.
:param half_num_contours: Same.
:param smoothing_radius_grid_cells: Same.
:param output_dir_name: Same.
"""
# Process input args.
file_system_utils.mkdir_recursive_if_necessary(
directory_name=output_dir_name
)
if smoothing_radius_grid_cells <= 0:
smoothing_radius_grid_cells = None
colour_map_object = pyplot.cm.get_cmap(colour_map_name)
error_checking.assert_is_geq(half_num_contours, 5)
num_composites = len(saliency_file_names)
expected_dim = numpy.array([num_composites], dtype=int)
error_checking.assert_is_numpy_array(
numpy.array(composite_names), exact_dimensions=expected_dim
)
max_colour_values[max_colour_values <= 0] = numpy.nan
error_checking.assert_is_numpy_array(
max_colour_values, exact_dimensions=expected_dim
)
composite_names_abbrev = [
n.replace('_', '-').lower() for n in composite_names
]
composite_names_verbose = [
'({0:s}) {1:s}'.format(
chr(ord('a') + i), composite_names[i].replace('_', ' ')
)
for i in range(num_composites)
]
panel_file_names = [None] * num_composites
for i in range(num_composites):
panel_file_names[i], max_colour_values[i] = _plot_one_composite(
saliency_file_name=saliency_file_names[i],
composite_name_abbrev=composite_names_abbrev[i],
composite_name_verbose=composite_names_verbose[i],
colour_map_object=colour_map_object,
max_colour_value=max_colour_values[i],
half_num_contours=half_num_contours,
smoothing_radius_grid_cells=smoothing_radius_grid_cells,
output_dir_name=output_dir_name
)
_add_colour_bar(
figure_file_name=panel_file_names[i],
colour_map_object=colour_map_object,
max_colour_value=max_colour_values[i],
temporary_dir_name=output_dir_name
)
print('\n')
figure_file_name = '{0:s}/saliency_concat.jpg'.format(output_dir_name)
print('Concatenating panels to: "{0:s}"...'.format(figure_file_name))
num_panel_rows = int(numpy.floor(
numpy.sqrt(num_composites)
))
num_panel_columns = int(numpy.ceil(
float(num_composites) / num_panel_rows
))
imagemagick_utils.concatenate_images(
input_file_names=panel_file_names,
output_file_name=figure_file_name, border_width_pixels=25,
num_panel_rows=num_panel_rows, num_panel_columns=num_panel_columns
)
imagemagick_utils.trim_whitespace(
input_file_name=figure_file_name, output_file_name=figure_file_name,
border_width_pixels=10
)
if __name__ == '__main__':
INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
_run(
saliency_file_names=getattr(INPUT_ARG_OBJECT, INPUT_FILES_ARG_NAME),
composite_names=getattr(INPUT_ARG_OBJECT, COMPOSITE_NAMES_ARG_NAME),
colour_map_name=getattr(INPUT_ARG_OBJECT, COLOUR_MAP_ARG_NAME),
max_colour_values=numpy.array(
getattr(INPUT_ARG_OBJECT, MAX_VALUES_ARG_NAME), dtype=float
),
half_num_contours=getattr(INPUT_ARG_OBJECT, HALF_NUM_CONTOURS_ARG_NAME),
smoothing_radius_grid_cells=getattr(
INPUT_ARG_OBJECT, SMOOTHING_RADIUS_ARG_NAME
),
output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)
)
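# Example invocation (hypothetical: the actual flag strings come from the
# *_ARG_NAME constants defined earlier in the file and may differ):
#
# python make_saliency_figure.py \
#     --input_saliency_file_names best_hits.p worst_misses.p \
#     --composite_names "best_hits" "worst_misses" \
#     --colour_map_name binary \
#     --max_colour_values 0.01 0.01 \
#     --half_num_contours 10 \
#     --smoothing_radius_grid_cells 1 \
#     --output_dir_name saliency_figures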
|
thunderhoser/GewitterGefahr
|
gewittergefahr/interpretation_paper_2019/make_saliency_figure.py
|
Python
|
mit
| 18,459
|
[
"Gaussian"
] |
bb2d09edaa4c80eb0d1c412a7a62e4ade3920c7eba65daffa82584f9bedaffdf
|
# Author: Robin Betz
#
# Copyright (C) 2015 Robin Betz
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330
# Boston, MA 02111-1307, USA.
"""
This module contains functions for manipulating molecules using
the VMD python API.
"""
from __future__ import print_function
import os
import tempfile
import numpy as np
from vmd import molecule, atomsel
from dabble import DabbleError
from dabble.fileutils import concatenate_mae_files
# pylint: disable=no-member
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Constants
__1M_SALT_IONS_PER_WATER = 0.018
#==============================================================================
def get_net_charge(sel, molid):
"""
Gets the net charge of an atom selection, using the charge
field of the data.
Args:
sel (str): VMD atom selection to compute the charge of
molid (int): VMD molecule id to select within
Returns:
(int): The rounded net charge of the selection
    Throws:
        DabbleError: If charge does not round to an integer value
"""
charge = np.array(atomsel(sel, molid=molid).charge)
if charge.size == 0:
return 0
print("Calculating charge on %d atoms" % charge.size)
# Check the system has charges defined
if all(charge == 0):
print("\nWARNING: All charges in selection are zero. "
"Check the input file has formal charges defined!\n"
"Selection was:\n%s\n"%sel)
    print("Unique charges present: %s" % set(charge))
    # Round to nearest integer and check this is okay
net_charge = sum(charge)
rslt = round(net_charge)
if abs(rslt - net_charge) > 0.05:
raise DabbleError("Total charge of %f is not integral within a "
"tolerance of 0.05. Check your input file."
% net_charge)
return int(rslt)
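#==========================================================================
# Minimal sketch (hypothetical helper, not used elsewhere): the rounding rule
# in get_net_charge, runnable without VMD. Partial charges summing to 0.99
# round to +1 and fall within the 0.05 tolerance.
def _demo_net_charge_rounding():
    charge = np.array([0.33, 0.33, 0.33])
    net_charge = sum(charge)      # 0.99
    rslt = round(net_charge)      # 1
    assert abs(rslt - net_charge) <= 0.05
    return int(rslt)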
#==========================================================================
def get_system_net_charge(molid):
"""
Gets the net charge of the entire system.
What is in the system is defined by the beta field, as atoms that won't
be written have beta 0.
Args:
molid (int): VMD molecule id to compute the charge of
Returns:
(int): The net charge of the molecule
"""
return get_net_charge(sel='beta 1', molid=molid)
#==========================================================================
def diameter(coords, chunkmem=30e6):
"""
    Returns the diameter (maximum pairwise distance) of a set of XY
    coordinates, computed in memory-limited chunks.
    Args:
        coords (numpy array Nx2) : XY coordinates to get diameter of
        chunkmem (float) : Approximate memory budget per chunk, in bytes
    Returns:
        (float): Diameter of the point set
"""
# pylint: disable=invalid-name
coords = np.require(coords, dtype=np.float32)
if coords.size == 0:
return 0
nper = int(chunkmem / (4*coords.size))
D = np.array([np.inner(x, x) for x in coords])
i = 0
d = 0
while i < len(coords):
M = np.inner(-2*coords, coords[i:i+nper])
M += D[i:i+nper]
M += D[:, None]
nd = M.max()
if nd > d:
d = nd
i += nper
return np.sqrt(d)
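#==========================================================================
# Minimal sketch (hypothetical helper, not used elsewhere): an O(N^2)
# brute-force version of diameter() for cross-checking the chunked
# computation on small inputs.
def _demo_diameter_bruteforce(coords):
    coords = np.asarray(coords, dtype=np.float32)
    if coords.size == 0:
        return 0
    # All pairwise difference vectors, then the largest pairwise distance
    diffs = coords[:, None, :] - coords[None, :, :]
    return float(np.sqrt((diffs ** 2).sum(axis=-1).max()))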
#==========================================================================
def solute_xy_diameter(solute_sel, molid):
"""
Returns the XY diameter of a set of atoms.
Args:
solute_sel (str): VMD atom selection to get diameter of
molid (int): VMD molecule ID to select within
Returns:
        (float) Diameter of the selection, projected into the XY plane
"""
sol = atomsel(solute_sel, molid=molid)
return diameter(np.transpose([sol.x, sol.y]))
#==========================================================================
def get_num_salt_ions_needed(molid,
conc,
water_sel='water and element O',
cation='Na',
anion='Cl'):
"""
Gets the number of salt ions needed to put the system at a given
concentration of salt.
Args:
molid (int) : The VMD molecule ID to consider
conc (float) : Desired salt concentration
water_sel (str) : VMD atom selection for water
cation (str) : Cation to use, either Na or K right now
anion (str) : Anion to use, only Cl currently supported
Returns:
(float tuple) : # cations needed, # anions needed, number of waters
that will remain, total # cations, total # anions,
cation concentration, anion concentration
Raises:
Exception if number of cations and the net cation charge are
not equal (should never happen)
"""
# pylint: disable = too-many-branches, too-many-locals
cations = atomsel_remaining(molid, 'element %s' % cation)
anions = atomsel_remaining(molid, 'element %s' % anion)
molid = molecule.get_top()
    try:
        if abs(get_net_charge(str(cations), molid) - len(cations)) > 0.01:
            raise ValueError("Cation count does not match net cation charge")
    except (ValueError, DabbleError):
        # Check for bonded cations
        # Minimize the number of calls to atomsel
        nonbonded_cation_index = [cations.index[i]
                                  for i in range(len(cations))
                                  if len(cations.bonds[i]) == 0]
        if not nonbonded_cation_index:
            cations = atomsel('none')
        else:
            cations = atomsel_remaining(
                molid,
                'index ' + ' '.join(str(i) for i in nonbonded_cation_index))
            if abs(get_net_charge(str(cations), molid) - len(cations)) > 0.01:
                raise Exception('Num cations and net cation charge are not equal')
    try:
        if abs(get_net_charge(str(anions), molid) + len(anions)) > 0.01:
            raise ValueError("Anion count does not match net anion charge")
    except (ValueError, DabbleError):
        # Check for bonded anions
        nonbonded_anion_index = [anions.index[i]
                                 for i in range(len(anions))
                                 if len(anions.bonds[i]) == 0]
        if not nonbonded_anion_index:
            anions = atomsel('none')
        else:
            anions = atomsel_remaining(
                molid,
                'index ' + ' '.join(str(i) for i in nonbonded_anion_index))
            if abs(get_net_charge(str(anions), molid) + len(anions)) > 0.01:
                raise Exception('Num anions and abs anion charge are not equal')
num_waters = num_atoms_remaining(molid, water_sel)
num_for_conc = int(round(__1M_SALT_IONS_PER_WATER * num_waters * conc))
pos_ions_needed = num_for_conc - len(cations)
neg_ions_needed = num_for_conc - len(anions)
system_charge = get_system_net_charge(molid)
new_system_charge = system_charge + len(anions) - len(cations)
to_neutralize = abs(new_system_charge)
if new_system_charge > 0:
if to_neutralize > pos_ions_needed:
neg_ions_needed += to_neutralize - pos_ions_needed
pos_ions_needed = 0
else:
pos_ions_needed -= to_neutralize
    else:
        if to_neutralize > neg_ions_needed:
            pos_ions_needed += to_neutralize - neg_ions_needed
            neg_ions_needed = 0
        else:
            neg_ions_needed -= to_neutralize
# Check for less than 0
pos_ions_needed = max(0, pos_ions_needed)
neg_ions_needed = max(0, neg_ions_needed)
total_cations = len(cations) + pos_ions_needed
total_anions = len(anions) + neg_ions_needed
# volume estimate from prev waters
cation_conc = (float(total_cations) / num_waters) / __1M_SALT_IONS_PER_WATER
anion_conc = (float(total_anions) / num_waters) / __1M_SALT_IONS_PER_WATER
num_waters -= pos_ions_needed + neg_ions_needed
return (pos_ions_needed,
neg_ions_needed,
num_waters,
total_cations,
total_anions,
cation_conc,
anion_conc)
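#==========================================================================
# Minimal sketch (hypothetical numbers): the concentration bookkeeping used
# above. With 10000 waters and a target of 0.150 M, the 0.018 ions-per-water
# constant gives round(0.018 * 10000 * 0.150) = 27 of each ion before any
# neutralization adjustments.
def _demo_salt_count(num_waters=10000, conc=0.150):
    return int(round(__1M_SALT_IONS_PER_WATER * num_waters * conc))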
#==========================================================================
def lipid_composition(lipid_sel, molid):
"""
Calculates the lipid composition of each leaflet of the membrane
Args:
lipid_sel (str): VMD selection string for lipid
molid (int) : VMD molecule ID to consider
Returns:
(int tuple) number of lipids on inner and outer membrane leaflets
"""
def leaflet(leaflet_sel):
"""
Returns the composition in one selected leaflet
"""
selstr = "not element H C and (%s) and (%s)" % (lipid_sel, leaflet_sel)
sel = atomsel_remaining(molid, selstr)
resnames = set(sel.resname)
dct = {s : len(set(atomsel_remaining(molid,
"%s and resname '%s'"
% (sel, s)).fragment))
for s in resnames}
return dct
inner, outer = leaflet('z < 0'), leaflet('not (z < 0)')
return inner, outer
#==========================================================================
def print_lipid_composition(lipid_sel, molid):
"""
Describes the composition of the inner and outer leaflet
Args:
molid (int): VMD molecule id to look at
lipid_sel (str): VMD atom selection for lipid
Returns:
(str) string describing the lipid composition
"""
inner, outer = lipid_composition(lipid_sel, molid)
desc = "Inner leaflet:"
for kind, num in sorted(inner.items()):
desc += " %d %s\n" % (num, kind)
desc += "Outer leaflet:"
for kind, num in sorted(outer.items()):
desc += " %d %s\n" % (num, kind)
return desc
#==========================================================================
def get_system_dimensions(molid):
"""
Gets the periodic box dimensions of a system.
Args:
molid (int) : VMD molecule ID to consider
Returns:
(float tuple) : A, B, C box dimensions
Raises:
ValueError if no box is found
"""
box = molecule.get_periodic(molid)
if box['a'] == 0.0 and box['b'] == 0.0 and box['c'] == 0.0:
raise Exception('No periodic box found in membrane!')
return (box['a'], box['b'], box['c'])
#==========================================================================
def center_system(molid, tmp_dir, center_z=False):
"""
Centers an entire system in the XY-plane, and optionally in the Z
dimension. Needs to save and reload the file in case the current
positions need to be concatenated to produce a new file.
Args:
molid (int): VMD molecule id to center
tmp_dir (str): Directory to create temp file in
center_z (bool): Whether or not to center along the Z axis as well
Returns:
(int) : VMD molecule id of centered system
"""
# pylint: disable=invalid-name
x, y, z = atomsel('all', molid=molid).center()
if center_z is True:
atomsel('all', molid=molid).moveby((-x, -y, -z))
else:
atomsel('all', molid=molid).moveby((-x, -y, 0))
# Save and reload the solute to record atom positions
temp_mae = tempfile.mkstemp(suffix='.mae',
prefix='dabble_centered',
dir=tmp_dir)[1]
atomsel('all', molid=molid).write('mae', temp_mae)
molecule.delete(molid)
new_id = molecule.load('mae', temp_mae)
return new_id
#==========================================================================
def set_ion(molid, atom_id, element):
"""
Sets an atom to be the desired ion
Args:
molid (int): VMD molecule to operate on
atom_id (int): Atom index to change to ion
element (str in Na, K, Cl): Ion to apply
Raises:
ValueError if the index to change is not present
"""
sel = atomsel('index %d' % atom_id, molid=molid)
if not sel:
raise ValueError("Index %d does not exist" % atom_id)
resname = dict(Na='SOD', K='POT', Cl='CLA')[element]
name = dict(Na='NA', K='K', Cl='CL')[element]
attype = dict(Na='NA', K='K', Cl='CL')[element]
charge = dict(Na=1, K=1, Cl=-1)[element]
sel.element = element
sel.name = name
sel.type = attype
sel.resname = resname
sel.chain = 'N'
sel.segid = 'ION'
sel.charge = charge
#==========================================================================
def set_cations(molid, element, filter_sel='none'):
"""
Sets all of the specified atoms to a cation
Args:
molid (int): VMD molecule ID to consider
element (str in Na, K): Cation to convert
filter_sel (str): VMD atom selection string for atoms to convert
Raises:
ValueError if invalid cation specified
"""
if element not in ['Na', 'K']:
raise DabbleError("Invalid cation '%s'. "
"Supported cations are Na, K" % element)
for gid in tuple(atomsel('element K Na and not (%s)' % filter_sel)):
set_ion(molid, gid, element)
#==========================================================================
def tile_system(input_id, times_x, times_y, times_z, tmp_dir):
"""
Tiles the membrane or solvent system the given number of times
in each direction to produce a larger system.
Args:
input_id (int): VMD molecule id to tile
times_x (int): Number of times to tile in x direction
times_y (int): Number of times to tile in y direction
times_z (int): Number of times to tile in z direction
tmp_dir (str): Directory in which to put temporary files
Returns:
(int) VMD molecule ID of tiled system
"""
# pylint: disable=invalid-name, too-many-locals
# Read in the equilibrated bilayer file
new_resid = np.array(atomsel('all', molid=input_id).residue)
num_residues = new_resid.max()
atomsel('all', molid=input_id).user = 2.0
wx, wy, wz = get_system_dimensions(molid=input_id)
# Move the lipids over, save that file, move them back, repeat, then
# stack all of those together to make a tiled membrane. Uses
# temporary mae files to save each "tile" since this
# format is easy to combine. Renumbers residues as it goes along.
tile_filenames = []
for nx in range(times_x):
for ny in range(times_y):
for nz in range(times_z):
tx = np.array([nx * wx, ny * wy, nz * wz])
atomsel('all', input_id).moveby(tuple(tx))
atomsel('all', input_id).resid = [int(_) for _ in new_resid]
new_resid += num_residues
tile_filename = tempfile.mkstemp(suffix='.mae',
prefix='dabble_tile_tmp',
dir=tmp_dir)[1]
tile_filenames.append(tile_filename)
atomsel('all', molid=input_id).write('mae', tile_filename)
atomsel('all', molid=input_id).moveby(tuple(-tx))
# Write all of these tiles together into one large bilayer
merge_output_filename = tempfile.mkstemp(suffix='.mae',
prefix='dabble_merge_tile_tmp',
dir=tmp_dir)[1]
concatenate_mae_files(merge_output_filename,
input_filenames=tile_filenames)
# Read that large bilayer file in as a new molecule and
# write it as the output file
output_id = molecule.load('mae', merge_output_filename)
molecule.set_periodic(output_id, -1,
times_x * wx, times_y * wy, times_z * wz,
90.0, 90.0, 90.0)
# Save and clean up
atomsel('all', molid=output_id).write('mae', merge_output_filename)
for tile_filename in tile_filenames:
os.remove(tile_filename)
return output_id
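#==========================================================================
# Minimal sketch (hypothetical helper, not used elsewhere): the tile-offset
# arithmetic used in tile_system. Each (nx, ny, nz) tile is displaced by one
# box length per index along each axis.
def _demo_tile_offsets(times_x, times_y, times_z, wx, wy, wz):
    return [np.array([nx * wx, ny * wy, nz * wz])
            for nx in range(times_x)
            for ny in range(times_y)
            for nz in range(times_z)]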
#==========================================================================
def combine_molecules(input_ids, tmp_dir):
"""
Combines input molecules, closes them and returns the molecule id
of the new molecule that combines them, putting it on top.
Args:
input_ids (list of int): Molecule IDs to combine, will be closed
tmp_dir (str): Directory to put combined molecule into
Returns:
(int) molid of combined system
"""
output_filename = tempfile.mkstemp(suffix='.mae',
prefix='dabble_combine',
dir=tmp_dir)[1]
concatenate_mae_files(output_filename, input_ids=input_ids)
output_id = molecule.load('mae', output_filename)
molecule.set_top(output_id)
for i in input_ids:
molecule.delete(i)
atomsel('all', molid=output_id).beta = 1
return output_id
#==========================================================================
def atomsel_remaining(molid, sel):
"""
Selects all remaining atoms. Whether or not an atom is counted
is determined by value of the beta flag.
Args:
molid (int): VMD molecule id to consider
        sel (str): VMD atom selection string to grab
    Returns:
        VMD atomsel object representing the selection (possibly empty)
    """
    return atomsel('beta 1 and (%s)' % sel, molid)
#==========================================================================
def num_atoms_remaining(molid, sel='all'):
"""
Returns the number of atoms remaining in the system, indicated
by the value of the beta flag.
Args:
molid (int): VMD molecule id to consider
sel (str): VMD atom selection to count, defaults to all
Returns:
(int) number of atoms remaining in the system
"""
return len(atomsel_remaining(molid, sel))
#==========================================================================
def num_waters_remaining(molid, water_sel='water and element O'):
"""
Returns the number of waters remaining in the system, indicated
by the value of the beta flag.
Args:
molid (int): VMD molecule id to consider
        water_sel (str): VMD atom selection that counts as water, defaults
            to 'water and element O'
Returns:
(int) number of water molecules remaining in the system
Raises:
ValueError if empty water selection
ValueError if non-existent molecule given
"""
if not water_sel:
raise ValueError("Empty water selection string")
if not molecule.exists(molid):
raise ValueError("Invalid molecule %d" % molid)
return len(atomsel_remaining(molid, water_sel))
#==========================================================================
def num_lipids_remaining(molid, lipid_sel):
"""
Returns the number of lipids remaining in the system, indicated
by the value of the beta flag. Uses the fragment notation to count
the number of lipids.
Args:
molid (int): VMD molecule ID to consider
lipid_sel (str): VMD atom selection that counts as lipid
Returns:
(int) number of lipid molecules remaining in the system
Raises:
ValueError if empty lipid selection
ValueError if non-existent molecule given
"""
if not lipid_sel:
raise ValueError("Empty lipid selection string")
if not molecule.exists(molid):
raise ValueError("Invalid molecule %d" % molid)
return np.unique(atomsel_remaining(molid, lipid_sel).fragment).size
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
Eigenstate/dabble
|
dabble/molutils.py
|
Python
|
gpl-2.0
| 19,999
|
[
"VMD"
] |
3de72483eabfe483680d068c096b430cab012b5b683b4053f0be79abba9aaf76
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX-License-Identifier: GPL-3.0+
from __future__ import (absolute_import, division, print_function)
from PyQt4 import QtCore, QtGui
import mantid.simpleapi as mantid
from Muon.GUI.Common.utilities import table_utils
from Muon.GUI.Common.message_box import warning
class FFTView(QtGui.QWidget):
"""
creates the layout for the FFT GUI
"""
# signals
buttonSignal = QtCore.pyqtSignal()
tableClickSignal = QtCore.pyqtSignal(object, object)
phaseCheckSignal = QtCore.pyqtSignal()
def __init__(self, parent=None):
super(FFTView, self).__init__(parent)
self.grid = QtGui.QGridLayout(self)
# add splitter for resizing
splitter = QtGui.QSplitter(QtCore.Qt.Vertical)
# make table
self.FFTTable = QtGui.QTableWidget(self)
self.FFTTable.resize(800, 800)
self.FFTTable.setRowCount(9)
self.FFTTable.setColumnCount(2)
self.FFTTable.setColumnWidth(0, 300)
self.FFTTable.setColumnWidth(1, 300)
self.FFTTable.verticalHeader().setVisible(False)
self.FFTTable.horizontalHeader().setStretchLastSection(True)
self.FFTTable.setHorizontalHeaderLabels(
("FFT Property;Value").split(";"))
# populate table
options = ['test']
table_utils.setRowName(self.FFTTable, 0, "Workspace")
self.ws = table_utils.addComboToTable(self.FFTTable, 0, options)
self.Im_box_row = 1
table_utils.setRowName(
self.FFTTable,
self.Im_box_row,
"Imaginary Data")
self.Im_box = table_utils.addCheckBoxToTable(
self.FFTTable, True, self.Im_box_row)
table_utils.setRowName(self.FFTTable, 2, "Imaginary Workspace")
self.Im_ws = table_utils.addComboToTable(self.FFTTable, 2, options)
self.shift_box_row = 3
table_utils.setRowName(self.FFTTable, self.shift_box_row, "Auto shift")
self.shift_box = table_utils.addCheckBoxToTable(
self.FFTTable, True, self.shift_box_row)
table_utils.setRowName(self.FFTTable, 4, "Shift")
self.shift = table_utils.addDoubleToTable(self.FFTTable, 0.0, 4)
self.FFTTable.hideRow(4)
table_utils.setRowName(self.FFTTable, 5, "Use Raw data")
self.Raw_box = table_utils.addCheckBoxToTable(self.FFTTable, True, 5)
table_utils.setRowName(self.FFTTable, 6, "First Good Data")
self.x0 = table_utils.addDoubleToTable(self.FFTTable, 0.1, 6)
self.FFTTable.hideRow(6)
table_utils.setRowName(self.FFTTable, 7, "Last Good Data")
self.xN = table_utils.addDoubleToTable(self.FFTTable, 15.0, 7)
self.FFTTable.hideRow(7)
table_utils.setRowName(self.FFTTable, 8, "Construct Phase Table")
self.phaseTable_box = table_utils.addCheckBoxToTable(
self.FFTTable, True, 8)
self.FFTTable.hideRow(8)
self.FFTTable.resizeRowsToContents()
# make advanced table options
self.advancedLabel = QtGui.QLabel("\n Advanced Options")
self.FFTTableA = QtGui.QTableWidget(self)
self.FFTTableA.resize(800, 800)
self.FFTTableA.setRowCount(4)
self.FFTTableA.setColumnCount(2)
self.FFTTableA.setColumnWidth(0, 300)
self.FFTTableA.setColumnWidth(1, 300)
self.FFTTableA.verticalHeader().setVisible(False)
self.FFTTableA.horizontalHeader().setStretchLastSection(True)
self.FFTTableA.setHorizontalHeaderLabels(
("Advanced Property;Value").split(";"))
table_utils.setRowName(self.FFTTableA, 0, "Apodization Function")
options = ["Lorentz", "Gaussian", "None"]
self.apodization = table_utils.addComboToTable(
self.FFTTableA, 0, options)
table_utils.setRowName(
self.FFTTableA,
1,
"Decay Constant (micro seconds)")
self.decay = table_utils.addDoubleToTable(self.FFTTableA, 4.4, 1)
table_utils.setRowName(self.FFTTableA, 2, "Negative Padding")
self.negativePadding = table_utils.addCheckBoxToTable(
self.FFTTableA, True, 2)
table_utils.setRowName(self.FFTTableA, 3, "Padding")
self.padding = table_utils.addSpinBoxToTable(self.FFTTableA, 1, 3)
self.FFTTableA.resizeRowsToContents()
# make button
self.button = QtGui.QPushButton('Calculate FFT', self)
self.button.setStyleSheet("background-color:lightgrey")
# connects
self.FFTTable.cellClicked.connect(self.tableClick)
self.button.clicked.connect(self.buttonClick)
self.ws.currentIndexChanged.connect(self.phaseCheck)
# add to layout
self.FFTTable.setMinimumSize(40, 158)
self.FFTTableA.setMinimumSize(40, 127)
table_utils.setTableHeaders(self.FFTTable)
table_utils.setTableHeaders(self.FFTTableA)
# add to layout
splitter.addWidget(self.FFTTable)
splitter.addWidget(self.advancedLabel)
splitter.addWidget(self.FFTTableA)
self.grid.addWidget(splitter)
self.grid.addWidget(self.button)
def getLayout(self):
return self.grid
# add data to view
def addItems(self, options):
self.ws.clear()
self.ws.addItems(options)
self.ws.addItem("PhaseQuad")
self.Im_ws.clear()
self.Im_ws.addItems(options)
self.phaseQuadChanged()
def removeIm(self, pattern):
index = self.Im_ws.findText(pattern)
self.Im_ws.removeItem(index)
def removeRe(self, pattern):
index = self.ws.findText(pattern)
self.ws.removeItem(index)
def setReTo(self, name):
index = self.ws.findText(name)
if index == -1:
return
self.ws.setCurrentIndex(index)
def setImTo(self, name):
index = self.Im_ws.findText(name)
if index == -1:
return
self.Im_ws.setCurrentIndex(index)
# connect signals
def phaseCheck(self):
self.phaseCheckSignal.emit()
def tableClick(self, row, col):
self.tableClickSignal.emit(row, col)
def buttonClick(self):
self.buttonSignal.emit()
def getInputWS(self):
return self.ws.currentText()
def getInputImWS(self):
return self.Im_ws.currentText()
# responses to commands
def activateButton(self):
self.button.setEnabled(True)
def deactivateButton(self):
self.button.setEnabled(False)
def setPhaseBox(self):
self.FFTTable.setRowHidden(8, "PhaseQuad" not in self.getWS())
def changed(self, box, row):
self.FFTTable.setRowHidden(row, box.checkState() == QtCore.Qt.Checked)
def changedHideUnTick(self, box, row):
self.FFTTable.setRowHidden(row, box.checkState() != QtCore.Qt.Checked)
def phaseQuadChanged(self):
# show axis
self.FFTTable.setRowHidden(6, "PhaseQuad" not in self.getWS())
self.FFTTable.setRowHidden(7, "PhaseQuad" not in self.getWS())
# hide complex ws
self.FFTTable.setRowHidden(2, "PhaseQuad" in self.getWS())
# these are for getting inputs
def getRunName(self):
if mantid.AnalysisDataService.doesExist("MuonAnalysis_1"):
tmpWS = mantid.AnalysisDataService.retrieve("MuonAnalysis_1")
else:
tmpWS = mantid.AnalysisDataService.retrieve("MuonAnalysis")
return tmpWS.getInstrument().getName() + str(tmpWS.getRunNumber()).zfill(8)
def initFFTInput(self, run=None):
inputs = {}
        inputs['InputWorkspace'] = "__ReTmp__"
inputs['Real'] = 0 # always zero
out = str(self.ws.currentText()).replace(";", "; ")
if run is None:
run = self.getRunName()
inputs['OutputWorkspace'] = run + ";" + out + ";FFT"
inputs["AcceptXRoundingErrors"] = True
return inputs
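    # Hypothetical example: if the selected workspace is "MUSR00022725;Group;fwd"
    # and the run name is "MUSR00022725", the dict built above is roughly
    # {'InputWorkspace': '__ReTmp__',
    #  'Real': 0,
    #  'OutputWorkspace': 'MUSR00022725;MUSR00022725; Group; fwd;FFT',
    #  'AcceptXRoundingErrors': True}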
def addFFTComplex(self, inputs):
inputs["InputImagWorkspace"] = "__ImTmp__"
inputs["Imaginary"] = 0 # always zero
def addFFTShift(self, inputs):
inputs['AutoShift'] = False
inputs['Shift'] = float(self.shift.text())
def addRaw(self, inputs, key):
inputs[key] += "_Raw"
def getFFTRePhase(self, inputs):
inputs['InputWorkspace'] = "__ReTmp__"
inputs['Real'] = 0 # always zero
def getFFTImPhase(self, inputs):
inputs['InputImagWorkspace'] = "__ReTmp__"
inputs['Imaginary'] = 1
def initAdvanced(self):
inputs = {}
inputs["ApodizationFunction"] = str(self.apodization.currentText())
inputs["DecayConstant"] = float(self.decay.text())
inputs["NegativePadding"] = self.negativePadding.checkState()
inputs["Padding"] = int(self.padding.text())
return inputs
def ReAdvanced(self, inputs):
inputs['InputWorkspace'] = str(
self.ws.currentText()).replace(";",
"; ")
inputs['OutputWorkspace'] = "__ReTmp__"
def ImAdvanced(self, inputs):
inputs['InputWorkspace'] = str(
self.Im_ws.currentText()).replace(";",
"; ")
inputs['OutputWorkspace'] = "__ImTmp__"
def RePhaseAdvanced(self, inputs):
inputs['InputWorkspace'] = "__phaseQuad__"
inputs['OutputWorkspace'] = "__ReTmp__"
# get methods (from the GUI)
def getWS(self):
return str(self.ws.currentText()).replace(";", "; ")
def isAutoShift(self):
return self.shift_box.checkState() == QtCore.Qt.Checked
def isComplex(self):
return self.Im_box.checkState() == QtCore.Qt.Checked
def isRaw(self):
return self.Raw_box.checkState() == QtCore.Qt.Checked
def set_raw_checkbox_state(self, state):
if state:
self.Raw_box.setCheckState(QtCore.Qt.Checked)
else:
self.Raw_box.setCheckState(QtCore.Qt.Unchecked)
def setup_raw_checkbox_changed(self, slot):
self.FFTTable.itemChanged.connect(self.raw_checkbox_changed)
self.signal_raw_option_changed = slot
def raw_checkbox_changed(self, table_item):
if table_item == self.Raw_box:
self.signal_raw_option_changed()
def getImBoxRow(self):
return self.Im_box_row
def getShiftBoxRow(self):
return self.shift_box_row
def getImBox(self):
return self.Im_box
def getShiftBox(self):
return self.shift_box
def getFirstGoodData(self):
return float(self.x0.text())
def getLastGoodData(self):
        return float(self.xN.text())
def isNewPhaseTable(self):
return self.phaseTable_box.checkState() == QtCore.Qt.Checked
    def isPhaseBoxShown(self):
        return not self.FFTTable.isRowHidden(8)
def warning_popup(self, message):
warning(message, parent=self)
|
mganeva/mantid
|
scripts/Muon/GUI/FrequencyDomainAnalysis/FFT/fft_view.py
|
Python
|
gpl-3.0
| 11,098
|
[
"Gaussian"
] |
3ef9f0f85365a014d71c122f7bc7e1d96f2483610152e17483f3fb19827f1227
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Game based on Maxwell's demon, a thought experiment used to teach statistical
thermodynamics. The user has to scoop particles from a chamber and guide them
to another chamber through a channel with the help of a snake controlled by a
gamepad or the keyboard. The particle imbalance between chambers creates
a pressure gradient that makes it harder to move particles to the chamber
with an excess of particles.
"""
from threading import Thread
import numpy as np
import time
import espressomd
import espressomd.shapes
from espressomd.visualization_opengl import openGLLive, KeyboardButtonEvent, KeyboardFireEvent
required_features = ["LENNARD_JONES", "WCA", "MASS",
"EXTERNAL_FORCES", "THERMOSTAT_PER_PARTICLE"]
espressomd.assert_features(required_features)
print("""THE CHAMBER GAME
YOUR GOAL IS TO SCOOP ALL BLUE PARTICLES INTO THE RIGHT BOX.
GREEN/RED SPHERES CAN BE PICKED UP TO INCREASE/DECREASE
THE TEMPERATURE IN THE CHAMBER WHERE THEY ARE COLLECTED.""")
try:
import pygame
has_pygame = True
print("\nCONTROLS:"
"\nMOVE: (JOYSTICK AXIS), (KEYBOARD i/j/k/l)"
"\nACTION BUTTON: (JOYSTICK A), (KEYBOARD p)"
"\nRESTART: (JOYSTICK START), (KEYBOARD b)")
except ImportError:
has_pygame = False
print("\nCONTROLS:"
"\nMOVE: (KEYBOARD i/j/k/l)"
"\nACTION BUTTON: (KEYBOARD p)"
"\nRESTART: (KEYBOARD b)")
box = np.array([1500.0, 500.0, 150.0])
system = espressomd.System(box_l=box)
# PARAMETERS
# PHYSICS
gamma_snake_head = 1.0
gamma_snake_bead = 15.0
temp_l = 10000.0
temp_r = temp_l
temp_max = 1e5
gamma_bubbles = 0.5
temperature = 1.0
gamma = 1.0
system.time_step = 0.001
# SNAKE
snake_n = 10
snake_head_sigma = 50.0
snake_bead_sigma = 20.0
snake_length = (snake_n - 1) * snake_bead_sigma + snake_head_sigma
snake_startpos = [snake_head_sigma, box[1] - snake_head_sigma, box[2] * 0.5]
snake_head_type = 0
snake_bead_type = 1
snake_head_mass = 1000.0
snake_bead_mass = 10.0
harmonic_k = 500.0 * snake_bead_mass
# PORE
pore_length = box[0] * 0.25
pore_xl = box[0] * 0.5 - pore_length * 0.5
pore_xr = box[0] * 0.5 + pore_length * 0.5
cylinder_type = 2
cylinder_sigma = 1.0
pore_radius = snake_head_sigma * 1.3
# CONTROL
move_force = 70000.0
expl_range = 200.0
expl_force = 20000.0
# BUBBLES
bubble_type = 3
bubble_sigma = 36.0
bubble_snake_eps = 10
bubble_bubble_eps = 10000.0
bubble_mass = 50.0
bubbles_n = 180
# TEMP CHANGE PARTICLE
temp_change_radius = 25
temp_change_inc_type = 4
temp_change_dec_type = 5
dtemp = 1000.0
# VISUALIZER
zoom = 10
visualizer = openGLLive(
system,
window_size=[800, 600],
draw_axis=False,
particle_sizes=[
snake_head_sigma * 0.5,
snake_bead_sigma * 0.5,
cylinder_sigma,
bubble_sigma * 0.5,
temp_change_radius,
temp_change_radius],
particle_type_colors=[[1, 1, 0],
[1, 0, 1],
[0, 0, 1],
[0, 1, 1],
[0, 1, 0],
[1, 0, 0],
[0.5, 0, 1]],
constraint_type_colors=[[1, 1, 1]],
camera_position=[snake_startpos[0],
snake_startpos[1],
system.box_l[2] * zoom],
camera_target=snake_startpos)
# JOYPAD CONTROL
if has_pygame:
pygame.init()
pygame.joystick.init()
# CHECK FOR JOYSTICKS
if pygame.joystick.get_count() > 0:
joystick = pygame.joystick.Joystick(0)
joystick.init()
joystick_control = True
else:
joystick_control = False
# CELLSYSTEM
system.cell_system.skin = 3.0
system.cell_system.set_regular_decomposition(use_verlet_lists=False)
# BONDS
harmonic_head = espressomd.interactions.HarmonicBond(
k=harmonic_k, r_0=0.5 * (snake_head_sigma + snake_bead_sigma))
harmonic_bead = espressomd.interactions.HarmonicBond(
k=harmonic_k, r_0=snake_bead_sigma)
system.bonded_inter.add(harmonic_head)
system.bonded_inter.add(harmonic_bead)
# PARTICLES
# SNAKE
for i in range(snake_n):
if i == 0:
p_head = system.part.add(
pos=snake_startpos,
type=snake_head_type,
fix=[False, False, True],
mass=snake_head_mass,
gamma=gamma_snake_head)
else:
system.part.add(
pos=snake_startpos
+ np.array([0, -1, 0])
* (0.5 * (snake_head_sigma + snake_bead_sigma)
+ (i - 1) * snake_bead_sigma),
bonds=(harmonic_bead if (i > 1) else harmonic_head, i - 1),
type=snake_bead_type,
fix=[False, False, True],
mass=snake_bead_mass,
gamma=gamma_snake_bead)
# NB INTER
WCA_cut = 2.0**(1. / 6.)
system.non_bonded_inter[snake_head_type, snake_head_type].wca.set_params(
epsilon=1.0, sigma=snake_head_sigma)
sm = 0.5 * (snake_head_sigma + snake_bead_sigma)
system.non_bonded_inter[snake_bead_type, snake_head_type].wca.set_params(
epsilon=1.0, sigma=sm)
system.non_bonded_inter[snake_bead_type, snake_bead_type].wca.set_params(
epsilon=1.0, sigma=snake_bead_sigma)
sm = 0.5 * (snake_head_sigma + cylinder_sigma)
system.non_bonded_inter[snake_head_type, cylinder_type].wca.set_params(
epsilon=10.0, sigma=sm)
sm = 0.5 * (snake_bead_sigma + cylinder_sigma)
system.non_bonded_inter[snake_bead_type, cylinder_type].wca.set_params(
epsilon=10.0, sigma=sm)
sm = 0.5 * (bubble_sigma + snake_bead_sigma)
system.non_bonded_inter[snake_bead_type, bubble_type].wca.set_params(
epsilon=bubble_snake_eps, sigma=sm)
sm = 0.5 * (bubble_sigma + snake_head_sigma)
system.non_bonded_inter[snake_head_type, bubble_type].wca.set_params(
epsilon=1.0, sigma=sm)
sm = 0.5 * (bubble_sigma + cylinder_sigma)
system.non_bonded_inter[bubble_type, cylinder_type].lennard_jones.set_params(
epsilon=1000.0, sigma=sm, cutoff=2.5 * sm, shift="auto")
system.non_bonded_inter[bubble_type, bubble_type].lennard_jones.set_params(
epsilon=bubble_bubble_eps, sigma=bubble_sigma, cutoff=2.5 * bubble_sigma, shift="auto")
# CONSTRAINTS
system.constraints.add(shape=espressomd.shapes.Wall(
dist=0, normal=[1, 0, 0]), particle_type=cylinder_type, penetrable=True)
system.constraints.add(shape=espressomd.shapes.Wall(
dist=-box[0], normal=[-1, 0, 0]), particle_type=cylinder_type, penetrable=True)
system.constraints.add(shape=espressomd.shapes.Wall(
dist=0, normal=[0, 1, 0]), particle_type=cylinder_type, penetrable=True)
system.constraints.add(shape=espressomd.shapes.Wall(
dist=-box[1], normal=[0, -1, 0]), particle_type=cylinder_type, penetrable=True)
system.constraints.add(shape=espressomd.shapes.SimplePore(
center=0.5 * box, axis=[1, 0, 0], length=pore_length, radius=pore_radius,
smoothing_radius=5), particle_type=cylinder_type, penetrable=True)
# BUBBLES
n = 0
while n < bubbles_n:
# bpos = [pore_xr + np.random.random() * (pore_xr - pore_xl -
# snake_head_sigma*4) + snake_head_sigma * 2, np.random.random() * box[1],
# box[2]*0.5]
bpos = [np.random.random() * (pore_xl - snake_head_sigma * 4) +
snake_head_sigma * 2, np.random.random() * box[1], box[2] * 0.5]
new_part = system.part.add(
pos=bpos,
type=bubble_type,
fix=[False, False, True],
mass=bubble_mass,
gamma=gamma_bubbles)
n += 1
if np.min([system.distance(new_part, p.pos)
for p in system.part if p.id != new_part.id]) < bubble_sigma * 0.5:
new_part.remove()
n -= 1
p_bubbles = system.part.select(type=bubble_type)
# TEMP CHANGE PARTICLES
bpos = [np.random.random() * (pore_xl - snake_head_sigma * 4) +
snake_head_sigma * 2, np.random.random() * box[1], box[2] * 0.5]
p_temp_inc = system.part.add(
pos=bpos,
type=temp_change_inc_type,
fix=[True, True, True])
bpos = [pore_xr
+ np.random.random() * (pore_xr - pore_xl - snake_head_sigma * 4)
+ snake_head_sigma * 2,
np.random.random() * box[1],
box[2] * 0.5]
p_temp_dec = system.part.add(
pos=bpos,
type=temp_change_dec_type,
fix=[True, True, True])
# MINIMIZE ENERGY
system.integrator.set_steepest_descent(f_max=100, gamma=30.0,
max_displacement=0.01)
system.integrator.run(10000)
system.integrator.set_vv()
p_startpos = system.part.all().pos
# THERMOSTAT
system.thermostat.set_langevin(kT=temperature, gamma=gamma, seed=42)
# CONTROL CALLBACKS
F_act_k = np.zeros(2)
F_act_j = np.zeros(2)
def move_up_set():
global F_act_k
F_act_k[1] = 1.0
set_particle_force()
def move_down_set():
global F_act_k
F_act_k[1] = -1.0
set_particle_force()
def move_updown_reset():
global F_act_k
F_act_k[1] = 0
set_particle_force()
def move_left_set():
global F_act_k
F_act_k[0] = -1.0
set_particle_force()
def move_right_set():
global F_act_k
F_act_k[0] = 1.0
set_particle_force()
def move_leftright_reset():
global F_act_k
F_act_k[0] = 0
set_particle_force()
def set_particle_force():
global F_act_j, F_act_k
F_control_tot = np.append(np.clip(F_act_k + F_act_j, -1, 1), 0)
p_head.ext_force = move_force * F_control_tot
def restart():
system.part.all().pos = p_startpos
system.galilei.kill_particle_motion()
system.galilei.kill_particle_forces()
expl_time = 0
exploding = False
def explode():
global exploding, expl_time
if not exploding:
exploding = True
expl_time = time.time()
for p in p_bubbles:
dv = p.pos - p_head.pos
lv = np.linalg.norm(dv)
if lv < expl_range:
p.v = dv / lv / lv * expl_force
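# Minimal sketch (hypothetical helper, not used elsewhere): the kick applied
# by explode() above. The velocity dv / lv / lv * force points away from the
# snake head with magnitude force / lv, i.e. a 1/r falloff inside expl_range.
def _demo_explosion_kick(p_pos, head_pos, force=expl_force, max_range=expl_range):
    dv = np.asarray(p_pos) - np.asarray(head_pos)
    lv = np.linalg.norm(dv)
    # Guard against coincident positions and out-of-range particles
    if lv == 0 or lv >= max_range:
        return np.zeros(3)
    return dv / lv / lv * force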
# KEYBOARD CONTROLS
visualizer.keyboard_manager.register_button(
KeyboardButtonEvent('i', KeyboardFireEvent.Pressed, move_up_set))
visualizer.keyboard_manager.register_button(
KeyboardButtonEvent('k', KeyboardFireEvent.Pressed, move_down_set))
visualizer.keyboard_manager.register_button(
KeyboardButtonEvent('i', KeyboardFireEvent.Released, move_updown_reset))
visualizer.keyboard_manager.register_button(
KeyboardButtonEvent('k', KeyboardFireEvent.Released, move_updown_reset))
visualizer.keyboard_manager.register_button(
KeyboardButtonEvent('j', KeyboardFireEvent.Pressed, move_left_set))
visualizer.keyboard_manager.register_button(
KeyboardButtonEvent('l', KeyboardFireEvent.Pressed, move_right_set))
visualizer.keyboard_manager.register_button(
KeyboardButtonEvent('j', KeyboardFireEvent.Released, move_leftright_reset))
visualizer.keyboard_manager.register_button(
KeyboardButtonEvent('l', KeyboardFireEvent.Released, move_leftright_reset))
visualizer.keyboard_manager.register_button(
KeyboardButtonEvent('p', KeyboardFireEvent.Pressed, explode))
visualizer.keyboard_manager.register_button(
KeyboardButtonEvent('b', KeyboardFireEvent.Pressed, restart))
# MAIN LOOP
def main():
global F_act_j, F_act_k, temp_l, temp_r, exploding, expl_time
def T_to_g(temp):
return 0.1 + 5.0 / (1.0 + 0.001 * temp)
zoom_eq = 5.0
zoom_v = 0.0
zoom = zoom_eq
zoom_dt = 0.01
ud_cnt = 0
tincF = 0
tdecF = 0
exploding = False
button_A_old = 0
button_Start_old = 0
while True:
# INTEGRATE
system.integrator.run(1)
if p_head.pos[0] > pore_xl and p_head.pos[0] < pore_xr:
z_eq = 10.0
v_f = 0.1
else:
z_eq = zoom_eq
v_f = 1.0
# CAMERA TRACKING
zoom_a = (z_eq - zoom) * 0.2 - zoom_v * 0.8 + v_f * \
0.005 * np.linalg.norm(p_head.v)
zoom_v += zoom_a * zoom_dt
zoom += zoom_v * zoom_dt + zoom_a * zoom_dt * zoom_dt
camPos = np.copy(p_head.pos) - box * 0.5
camPos[2] = box[2] * zoom
camTarget = p_head.pos - box * 0.5
t = camPos - camTarget
r = np.linalg.norm(t)
visualizer.camera.state_pos = camPos
visualizer.camera.state_target = -t / r
visualizer.camera.update_modelview()
# COUNT L/R
ud_cnt += 1
if ud_cnt > 100:
ud_cnt = 0
pl = system.part.select(
lambda p: p.pos[0] < pore_xl and p.type == bubble_type)
pr = system.part.select(
lambda p: p.pos[0] > pore_xr and p.type == bubble_type)
Nl = len(pl)
Nr = len(pr)
for p in pl:
p.gamma = T_to_g(temp_l)
for p in pr:
p.gamma = T_to_g(temp_r)
w = visualizer.specs['window_size']
visualizer.user_texts = [
[[20, w[1] - 20], f'LEFT: {Nl} RIGHT: {Nr}'],
[[20, w[1] - 40], f'TEMPERATURE LEFT: {temp_l:.0f} TEMPERATURE RIGHT: {temp_r:.0f}']]
# [[w[0] * 0.5, w[1] - 60], f'GAMMA LEFT: {T_to_g(temp_l):0.4f} GAMMA RIGHT: {T_to_g(temp_r):0.4f}']]
# TEMP CHANGE COLLISION
repos_temp_inc = False
repos_temp_dec = False
if np.linalg.norm(
p_head.pos - p_temp_inc.pos) < temp_change_radius + snake_head_sigma * 0.5:
repos_temp_inc = True
if p_temp_inc.pos[0] > box[0] * 0.5:
temp_r += dtemp
if temp_r > temp_max:
temp_r = temp_max
else:
temp_l += dtemp
if temp_l > temp_max:
temp_l = temp_max
if np.linalg.norm(
p_head.pos - p_temp_dec.pos) < temp_change_radius + snake_head_sigma * 0.5:
repos_temp_dec = True
if p_temp_dec.pos[0] > box[0] * 0.5:
temp_r -= dtemp
if temp_r < 0:
temp_r = 0.0
for p in p_bubbles:
if p.pos[0] > pore_xr:
p.v = [0, 0, 0]
else:
temp_l -= dtemp
if temp_l < 0:
temp_l = 0.0
for p in p_bubbles:
if p.pos[0] < pore_xl:
p.v = [0, 0, 0]
# PLACE TEMP CHANGE PARTICLES
tincF += 1
tdecF += 1
if repos_temp_inc or tincF > 5000:
tincF = 0
if np.random.random() < 0.5:
p_temp_inc.pos = [np.random.random()
* (pore_xl - snake_head_sigma * 4)
+ snake_head_sigma * 2,
np.random.random() * box[1],
box[2] * 0.5]
else:
p_temp_inc.pos = [pore_xr
+ np.random.random()
* (pore_xr - pore_xl - snake_head_sigma * 4)
+ snake_head_sigma * 2,
np.random.random() * box[1],
box[2] * 0.5]
if repos_temp_dec or tdecF > 5000:
tdecF = 0
if np.random.random() < 0.5:
p_temp_dec.pos = [np.random.random()
* (pore_xl - snake_head_sigma * 4)
+ snake_head_sigma * 2,
np.random.random() * box[1],
box[2] * 0.5]
else:
p_temp_dec.pos = [pore_xr
+ np.random.random()
* (pore_xr - pore_xl - snake_head_sigma * 4)
+ snake_head_sigma * 2,
np.random.random() * box[1],
box[2] * 0.5]
# REENABLE EXPLOSION
if exploding and time.time() - expl_time > 1:
exploding = False
# VISUALIZER
visualizer.update()
if has_pygame:
if joystick_control:
pygame.event.get()
axis_l = np.array(
[joystick.get_axis(0), -joystick.get_axis(1)])
axis_r = np.array(
[joystick.get_axis(3), -joystick.get_axis(4)])
button_A = joystick.get_button(0)
button_Start = joystick.get_button(7)
if not button_A_old and button_A:
explode()
if not button_Start_old and button_Start:
restart()
button_A_old = button_A
                button_Start_old = button_Start
hat = joystick.get_hat(0)
F_act_j = np.clip(np.array(hat) + axis_l + axis_r, -1, 1)
set_particle_force()
t = Thread(target=main)
t.daemon = True
t.start()
visualizer.start()
|
pkreissl/espresso
|
samples/chamber_game.py
|
Python
|
gpl-3.0
| 17,459
|
[
"ESPResSo"
] |
2a6aeb20260af818843157dd4de7c9046b113815127eea9384504bc066c5cb67
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import sys, os
import hashlib
import base64
from datetime import datetime, timedelta
import requests
from requests.exceptions import ConnectionError
from github import Github, UnknownObjectException, GithubException
import click
from jinja2 import Template
from tqdm import tqdm
from collections import OrderedDict
import markdown2
import json
__version__ = "1.0"
ROOT = os.path.dirname(os.path.realpath(__file__))
PY3 = sys.version_info >= (3, 0)
COLORED_LABELS = (
("1192FC", "investigating",),
("FFA500", "degraded performance"),
("FF4D4D", "major outage", )
)
STATUSES = [status for _, status in COLORED_LABELS]
SYSTEM_LABEL_COLOR = "171717"
TEMPLATES = [
"template.html",
"style.css",
"statuspage.js",
"translations.ini"
]
DEFAULT_CONFIG = {
"footer": "Status page hosted by GitHub, generated with <a href='https://github.com/jayfk/statuspage'>jayfk/statuspage</a>",
"logo": "https://raw.githubusercontent.com/jayfk/statuspage/main/template/logo.png",
"title": "Status",
"favicon": "https://raw.githubusercontent.com/jayfk/statuspage/main/template/favicon.png"
}
@click.group()
@click.version_option(__version__, '-v', '--version')
def cli(): # pragma: no cover
pass
@cli.command()
@click.option('--name', prompt='Name', help='')
@click.option('--token', prompt='GitHub API Token', help='')
@click.option('--org', help='GitHub Organization', default=False)
@click.option('--systems', prompt='Systems, eg (Website,API)', help='')
@click.option('--private/--public', default=False)
def create(token, name, systems, org, private):
run_create(name=name, token=token, systems=systems, org=org, private=private)
@cli.command()
@click.option('--name', prompt='Name', help='')
@click.option('--org', help='GitHub Organization', default=False)
@click.option('--token', prompt='GitHub API Token', help='')
def update(name, token, org):
run_update(name=name, token=token, org=org)
@cli.command()
@click.option('--name', prompt='Name', help='')
@click.option('--org', help='GitHub Organization', default=False)
@click.option('--token', prompt='GitHub API Token', help='')
def upgrade(name, token, org):
run_upgrade(name=name, token=token, org=org)
@cli.command()
@click.option('--name', prompt='Name', help='')
@click.option('--org', help='GitHub Organization', default=False)
@click.option('--token', prompt='GitHub API Token', help='')
@click.option('--system', prompt='System', help='System to add')
@click.option('--prompt/--no-prompt', default=True)
def add_system(name, token, org, system, prompt):
run_add_system(name=name, token=token, org=org, system=system, prompt=prompt)
@cli.command()
@click.option('--name', prompt='Name', help='')
@click.option('--org', help='GitHub Organization', default=False)
@click.option('--token', prompt='GitHub API Token', help='')
@click.option('--system', prompt='System', help='System to remove')
@click.option('--prompt/--no-prompt', default=True)
def remove_system(name, token, org, system, prompt):
run_remove_system(name=name, token=token, org=org, system=system, prompt=prompt)
def run_add_system(name, token, org, system, prompt):
"""
Adds a new system to the repo.
"""
repo = get_repo(token=token, org=org, name=name)
try:
repo.create_label(name=system.strip(), color=SYSTEM_LABEL_COLOR)
click.secho("Successfully added new system {}".format(system), fg="green")
if prompt and click.confirm("Run update to re-generate the page?"):
run_update(name=name, token=token, org=org)
except GithubException as e:
if e.status == 422:
click.secho(
"Unable to add new system {}, it already exists.".format(system), fg="yellow")
return
raise
def run_remove_system(name, token, org, system, prompt):
"""
Removes a system from the repo.
"""
repo = get_repo(token=token, org=org, name=name)
try:
label = repo.get_label(name=system.strip())
label.delete()
click.secho("Successfully deleted {}".format(system), fg="green")
if prompt and click.confirm("Run update to re-generate the page?"):
run_update(name=name, token=token, org=org)
except UnknownObjectException:
click.secho("Unable to remove system {}, it does not exist.".format(system), fg="yellow")
def run_upgrade(name, token, org):
click.echo("Upgrading...")
repo = get_repo(token=token, name=name, org=org)
files = get_files(repo=repo)
head_sha = repo.get_git_ref("heads/gh-pages").object.sha
# add all the template files to the gh-pages branch
for template in tqdm(TEMPLATES, desc="Updating template files"):
with open(os.path.join(ROOT, "template", template), "r") as f:
content = f.read()
if template in files:
repo_template = repo.get_contents(
path=template,
ref=head_sha,
)
if not is_same_content(
content,
base64.b64decode(repo_template.content).decode('utf-8')
):
repo.update_file(
path=template,
sha=repo_template.sha,
message="upgrade",
content=content,
branch="gh-pages"
)
else:
repo.create_file(
path=template,
message="upgrade",
content=content,
branch="gh-pages"
)
def run_update(name, token, org):
click.echo("Generating..")
repo = get_repo(token=token, name=name, org=org)
issues = get_issues(repo)
# get the SHA of the current HEAD
sha = repo.get_git_ref("heads/gh-pages").object.sha
# get the template from the repo
template_file = repo.get_contents(
path="template.html",
ref=sha
)
systems = get_systems(repo, issues)
incidents = get_incidents(repo, issues)
panels = get_panels(systems)
# render the template
config = get_config(repo)
template = Template(template_file.decoded_content.decode("utf-8"))
content = template.render({
"systems": systems, "incidents": incidents, "panels": panels, "config": config
})
# create/update the index.html with the template
try:
# get the index.html file, we need the sha to update it
index = repo.get_contents(
path="index.html",
ref=sha,
)
if is_same_content(content, base64.b64decode(index.content).decode('utf-8')):
click.echo("Local status matches remote status, no need to commit.")
return False
repo.update_file(
path="index.html",
sha=index.sha,
message="update index",
content=content,
branch="gh-pages"
)
except UnknownObjectException:
# index.html does not exist, create it
repo.create_file(
path="index.html",
message="initial",
content=content,
branch="gh-pages",
)
def run_create(name, token, systems, org, private):
gh = Github(token)
if org:
entity = gh.get_organization(org)
else:
entity = gh.get_user()
description="Visit this site at https://{login}.github.io/{name}/".format(
login=entity.login,
name=name
)
# create the repo
repo = entity.create_repo(name=name, description=description, private=private)
    # get all labels and delete them
for label in tqdm(list(repo.get_labels()), "Deleting initial labels"):
label.delete()
# create new status labels
for color, label in tqdm(COLORED_LABELS, desc="Creating status labels"):
repo.create_label(name=label, color=color)
# create system labels
for label in tqdm(systems.split(","), desc="Creating system labels"):
repo.create_label(name=label.strip(), color=SYSTEM_LABEL_COLOR)
# add an empty file to main, otherwise we won't be able to create the gh-pages
# branch
repo.create_file(
path="README.md",
message="initial",
content=description,
)
# create the gh-pages branch
ref = repo.get_git_ref("heads/main")
repo.create_git_ref(ref="refs/heads/gh-pages", sha=ref.object.sha)
# add all the template files to the gh-pages branch
for template in tqdm(TEMPLATES, desc="Adding template files"):
with open(os.path.join(ROOT, "template", template), "r", encoding='utf-8') as f:
repo.create_file(
path=template,
message="initial",
content=f.read(),
branch="gh-pages"
)
# set the gh-pages branch to be the default branch
repo.edit(name=name, default_branch="gh-pages")
# run an initial update to add content to the index
run_update(token=token, name=name, org=org)
click.echo("\nCreate new issues at https://github.com/{login}/{name}/issues".format(
login=entity.login,
name=name
))
click.echo("Visit your new status page at https://{login}.github.io/{name}/".format(
login=entity.login,
name=name
))
click.secho("\nYour status page is now set up and ready!\n", fg="green")
click.echo("Please note: You need to run the 'statuspage update' command whenever you update or create an issue.\n")
click.echo("\nIn order to update this status page, run the following command:")
click.echo("statuspage update --name={name} --token={token} {org}".format(
name=name, token=token, org="--org=" + entity.login if org else ""))
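# Illustrative invocations (a sketch only; the update hint is printed by
# run_create above, while the flag names for create are assumptions based on
# this function's signature):
#
#   statuspage create --name=status --token=<github-token> --systems="API,Website"
#   statuspage update --name=status --token=<github-token>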
def iter_systems(labels):
for label in labels:
if label.color == SYSTEM_LABEL_COLOR:
yield label.name
def get_files(repo):
"""
Get a list of all files.
"""
return [file.path for file in repo.get_contents("/", ref="gh-pages")]
def get_config(repo):
"""
Get the config for the repo, merged with the default config. Returns the default config if
no config file is found.
"""
files = get_files(repo)
config = DEFAULT_CONFIG
if "config.json" in files:
# get the config file, parse JSON and merge it with the default config
config_file = repo.get_contents('config.json', ref="gh-pages")
try:
repo_config = json.loads(config_file.decoded_content.decode("utf-8"))
config.update(repo_config)
except ValueError:
click.secho("WARNING: Unable to parse config file. Using defaults.", fg="yellow")
return config
def get_severity(labels):
label_map = dict(COLORED_LABELS)
for label in labels:
if label.color in label_map:
return label_map[label.color]
return None
def get_panels(systems):
# initialize and fill the panels with affected systems
panels = OrderedDict()
for system, data in systems.items():
if data["status"] != "operational":
if data["status"] in panels:
panels[data["status"]].append(system)
else:
panels[data["status"]] = [system, ]
return panels
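# Illustrative sketch (hypothetical data, not part of the original module):
# get_panels() keeps only degraded systems, grouped by status, so the
# template renders one panel per non-operational state.
#
# >>> systems = OrderedDict([
# ...     ("API", {"status": "major outage"}),
# ...     ("Web", {"status": "operational"}),
# ... ])
# >>> get_panels(systems)
# OrderedDict([('major outage', ['API'])])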
def get_repo(token, name, org):
gh = Github(token)
if org:
return gh.get_organization(org).get_repo(name=name)
return gh.get_user().get_repo(name=name)
def get_collaborators(repo):
return [col.login for col in repo.get_collaborators()]
def get_systems(repo, issues):
systems = OrderedDict()
# get all systems and mark them as operational
for name in sorted(iter_systems(labels=repo.get_labels())):
systems[name] = {
"status": "operational",
}
for issue in issues:
if issue.state == "open":
labels = issue.get_labels()
severity = get_severity(labels)
affected_systems = list(iter_systems(labels))
            # the issue is open right now: mark all affected systems
for affected_system in affected_systems:
systems[affected_system]["status"] = severity
return systems
def get_incidents(repo, issues):
# loop over all issues in the past 90 days to get current and past incidents
incidents = []
collaborators = get_collaborators(repo=repo)
for issue in issues:
labels = issue.get_labels()
affected_systems = sorted(iter_systems(labels))
severity = get_severity(labels)
# make sure that non-labeled issues are not displayed
if not affected_systems or (severity is None and issue.state != "closed"):
continue
# make sure that the user that created the issue is a collaborator
if issue.user.login not in collaborators:
continue
# create an incident
incident = {
"created": issue.created_at,
"title": issue.title,
"systems": affected_systems,
"severity": severity,
"closed": issue.state == "closed",
"body": markdown2.markdown(issue.body),
"updates": []
}
for comment in issue.get_comments():
# add comments by collaborators only
if comment.user.login in collaborators:
incident["updates"].append({
"created": comment.created_at,
"body": markdown2.markdown(comment.body)
})
incidents.append(incident)
# sort incidents by date
return sorted(incidents, key=lambda i: i["created"], reverse=True)
def get_issues(repo):
return repo.get_issues(state="all", since=datetime.now() - timedelta(days=90))
def is_same_content(c1, c2):
def sha1(c):
if PY3:
if isinstance(c, str):
c = bytes(c, "utf-8")
else:
c = c.encode("utf-8")
return hashlib.sha1(c)
return sha1(c1).hexdigest() == sha1(c2).hexdigest()
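# Illustrative sketch (hypothetical values): is_same_content() compares SHA1
# digests, encoding str arguments to UTF-8 first, so a local str and its
# byte-for-byte identical remote copy compare equal.
#
# >>> is_same_content("<html></html>", "<html></html>")
# True
# >>> is_same_content("<html></html>", "<html> </html>")
# False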
if __name__ == '__main__': # pragma: no cover
cli()
|
jayfk/statuspage
|
statuspage/statuspage.py
|
Python
|
mit
| 14,201
|
[
"VisIt"
] |
760a469a6f9155c2548be041b1d83dd751c67b612aec00b9f7a71cd6d138e28c
|
from .. import Provider as LoremProvider
class Provider(LoremProvider):
"""en_US word list is drawn from Education First's "1000 Most Common Words in English":
http://www.ef.edu/english-resources/english-vocabulary/top-1000-words/
Some words have been removed to make this list appropriate for public testing"""
word_list = (
'a',
'ability',
'able',
'about',
'above',
'accept',
'according',
'account',
'across',
'act',
'action',
'activity',
'actually',
'add',
'address',
'administration',
'admit',
'adult',
'affect',
'after',
'again',
'against',
'age',
'agency',
'agent',
'ago',
'agree',
'agreement',
'ahead',
'air',
'all',
'allow',
'almost',
'alone',
'along',
'already',
'also',
'although',
'always',
'American',
'among',
'amount',
'analysis',
'and',
'animal',
'another',
'answer',
'any',
'anyone',
'anything',
'appear',
'apply',
'approach',
'area',
'argue',
'arm',
'around',
'arrive',
'art',
'article',
'artist',
'as',
'ask',
'assume',
'at',
'attack',
'attention',
'attorney',
'audience',
'author',
'authority',
'available',
'avoid',
'away',
'baby',
'back',
'bad',
'bag',
'ball',
'bank',
'bar',
'base',
'be',
'beat',
'beautiful',
'because',
'become',
'bed',
'before',
'begin',
'behavior',
'behind',
'believe',
'benefit',
'best',
'better',
'between',
'beyond',
'big',
'bill',
'billion',
'bit',
'black',
'blood',
'blue',
'board',
'body',
'book',
'born',
'both',
'box',
'boy',
'break',
'bring',
'brother',
'budget',
'build',
'building',
'business',
'but',
'buy',
'by',
'call',
'camera',
'campaign',
'can',
'candidate',
'capital',
'car',
'card',
'care',
'career',
'carry',
'case',
'catch',
'cause',
'cell',
'center',
'central',
'century',
'certain',
'certainly',
'chair',
'challenge',
'chance',
'change',
'character',
'charge',
'check',
'child',
'choice',
'choose',
'church',
'citizen',
'city',
'civil',
'claim',
'class',
'clear',
'clearly',
'close',
'coach',
'cold',
'collection',
'college',
'color',
'commercial',
'common',
'community',
'company',
'compare',
'computer',
'concern',
'condition',
'conference',
'Congress',
'consider',
'consumer',
'contain',
'continue',
'control',
'cost',
'could',
'country',
'couple',
'course',
'court',
'cover',
'create',
'crime',
'cultural',
'culture',
'cup',
'current',
'customer',
'cut',
'dark',
'data',
'daughter',
'day',
'deal',
'debate',
'decade',
'decide',
'decision',
'deep',
'defense',
'degree',
'Democrat',
'democratic',
'describe',
'design',
'despite',
'detail',
'determine',
'develop',
'development',
'difference',
'different',
'difficult',
'dinner',
'direction',
'director',
'discover',
'discuss',
'discussion',
'do',
'doctor',
'dog',
'door',
'down',
'draw',
'dream',
'drive',
'drop',
'drug',
'during',
'each',
'early',
'east',
'easy',
'eat',
'economic',
'economy',
'edge',
'education',
'effect',
'effort',
'eight',
'either',
'election',
'else',
'employee',
'end',
'energy',
'enjoy',
'enough',
'enter',
'entire',
'environment',
'environmental',
'especially',
'establish',
'even',
'evening',
'event',
'ever',
'every',
'everybody',
'everyone',
'everything',
'evidence',
'exactly',
'example',
'executive',
'exist',
'expect',
'experience',
'expert',
'explain',
'eye',
'face',
'fact',
'factor',
'fall',
'family',
'far',
'fast',
'father',
'fear',
'federal',
'feel',
'feeling',
'few',
'field',
'fight',
'figure',
'fill',
'film',
'final',
'finally',
'financial',
'find',
'fine',
'finish',
'fire',
'firm',
'first',
'fish',
'five',
'floor',
'fly',
'focus',
'follow',
'food',
'foot',
'for',
'force',
'foreign',
'forget',
'form',
'former',
'forward',
'four',
'free',
'friend',
'from',
'front',
'full',
'fund',
'future',
'game',
'garden',
'gas',
'general',
'generation',
'get',
'girl',
'give',
'glass',
'go',
'goal',
'good',
'government',
'great',
'green',
'ground',
'group',
'grow',
'growth',
'guess',
'gun',
'guy',
'hair',
'half',
'hand',
'happen',
'happy',
'hard',
'have',
'he',
'head',
'health',
'hear',
'heart',
'heavy',
'help',
'her',
'here',
'herself',
'high',
'him',
'himself',
'his',
'history',
'hit',
'hold',
'home',
'hope',
'hospital',
'hot',
'hotel',
'hour',
'house',
'how',
'however',
'huge',
'human',
'hundred',
'husband',
'I',
'idea',
'identify',
'if',
'image',
'imagine',
'impact',
'important',
'improve',
'in',
'include',
'including',
'increase',
'indeed',
'indicate',
'individual',
'industry',
'information',
'inside',
'instead',
'institution',
'interest',
'interesting',
'international',
'interview',
'into',
'investment',
'involve',
'issue',
'it',
'item',
'its',
'itself',
'job',
'join',
'just',
'keep',
'key',
'kid',
'kind',
'kitchen',
'know',
'knowledge',
'land',
'language',
'large',
'last',
'late',
'later',
'laugh',
'law',
'lawyer',
'lay',
'lead',
'leader',
'learn',
'least',
'leave',
'left',
'leg',
'less',
'let',
'letter',
'level',
'life',
'light',
'like',
'likely',
'line',
'list',
'listen',
'little',
'live',
'local',
'long',
'look',
'lose',
'loss',
'lot',
'low',
'machine',
'magazine',
'main',
'maintain',
'major',
'majority',
'make',
'man',
'manage',
'management',
'manager',
'many',
'market',
'marriage',
'material',
'matter',
'may',
'maybe',
'me',
'mean',
'measure',
'media',
'medical',
'meet',
'meeting',
'member',
'memory',
'mention',
'message',
'method',
'middle',
'might',
'military',
'million',
'mind',
'minute',
'miss',
'mission',
'model',
'modern',
'moment',
'money',
'month',
'more',
'morning',
'most',
'mother',
'mouth',
'move',
'movement',
'movie',
'Mr',
'Mrs',
'much',
'music',
'must',
'my',
'myself',
'name',
'nation',
'national',
'natural',
'nature',
'near',
'nearly',
'necessary',
'need',
'network',
'never',
'new',
'news',
'newspaper',
'next',
'nice',
'night',
'no',
'none',
'nor',
'north',
'not',
'note',
'nothing',
'notice',
'now',
'number',
'occur',
'of',
'off',
'offer',
'office',
'officer',
'official',
'often',
'oil',
'ok',
'old',
'on',
'once',
'one',
'only',
'onto',
'open',
'operation',
'opportunity',
'option',
'or',
'order',
'organization',
'other',
'others',
'our',
'out',
'outside',
'over',
'own',
'owner',
'page',
'painting',
'paper',
'parent',
'part',
'participant',
'particular',
'particularly',
'partner',
'party',
'pass',
'past',
'pattern',
'pay',
'peace',
'people',
'per',
'perform',
'performance',
'perhaps',
'person',
'personal',
'phone',
'physical',
'pick',
'picture',
'piece',
'place',
'plan',
'plant',
'play',
'player',
'PM',
'point',
'police',
'policy',
'political',
'politics',
'poor',
'popular',
'population',
'position',
'positive',
'possible',
'power',
'practice',
'prepare',
'present',
'president',
'pressure',
'pretty',
'prevent',
'price',
'probably',
'process',
'produce',
'product',
'production',
'professional',
'professor',
'program',
'project',
'property',
'protect',
'prove',
'provide',
'public',
'pull',
'purpose',
'push',
'put',
'quality',
'question',
'quickly',
'quite',
'race',
'radio',
'raise',
'range',
'rate',
'rather',
'reach',
'read',
'ready',
'real',
'reality',
'realize',
'really',
'reason',
'receive',
'recent',
'recently',
'recognize',
'record',
'red',
'reduce',
'reflect',
'region',
'relate',
'relationship',
'religious',
'remain',
'remember',
'report',
'represent',
'Republican',
'require',
'research',
'resource',
'respond',
'response',
'responsibility',
'rest',
'result',
'return',
'reveal',
'rich',
'right',
'rise',
'risk',
'road',
'rock',
'role',
'room',
'rule',
'run',
'safe',
'same',
'save',
'say',
'scene',
'school',
'science',
'scientist',
'score',
'sea',
'season',
'seat',
'second',
'section',
'security',
'see',
'seek',
'seem',
'sell',
'send',
'senior',
'sense',
'series',
'serious',
'serve',
'service',
'set',
'seven',
'several',
'shake',
'share',
'she',
'short',
'should',
'shoulder',
'show',
'side',
'sign',
'significant',
'similar',
'simple',
'simply',
'since',
'sing',
'single',
'sister',
'sit',
'site',
'situation',
'six',
'size',
'skill',
'skin',
'small',
'smile',
'so',
'social',
'society',
'soldier',
'some',
'somebody',
'someone',
'something',
'sometimes',
'son',
'song',
'soon',
'sort',
'sound',
'source',
'south',
'southern',
'space',
'speak',
'special',
'specific',
'speech',
'spend',
'sport',
'spring',
'staff',
'stage',
'stand',
'standard',
'star',
'start',
'state',
'statement',
'station',
'stay',
'step',
'still',
'stock',
'stop',
'store',
'story',
'strategy',
'street',
'strong',
'structure',
'student',
'study',
'stuff',
'style',
'subject',
'success',
'successful',
'such',
'suddenly',
'suffer',
'suggest',
'summer',
'support',
'sure',
'surface',
'system',
'table',
'take',
'talk',
'task',
'tax',
'teach',
'teacher',
'team',
'technology',
'television',
'tell',
'ten',
'tend',
'term',
'test',
'than',
'thank',
'that',
'the',
'their',
'them',
'themselves',
'then',
'theory',
'there',
'these',
'they',
'thing',
'think',
'third',
'this',
'those',
'though',
'thought',
'thousand',
'threat',
'three',
'through',
'throughout',
'throw',
'thus',
'time',
'to',
'today',
'together',
'tonight',
'too',
'top',
'total',
'tough',
'toward',
'town',
'trade',
'traditional',
'training',
'travel',
'treat',
'treatment',
'tree',
'trial',
'trip',
'trouble',
'true',
'truth',
'try',
'turn',
'TV',
'two',
'type',
'under',
'understand',
'unit',
'until',
'up',
'upon',
'us',
'use',
'usually',
'value',
'various',
'very',
'view',
'visit',
'voice',
'vote',
'wait',
'walk',
'wall',
'want',
'war',
'watch',
'water',
'way',
'we',
'wear',
'week',
'weight',
'well',
'west',
'western',
'what',
'whatever',
'when',
'where',
'whether',
'which',
'while',
'white',
'who',
'whole',
'whom',
'whose',
'why',
'wide',
'wife',
'will',
'win',
'wind',
'window',
'wish',
'with',
'within',
'without',
'woman',
'wonder',
'word',
'work',
'worker',
'world',
'worry',
'would',
'write',
'writer',
'wrong',
'yard',
'yeah',
'year',
'yes',
'yet',
'you',
'young',
'your',
'yourself',
)
|
danhuss/faker
|
faker/providers/lorem/en_US/__init__.py
|
Python
|
mit
| 17,382
|
[
"VisIt"
] |
c60a3247d11bd15a6408fb3287f66fcd029e68c67d9be25cdceafc05a7daab93
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright © 2013-2018 Antergos
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
""" Internationalisation helper functions (read languagelist.data) """
def utf8(my_string, errors="strict"):
""" Decode a string as UTF-8 if it isn't already Unicode. """
if isinstance(my_string, str):
return my_string
else:
return str(my_string, "utf-8", errors)
def get_languages(language_list="data/languagelist.data.gz", current_language_index=-1):
""" Returns a tuple of (current language, sorted choices, display map). """
import gzip
# import icu
current_language = "English"
languagelist = gzip.open(language_list)
language_display_map = {}
i = 0
for line in languagelist:
line = utf8(line)
if line == '' or line == '\n':
continue
code, name, trans = line.strip('\n').split(':')[1:]
if code in ('C', 'dz', 'km'):
i += 1
continue
# KDE fails to round-trip strings containing U+FEFF ZERO WIDTH
# NO-BREAK SPACE, and we don't care about the NBSP anyway, so strip
# it.
# https://bugs.launchpad.net/bugs/1001542
# (comment #5 and on)
trans = trans.strip(" \ufeff")
language_display_map[trans] = (name, code)
if i == current_language_index:
current_language = trans
i += 1
languagelist.close()
# try:
# Note that we always collate with the 'C' locale. This is far
# from ideal. But proper collation always requires a specific
# language for its collation rules (languages frequently have
# custom sorting). This at least gives us common sorting rules,
# like stripping accents.
# collator = icu.Collator.createInstance(icu.Locale('C'))
# except:
# collator = None
collator = None
def compare_choice(position):
""" auxiliary compare function """
if language_display_map[position][1] == 'C':
            return ''  # place C first ('' sorts before any name; returning None would break sorted() on Python 3)
if collator:
try:
return collator.getCollationKey(position).getByteArray()
except Exception:
return position
# Else sort by unicode code point, which isn't ideal either,
# but also has the virtue of sorting like-glyphs together
return position
sorted_choices = sorted(language_display_map, key=compare_choice)
return current_language, sorted_choices, language_display_map
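# Illustrative usage sketch (not part of the original module; assumes the
# default data/languagelist.data.gz shipped with the installer is present):
#
# >>> current, choices, display_map = get_languages()
# >>> current
# 'English'
# >>> display_map[choices[0]]  # hypothetical entry: (name, locale code)
# ('English', 'en')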
|
Antergos/Cnchi
|
src/misc/i18n.py
|
Python
|
gpl-3.0
| 3,222
|
[
"FEFF"
] |
b138bab13f1d15324bb5d999045eabeb537f633f93ab76df683bdd4d0c522989
|
"""
CAD :cite:`cad-Russo2017_e19428` is a method aimed at capturing structures of
higher-order correlation in massively parallel spike trains. In particular, it
is able to extract patterns of spikes with arbitrary configuration of time lags
(time interval between spikes in a pattern), and at multiple time scales,
e.g. from synchronous patterns to firing rate co-modulations.
CAD consists of statistical parametric testing performed at the level of pairs
of neurons, followed by an agglomerative recursive algorithm, in order to
detect and test statistically precise repetitions of spikes in the data.
In particular, pairs of neurons are tested for significance under the null
hypothesis of independence, and then the significant pairs are agglomerated
into higher order patterns.
Given a list of discretized (binned) spike trains by a given temporal
scale (bin_size), assumed to be recorded in parallel, the CAD analysis can be
applied as demonstrated in this short toy example of 5 parallel spike trains
that exhibit fully synchronous events of order 5.
.. autosummary::
:toctree: _toctree/cell_assembly_detection
cell_assembly_detection
Visualization
-------------
Visualization of CAD method is covered in Viziphant
:func:`viziphant.patterns.plot_patterns`
See Also
--------
elephant.spade.spade : advanced synchronous patterns detection
Examples
--------
>>> import quantities as pq
>>> import numpy as np
>>> from elephant.cell_assembly_detection import cell_assembly_detection
>>> from elephant.spike_train_generation import compound_poisson_process
>>> from elephant.conversion import BinnedSpikeTrain
Generate correlated data and bin it with a bin_size of 10ms.
>>> np.random.seed(30)
>>> spiketrains = compound_poisson_process(rate=15*pq.Hz,
... amplitude_distribution=[0, 0.95, 0, 0, 0, 0, 0.05], t_stop=5*pq.s)
>>> bst = BinnedSpikeTrain(spiketrains, bin_size=10 * pq.ms)
>>> bst.rescale('ms')
Call of the method.
>>> patterns = cell_assembly_detection(bst, max_lag=2)
>>> patterns[0]
{'neurons': [0, 2],
'lags': array([0.]) * ms,
'pvalue': [5.3848138041122556e-05],
'times': array([ 90., 160., 170., 550., 790., 910., 930., 1420., 1470.,
1480., 1650., 2030., 2220., 2570., 3130., 3430., 3480., 3610.,
3800., 3830., 3930., 4080., 4560., 4600., 4670.]) * ms,
'signature': [[1, 83], [2, 25]]}
Refer to the Viziphant documentation regarding the visualization of this
example.
"""
from __future__ import division, print_function, unicode_literals
import copy
import math
import time
import warnings
import numpy as np
from scipy.stats import f
import elephant.conversion as conv
from elephant.utils import deprecated_alias
__all__ = [
"cell_assembly_detection"
]
@deprecated_alias(data='binned_spiketrain', maxlag='max_lag',
min_occ='min_occurrences',
same_config_cut='same_configuration_pruning')
def cell_assembly_detection(binned_spiketrain, max_lag, reference_lag=2,
alpha=0.05, min_occurrences=1, size_chunks=100,
max_spikes=np.inf, significance_pruning=True,
subgroup_pruning=True,
same_configuration_pruning=False,
bool_times_format=None, verbose=False):
"""
Perform the CAD analysis :cite:`cad-Russo2017_e19428` for the binned
(discretized) spike trains given in the input. The method looks for
candidate significant patterns with lags (number of bins between successive
spikes in the pattern) ranging from `-max_lag` to `max_lag` (the second
parameter of the function). Thus, between two successive spikes in the
pattern there can be at most `max_lag`*`bin_size` units of time.
The method agglomerates pairs of units (or a unit and a preexisting
assembly), tests their significance by a statistical test
and stops when the detected assemblies reach their maximal dimension
(parameter `max_spikes`).
At every agglomeration size step (e.g. from triplets to quadruplets), the
method filters patterns having the same neurons involved, and keeps only
the most significant one. This pruning is optional and the choice is
identified by the parameter 'significance_pruning'.
Assemblies already included in a bigger assembly are eliminated in a final
pruning step. Also this pruning is optional, and the choice is identified
by the parameter `subgroup_pruning`.
Parameters
----------
binned_spiketrain : elephant.conversion.BinnedSpikeTrain
Binned spike trains containing data to be analyzed.
max_lag : int
Maximal lag to be tested. For a binning dimension of bin_size the
        method will test all pair configurations with a time
shift between '-max_lag' and 'max_lag'.
reference_lag : int, optional
Reference lag (in bins) for the non-stationarity correction in the
statistical test.
Default: 2
alpha : float, optional
Significance level for the statistical test.
Default: 0.05
min_occurrences : int, optional
Minimal number of occurrences required for an assembly
(all assemblies, even if significant, with fewer occurrences
than min_occurrences are discarded).
        Default: 1
size_chunks : int, optional
Size (in bins) of chunks in which the spike trains are divided
        to compute the variance (to reduce non-stationarity effects
on variance estimation).
Default: 100
max_spikes : int, optional
Maximal assembly order (the algorithm will return assemblies
composed of maximum `max_spikes` elements).
Default: `np.inf`
significance_pruning : bool, optional
If True, the method performs significance pruning among
the detected assemblies.
Default: True
subgroup_pruning : bool, optional
If True, the method performs subgroup pruning among
the detected assemblies.
Default: True
same_configuration_pruning : bool, optional
        If True, performs an additional pruning step (not present in the
        original code, and more efficient): assemblies already formed are
        not tested again if they appear in the very same configuration.
Default: False
bool_times_format : bool, optional
.. deprecated:: 0.10.0
Has no effect, the returning 'times' are always a quantity array
specifying the pattern spike times.
Default: None
verbose : bool, optional
        Regulates the number of prints given by the method. If True, all
        prints are given; otherwise, the method does not print anything.
Default: False
Returns
-------
assembly : list of dict
Contains the assemblies detected for the bin size chosen. Each
assembly is a dictionary with attributes:
'neurons' : list
            Vector of units taking part in the assembly (unit order
            corresponds to the agglomeration order).
        'lags' : pq.Quantity
            Vector of time lags.
            `lags[z]` is the activation delay between `neurons[1]` and
            `neurons[z+1]`.
        'pvalue' : list
            Vector containing p-values.
            `pvalue[z]` is the p-value of the statistical test performed
            when adding `neurons[z+1]` to `neurons[1:z]`.
'times' : pq.Quantity
Assembly activation times in the units of `binned_spiketrain`.
'signature' : np.ndarray
Array of two entries `(z,c)`. The first is the number of neurons
participating in the assembly (size), and the second is number of
assembly occurrences.
Raises
------
TypeError
If `binned_spiketrain` is not an instance of
`elephant.conversion.BinnedSpikeTrain`.
ValueError
If the parameters are out of bounds.
Notes
-----
Alias: cad
"""
initial_time = time.time()
# check parameter input and raise errors if necessary
_raise_errors(binned_spiketrain=binned_spiketrain,
max_lag=max_lag,
alpha=alpha,
min_occurrences=min_occurrences,
size_chunks=size_chunks,
max_spikes=max_spikes)
if bool_times_format is not None:
warnings.warn("'bool_times_format' is deprecated and has no effect; "
"the returning 'times' are always a quantity array "
"specifying the pattern spike times. Set this parameter "
"to None.", DeprecationWarning)
bin_size = binned_spiketrain.bin_size
t_start = binned_spiketrain.t_start
# transform the binned spiketrain into array
binned_spiketrain = binned_spiketrain.to_array()
# zero order
n_neurons = len(binned_spiketrain)
# initialize empty assembly
assembly_in = [{'neurons': None,
'lags': None,
'pvalue': None,
'times': None,
'signature': None} for _ in range(n_neurons)]
# initializing the dictionaries
if verbose:
print('Initializing the dictionaries...')
for w1 in range(n_neurons):
assembly_in[w1]['neurons'] = [w1]
assembly_in[w1]['lags'] = []
assembly_in[w1]['pvalue'] = []
assembly_in[w1]['times'] = binned_spiketrain[w1]
assembly_in[w1]['signature'] = [[1, sum(binned_spiketrain[w1])]]
# first order = test over pairs
# denominator of the Bonferroni correction
# divide alpha by the number of tests performed in the first
# pairwise testing loop
number_test_performed = n_neurons * (n_neurons - 1) * (2 * max_lag + 1)
alpha = alpha * 2 / float(number_test_performed)
if verbose:
print('actual significance_level', alpha)
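    # Worked example of the correction above (hypothetical sizes): with
    # n_neurons = 5 and max_lag = 2, number_test_performed = 5 * 4 * 5 = 100,
    # so an input alpha of 0.05 becomes 0.05 * 2 / 100 = 0.001 per pairwise
    # test.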
# sign_pairs_matrix is the matrix with entry as 1 for the significant pairs
sign_pairs_matrix = np.zeros((n_neurons, n_neurons), dtype=np.int)
assembly = []
if verbose:
print('Testing on pairs...')
# nns: count of the existing assemblies
nns = 0
# initialize the structure existing_patterns, storing the patterns
# determined by neurons and lags:
# if the pattern is already existing, don't do the test
existing_patterns = []
# for loop for the pairwise testing
for w1 in range(n_neurons - 1):
for w2 in range(w1 + 1, n_neurons):
spiketrain2 = binned_spiketrain[w2]
n2 = w2
assembly_flag = 0
# call of the function that does the pairwise testing
call_tp = _test_pair(
ensemble=assembly_in[w1],
spiketrain2=spiketrain2,
n2=n2,
max_lag=max_lag,
size_chunks=size_chunks,
reference_lag=reference_lag,
existing_patterns=existing_patterns,
same_configuration_pruning=same_configuration_pruning)
if same_configuration_pruning:
assem_tp = call_tp[0]
else:
assem_tp = call_tp
# if the assembly given in output is significant and the number
# of occurrences is higher than the minimum requested number
if assem_tp['pvalue'][-1] < alpha and \
assem_tp['signature'][-1][1] > min_occurrences:
# save the assembly in the output
assembly.append(assem_tp)
sign_pairs_matrix[w1][w2] = 1
assembly_flag = 1 # flag : it is indeed an assembly
# put the item_candidate into the existing_patterns list
if same_configuration_pruning:
item_candidate = call_tp[1]
if not existing_patterns:
existing_patterns = [item_candidate]
else:
existing_patterns.append(item_candidate)
if assembly_flag:
nns += 1 # count of the existing assemblies
# making sign_pairs_matrix symmetric
sign_pairs_matrix = sign_pairs_matrix + sign_pairs_matrix.T
sign_pairs_matrix[sign_pairs_matrix == 2] = 1
# print(sign_pairs_matrix)
# second order and more: increase the assembly size by adding a new unit
# the algorithm will return assemblies composed by
# maximum max_spikes elements
if verbose:
print('\nTesting on higher order assemblies...\n')
# keep the count of the current size of the assembly
current_size_agglomeration = 2
# number of groups previously found
n_as = len(assembly)
# w2_to_test_v : contains the elements to test with the elements that are
# in the assembly in input
w2_to_test_v = np.zeros(n_neurons)
# testing for higher order assemblies
w1 = 0
while w1 < n_as:
w1_elements = assembly[w1]['neurons']
# Add only neurons that have significant first order
# co-occurrences with members of the assembly
# Find indices and values of nonzero elements
for i in range(len(w1_elements)):
w2_to_test_v += sign_pairs_matrix[w1_elements[i]]
# w2_to_test_p : vector with the index of nonzero elements
w2_to_test_p = np.nonzero(w2_to_test_v)[0]
# list with the elements to test
# that are not already in the assembly
w2_to_test = [item for item in w2_to_test_p
if item not in w1_elements]
pop_flag = 0
# check that there are candidate neurons for agglomeration
if w2_to_test:
# bonferroni correction only for the tests actually performed
alpha = alpha / float(len(w2_to_test) * n_as * (2 * max_lag + 1))
# testing for the element in w2_to_test
for ww2 in range(len(w2_to_test)):
w2 = w2_to_test[ww2]
spiketrain2 = binned_spiketrain[w2]
assembly_flag = 0
pop_flag = max(assembly_flag, 0)
# testing for the assembly and the new neuron
call_tp = _test_pair(
ensemble=assembly[w1],
spiketrain2=spiketrain2,
n2=w2,
max_lag=max_lag,
size_chunks=size_chunks,
reference_lag=reference_lag,
existing_patterns=existing_patterns,
same_configuration_pruning=same_configuration_pruning)
if same_configuration_pruning:
assem_tp = call_tp[0]
else:
assem_tp = call_tp
# if it is significant and
# the number of occurrences is sufficient and
# the length of the assembly is less than the input limit
if assem_tp['pvalue'][-1] < alpha and \
assem_tp['signature'][-1][1] > min_occurrences and \
assem_tp['signature'][-1][0] <= max_spikes:
# the assembly is saved in the output list of
# assemblies
assembly.append(assem_tp)
assembly_flag = 1
if len(assem_tp['neurons']) > current_size_agglomeration:
# up to the next agglomeration level
current_size_agglomeration += 1
# Pruning step 1
# between two assemblies with the same unit set
# arranged into different
# configurations, choose the most significant one
if significance_pruning is True and \
current_size_agglomeration > 3:
assembly, n_filtered_assemblies = \
_significance_pruning_step(
pre_pruning_assembly=assembly)
if same_configuration_pruning:
item_candidate = call_tp[1]
existing_patterns.append(item_candidate)
if assembly_flag:
# count one more assembly
nns += 1
n_as = len(assembly)
# if at least once the assembly was agglomerated to a bigger one,
# pop the smaller one
if pop_flag:
assembly.pop(w1)
w1 = w1 + 1
# Pruning step 1
# between two assemblies with the same unit set arranged into different
# configurations, choose the most significant one
# Last call for pruning of last order agglomeration
if significance_pruning:
assembly = _significance_pruning_step(pre_pruning_assembly=assembly)[0]
# Pruning step 2
# Remove assemblies whom elements are already
# ALL included in a bigger assembly
if subgroup_pruning:
assembly = _subgroup_pruning_step(pre_pruning_assembly=assembly)
# Reformat of the activation times
for pattern in assembly:
times = np.where(pattern['times'] > 0)[0] * bin_size + t_start
pattern['times'] = times
pattern['lags'] = pattern['lags'] * bin_size
pattern['signature'] = np.array(pattern['signature'], dtype=np.int32)
# Give as output only the maximal groups
if verbose:
print('\nGiving outputs of the method...\n')
print('final_assembly')
for item in assembly:
print(item['neurons'],
item['lags'],
item['signature'])
# Time needed for the computation
if verbose:
print('\ntime', time.time() - initial_time)
return assembly
def _chunking(binned_pair, size_chunks, max_lag, best_lag):
"""
Chunking the object binned_pair into parts with the same bin length
Parameters
----------
binned_pair : np.array
vector of the binned spike trains for the pair being analyzed
size_chunks : int
size of chunks desired
max_lag : int
max number of lags for the bin_size chosen
best_lag : int
lag with the higher number of coincidences
Returns
-------
chunked : list
list with the object binned_pair cut in size_chunks parts
n_chunks : int
number of chunks
"""
    length = len(binned_pair[0])
# number of chunks
n_chunks = math.ceil((length - max_lag) / size_chunks)
# new chunk size, this is to have all chunks of roughly the same size
size_chunks = math.floor((length - max_lag) / n_chunks)
n_chunks = np.int(n_chunks)
size_chunks = np.int(size_chunks)
chunked = [[[], []] for _ in range(n_chunks)]
# cut the time series according to best_lag
binned_pair_cut = np.array([np.zeros(length - max_lag, dtype=np.int),
np.zeros(length - max_lag, dtype=np.int)])
# choose which entries to consider according to the best lag chosen
if best_lag == 0:
binned_pair_cut[0] = binned_pair[0][0:length - max_lag]
binned_pair_cut[1] = binned_pair[1][0:length - max_lag]
elif best_lag > 0:
binned_pair_cut[0] = binned_pair[0][0:length - max_lag]
binned_pair_cut[1] = binned_pair[1][
best_lag:length - max_lag + best_lag]
else:
binned_pair_cut[0] = binned_pair[0][
-best_lag:length - max_lag - best_lag]
binned_pair_cut[1] = binned_pair[1][0:length - max_lag]
# put the cut data into the chunked object
for iii in range(n_chunks - 1):
chunked[iii][0] = binned_pair_cut[0][
size_chunks * iii:size_chunks * (iii + 1)]
chunked[iii][1] = binned_pair_cut[1][
size_chunks * iii:size_chunks * (iii + 1)]
# last chunk can be of slightly different size
chunked[n_chunks - 1][0] = binned_pair_cut[0][
size_chunks * (n_chunks - 1):length]
chunked[n_chunks - 1][1] = binned_pair_cut[1][
size_chunks * (n_chunks - 1):length]
return chunked, n_chunks
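# Worked example for _chunking (hypothetical sizes): with a binned pair of
# length 210, max_lag=10 and size_chunks=100, the usable length is
# 210 - 10 = 200, so n_chunks = ceil(200 / 100) = 2 and the recomputed chunk
# size is floor(200 / 2) = 100; the last chunk absorbs any remainder.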
def _assert_same_pattern(item_candidate, existing_patterns, max_lag):
"""
Tests if a particular pattern has already been tested and retrieved as
significant.
Parameters
----------
item_candidate : list of list with two components
in the first component there are the neurons involved in the assembly,
in the second there are the correspondent lags
existing_patterns : list
list of the already significant patterns
max_lag : int
maximum lag to be tested
Returns
-------
True if the pattern was already tested and retrieved as significant
False if not
"""
# unique representation of pattern in term of lags, maxlag and neurons
# participating
item_candidate = sorted(item_candidate[0] * 2 * max_lag +
item_candidate[1] + max_lag)
if item_candidate in existing_patterns:
return True
else:
return False
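# Worked example of the encoding in _assert_same_pattern (hypothetical
# pattern, and assuming the neuron and lag entries are NumPy arrays so the
# arithmetic above is element-wise): with max_lag=2, neurons [0, 2] and lags
# [0, 1], each (neuron, lag) pair maps to neuron * 2 * max_lag + lag +
# max_lag, i.e. [0*4 + 0 + 2, 2*4 + 1 + 2] -> sorted([2, 11]), which is
# unique for a given neuron/lag configuration.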
def _test_pair(ensemble, spiketrain2, n2, max_lag, size_chunks, reference_lag,
existing_patterns, same_configuration_pruning):
"""
Tests if two spike trains have repetitive patterns occurring more
frequently than chance.
Parameters
----------
ensemble : dictionary
structure with the previously formed assembly and its spike train
spiketrain2 : list
spike train of the new unit to be tested for significance
(candidate to be a new assembly member)
n2 : int
new unit tested
max_lag : int
maximum lag to be tested
size_chunks : int
        size (in bins) of chunks in which the spike trains are divided
to compute the variance (to reduce non stationarity effects
on variance estimation)
reference_lag : int
lag of reference; if zero or negative reference lag=-l
existing_patterns : list
list of the already significant patterns
same_configuration_pruning : bool
        if True (an addition not present in the original code, and more
        efficient), does not re-test assemblies already formed
        if they appear in the very same configuration
Default: False
Returns
-------
assembly : dictionary
assembly formed by the method (can be empty), with attributes:
        'neurons' : vector of units taking part in the assembly
            (unit order corresponds to the agglomeration order)
        'lags' : vector of time lags (lags[z] is the activation delay between
            neurons[1] and neurons[z+1])
        'pvalue' : vector of p-values. `pvalue[z]` is the p-value of the
            statistical test performed when adding neurons[z+1] to
            neurons[1:z]
        'times' : assembly activation times. It reports how many times the
            complete assembly activates in that bin.
            The time always refers to the activation of the first listed
            assembly element (neurons[1]), which does not necessarily
            correspond to the first unit firing.
'signature' : array of two entries (z,c). The first is the number of
neurons participating in the assembly (size),
while the second is number of assembly occurrences.
item_candidate : list of list with two components
in the first component there are the neurons involved in the assembly,
in the second there are the correspondent lags.
"""
# list with the binned spike trains of the two neurons
binned_pair = [ensemble['times'], spiketrain2]
# For large bin_sizes, the binned spike counts may potentially fluctuate
# around a high mean level and never fall below some minimum count
# considerably larger than zero for the whole time series.
# Entries up to this minimum count would contribute
# to the coincidence count although they are completely
# uninformative, so we subtract the minima.
binned_pair = np.array([binned_pair[0] - min(binned_pair[0]),
binned_pair[1] - min(binned_pair[1])])
ntp = len(binned_pair[0]) # trial length
# Divide in parallel trials with 0/1 elements
# max number of spikes in one bin for both neurons
maxrate = np.int(max(max(binned_pair[0]), max(binned_pair[1])))
# creation of the parallel processes, one for each rate up to maxrate
# and computation of the coincidence count for both neurons
par_processes = np.zeros((maxrate, 2, ntp), dtype=np.int)
par_proc_expectation = np.zeros(maxrate, dtype=np.int)
for i in range(maxrate):
par_processes[i] = np.array(binned_pair > i, dtype=np.int)
par_proc_expectation[i] = (np.sum(par_processes[i][0]) * np.sum(
par_processes[i][1])) / float(ntp)
    # Decide which is the lag with most coincidences (l_ : best lag).
    # We are calculating the joint spike count of units A and B at lag l,
    # computed by counting the number of times a spike in A is followed
    # by a corresponding spike in unit B l bins later. This is done for
    # every lag, and we select the lag with the highest count.
# structure with the coincidence counts for each lag
fwd_coinc_count = np.array([0 for _ in range(max_lag + 1)])
bwd_coinc_count = np.array([0 for _ in range(max_lag + 1)])
for lag in range(max_lag + 1):
time_fwd_cc = np.array([binned_pair[0][
0:len(binned_pair[0]) - max_lag],
binned_pair[1][
lag:len(binned_pair[1]) - max_lag + lag]])
time_bwd_cc = np.array([binned_pair[0][
lag:len(binned_pair[0]) - max_lag + lag],
binned_pair[1][
0:len(binned_pair[1]) - max_lag]])
# taking the minimum, place by place for the coincidences
fwd_coinc_count[lag] = np.sum(np.minimum(time_fwd_cc[0],
time_fwd_cc[1]))
bwd_coinc_count[lag] = np.sum(np.minimum(time_bwd_cc[0],
time_bwd_cc[1]))
# choice of the best lag, taking into account the reference lag
if reference_lag <= 0:
# if the global maximum is in the forward process (A to B)
if np.amax(fwd_coinc_count) > np.amax(bwd_coinc_count):
# bwd_flag indicates whether we are in time_fwd_cc or time_bwd_cc
fwd_flag = 1
global_maximum_index = np.argmax(fwd_coinc_count)
else:
fwd_flag = 2
global_maximum_index = np.argmax(bwd_coinc_count)
best_lag = (fwd_flag == 1) * global_maximum_index - (
fwd_flag == 2) * global_maximum_index
max_coinc_count = max(np.amax(fwd_coinc_count),
np.amax(bwd_coinc_count))
else:
        # reverse the bwd_coinc_count array, ignoring the first entry
bwd_coinc_count_rev = bwd_coinc_count[1:len(bwd_coinc_count)][::-1]
hab_l = np.append(bwd_coinc_count_rev, fwd_coinc_count)
lags = range(-max_lag, max_lag + 1)
max_coinc_count = np.amax(hab_l)
best_lag = lags[np.argmax(hab_l)]
if best_lag < 0:
lag_ref = best_lag + reference_lag
coinc_count_ref = hab_l[lags.index(lag_ref)]
else:
lag_ref = best_lag - reference_lag
coinc_count_ref = hab_l[lags.index(lag_ref)]
# now check whether the pattern, with those neurons and that particular
# configuration of lags,
# is already in the list of the significant patterns
# if it is, don't do the testing
# if it is not, continue
previous_neu = ensemble['neurons']
pattern_candidate = copy.copy(previous_neu)
pattern_candidate.append(n2)
pattern_candidate = np.array(pattern_candidate)
# add both the new lag and zero
previous_lags = ensemble['lags']
lags_candidate = copy.copy(previous_lags)
lags_candidate.append(best_lag)
lags_candidate[:0] = [0]
pattern_candidate = list(pattern_candidate)
lags_candidate = list(lags_candidate)
item_candidate = [[pattern_candidate], [lags_candidate]]
if same_configuration_pruning:
if _assert_same_pattern(item_candidate=item_candidate,
existing_patterns=existing_patterns,
max_lag=max_lag):
en_neurons = copy.copy(ensemble['neurons'])
en_neurons.append(n2)
en_lags = copy.copy(ensemble['lags'])
en_lags.append(np.inf)
en_pvalue = copy.copy(ensemble['pvalue'])
en_pvalue.append(1)
en_n_occ = copy.copy(ensemble['signature'])
en_n_occ.append([0, 0])
item_candidate = []
assembly = {'neurons': en_neurons,
'lags': en_lags,
'pvalue': en_pvalue,
'times': [],
'signature': en_n_occ}
return assembly, item_candidate
else:
# I go on with the testing
pair_expectation = np.sum(par_proc_expectation)
        # case of no coincidences or limit for the F asymptotic
# distribution (too few coincidences)
if max_coinc_count == 0 or pair_expectation <= 5 or \
pair_expectation >= (min(np.sum(binned_pair[0]),
np.sum(binned_pair[1])) - 5):
en_neurons = copy.copy(ensemble['neurons'])
en_neurons.append(n2)
en_lags = copy.copy(ensemble['lags'])
en_lags.append(np.inf)
en_pvalue = copy.copy(ensemble['pvalue'])
en_pvalue.append(1)
en_n_occ = copy.copy(ensemble['signature'])
en_n_occ.append([0, 0])
assembly = {'neurons': en_neurons,
'lags': en_lags,
'pvalue': en_pvalue,
'times': [],
'signature': en_n_occ}
if same_configuration_pruning:
item_candidate = []
return assembly, item_candidate
else:
return assembly
else: # construct the activation series for binned_pair
length = len(binned_pair[0]) # trial length
activation_series = np.zeros(length)
if reference_lag <= 0:
if best_lag == 0: # synchrony case
for i in range(maxrate): # for all parallel processes
par_processes_a = par_processes[i][0]
par_processes_b = par_processes[i][1]
activation_series = \
np.add(activation_series,
np.multiply(par_processes_a,
par_processes_b))
coinc_count_matrix = np.array([[0, fwd_coinc_count[0]],
[bwd_coinc_count[2], 0]])
# matrix with #AB and #BA
# here we specifically choose
# 'l* = -2' for the synchrony case
elif best_lag > 0:
for i in range(maxrate):
par_processes_a = par_processes[i][0]
par_processes_b = par_processes[i][1]
# multiplication between the two binned time series
# shifted by best_lag
activation_series[0:length - best_lag] = \
np.add(activation_series[0:length - best_lag],
np.multiply(par_processes_a[
0:length - best_lag],
par_processes_b[
best_lag:length]))
coinc_count_matrix = \
np.array([[0, fwd_coinc_count[global_maximum_index]],
[bwd_coinc_count[global_maximum_index], 0]])
else:
for i in range(maxrate):
par_processes_a = par_processes[i][0]
par_processes_b = par_processes[i][1]
activation_series[-best_lag:length] = \
np.add(activation_series[-best_lag:length],
np.multiply(par_processes_a[
-best_lag:length],
par_processes_b[
0:length + best_lag]))
coinc_count_matrix = \
np.array([[0, fwd_coinc_count[global_maximum_index]],
[bwd_coinc_count[global_maximum_index], 0]])
else:
if best_lag == 0:
for i in range(maxrate):
par_processes_a = par_processes[i][0]
par_processes_b = par_processes[i][1]
activation_series = \
np.add(activation_series,
np.multiply(par_processes_a,
par_processes_b))
elif best_lag > 0:
for i in range(maxrate):
par_processes_a = par_processes[i][0]
par_processes_b = par_processes[i][1]
activation_series[0:length - best_lag] = \
np.add(activation_series[0:length - best_lag],
np.multiply(par_processes_a[
0:length - best_lag],
par_processes_b[
best_lag:length]))
else:
for i in range(maxrate):
par_processes_a = par_processes[i][0]
par_processes_b = par_processes[i][1]
activation_series[-best_lag:length] = \
np.add(activation_series[-best_lag:length],
np.multiply(par_processes_a[
-best_lag:length],
par_processes_b[
0:length + best_lag]))
coinc_count_matrix = np.array([[0, max_coinc_count],
[coinc_count_ref, 0]])
# chunking
chunked, nch = _chunking(binned_pair=binned_pair,
size_chunks=size_chunks,
max_lag=max_lag,
best_lag=best_lag)
marginal_counts = np.zeros((nch, maxrate, 2), dtype=np.int)
# for every chunk, a vector with in each entry the sum of elements
# in each parallel binary process, for each unit
# maxrate_t : contains the maxrates for both neurons in each chunk
maxrate_t = np.zeros(nch, dtype=np.int)
# ch_nn : contains the length of the different chunks
ch_nn = np.zeros(nch, dtype=np.int)
count_sum = 0
# for every chunk build the parallel processes
# and the coincidence counts
for iii in range(nch):
binned_pair_chunked = np.array(chunked[iii])
maxrate_t[iii] = max(max(binned_pair_chunked[0]),
max(binned_pair_chunked[1]))
ch_nn[iii] = len(chunked[iii][0])
par_processes_chunked = [None for _ in range(
np.int(maxrate_t[iii]))]
for i in range(np.int(maxrate_t[iii])):
par_processes_chunked[i] = np.zeros(
(2, len(binned_pair_chunked[0])), dtype=np.int)
par_processes_chunked[i] = np.array(binned_pair_chunked > i,
dtype=np.int)
for i in range(np.int(maxrate_t[iii])):
par_processes_a = par_processes_chunked[i][0]
par_processes_b = par_processes_chunked[i][1]
marginal_counts[iii][i][0] = np.int(np.sum(par_processes_a))
marginal_counts[iii][i][1] = np.int(np.sum(par_processes_b))
count_sum = count_sum + min(marginal_counts[iii][i][0],
marginal_counts[iii][i][1])
# marginal_counts[iii][i] has in its entries
# '[ #_a^{\alpha,c} , #_b^{\alpha,c}]'
# where '\alpha' goes from 1 to maxrate, c goes from 1 to nch
# calculation of variance for each chunk
n = ntp - max_lag # used in the calculation of the p-value
var_x = [np.zeros((2, 2)) for _ in range(nch)]
var_tot = 0
cov_abab = [0 for _ in range(nch)]
cov_abba = [0 for _ in range(nch)]
var_t = [np.zeros((2, 2)) for _ in range(nch)]
cov_x = [np.zeros((2, 2)) for _ in range(nch)]
for iii in range(nch): # for every chunk
ch_size = ch_nn[iii]
# evaluation of AB + variance and covariance
cov_abab[iii] = [[0 for _ in range(maxrate_t[iii])]
for _ in range(maxrate_t[iii])]
# for every rate up to the maxrate in that chunk
for i in range(maxrate_t[iii]):
par_marg_counts_i = \
np.outer(marginal_counts[iii][i], np.ones(2))
cov_abab[iii][i][i] = \
np.multiply(
np.multiply(par_marg_counts_i, par_marg_counts_i.T)
/ float(ch_size),
np.multiply(ch_size - par_marg_counts_i,
ch_size - par_marg_counts_i.T)
/ float(ch_size * (ch_size - 1)))
# calculation of the variance
var_t[iii] = var_t[iii] + cov_abab[iii][i][i]
# cross covariances terms
if maxrate_t[iii] > 1:
for j in range(i + 1, maxrate_t[iii]):
par_marg_counts_j = \
np.outer(marginal_counts[iii][j], np.ones(2))
cov_abab[iii][i][j] = \
2 * np.multiply(
np.multiply(par_marg_counts_j,
par_marg_counts_j.T)
/ float(ch_size),
np.multiply(ch_size - par_marg_counts_i,
ch_size - par_marg_counts_i.T)
/ float(ch_size * (ch_size - 1)))
# update of the variance
var_t[iii] = var_t[iii] + cov_abab[iii][i][j]
# evaluation of coinc_count_matrix = #AB - #BA
cov_abba[iii] = [[0 for _ in range(maxrate_t[iii])]
for _ in range(maxrate_t[iii])]
for i in range(maxrate_t[iii]):
par_marg_counts_i = \
np.outer(marginal_counts[iii][i], np.ones(2))
cov_abba[iii][i][i] = \
np.multiply(
np.multiply(par_marg_counts_i, par_marg_counts_i.T)
/ float(ch_size),
np.multiply(ch_size - par_marg_counts_i,
ch_size - par_marg_counts_i.T)
/ float(ch_size * (ch_size - 1) ** 2))
cov_x[iii] = cov_x[iii] + cov_abba[iii][i][i]
if maxrate_t[iii] > 1:
for j in range((i + 1), maxrate_t[iii]):
par_marg_counts_j = \
np.outer(marginal_counts[iii][j], np.ones(2))
cov_abba[iii][i][j] = \
2 * np.multiply(
np.multiply(par_marg_counts_j,
par_marg_counts_j.T)
/ float(ch_size),
np.multiply(ch_size - par_marg_counts_i,
ch_size - par_marg_counts_i.T)
/ float(ch_size * (ch_size - 1) ** 2))
cov_x[iii] = cov_x[iii] + cov_abba[iii][i][j]
var_x[iii] = var_t[iii] + var_t[iii].T - cov_x[iii] - cov_x[iii].T
var_tot = var_tot + var_x[iii]
# Yates correction
coinc_count_matrix = coinc_count_matrix - coinc_count_matrix.T
if abs(coinc_count_matrix[0][1]) > 0:
coinc_count_matrix = abs(coinc_count_matrix) - 0.5
if var_tot[0][1] == 0:
pr_f = 1
            # p-value obtained through approximation to a Fisher F distribution
# (here we employ the survival function)
else:
fstat = coinc_count_matrix ** 2 / var_tot
pr_f = f.sf(fstat[0][1], 1, n)
# Creation of the dictionary with the results
en_neurons = copy.copy(ensemble['neurons'])
en_neurons.append(n2)
en_lags = copy.copy(ensemble['lags'])
en_lags.append(best_lag)
en_pvalue = copy.copy(ensemble['pvalue'])
en_pvalue.append(pr_f)
en_n_occ = copy.copy(ensemble['signature'])
en_n_occ.append([len(en_neurons), sum(activation_series)])
assembly = {'neurons': en_neurons,
'lags': en_lags,
'pvalue': en_pvalue,
'times': activation_series,
'signature': en_n_occ}
if same_configuration_pruning:
return assembly, item_candidate
else:
return assembly
def _significance_pruning_step(pre_pruning_assembly):
"""
Between two assemblies with the same unit set arranged into different
configurations the most significant one is chosen.
Parameters
----------
pre_pruning_assembly : list
contains the whole set of significant assemblies (unfiltered)
Returns
-------
assembly : list
contains the filtered assemblies
n_filtered_assemblies : int
number of filtered assemblies by the function
"""
# number of assemblies before pruning
nns = len(pre_pruning_assembly)
# boolean array for selection of assemblies to keep
selection = []
# list storing the found assemblies
assembly = []
for i in range(nns):
elem = sorted(pre_pruning_assembly[i]['neurons'])
        # stored as a sorted list so that membership in selection can be checked
if elem in selection:
# find the element that was already in the list
pre = selection.index(elem)
if pre_pruning_assembly[i]['pvalue'][-1] <= \
assembly[pre]['pvalue'][-1]:
# if the new element has a p-value that is smaller
# than the one had previously
selection[pre] = elem
# substitute the prev element in the selection with the new
assembly[pre] = pre_pruning_assembly[i]
# substitute also in the list of the new assemblies
if elem not in selection:
selection.append(elem)
assembly.append(pre_pruning_assembly[i])
# number of assemblies filtered out is equal to the difference
# between the pre and post pruning size
n_filtered_assemblies = nns - len(assembly)
return assembly, n_filtered_assemblies
def _subgroup_pruning_step(pre_pruning_assembly):
"""
Removes assemblies which are already all included in a bigger assembly
Parameters
----------
pre_pruning_assembly : list
contains the assemblies filtered by the significance value
Returns
--------
final_assembly : list
contains the assemblies filtered by inclusion
"""
    # reversing pre_pruning_assembly makes the computation quicker
    # since the assemblies are formed by agglomeration
pre_pruning_assembly_r = list(reversed(pre_pruning_assembly))
nns = len(pre_pruning_assembly_r)
# boolean list with the selected assemblies
selection = [True for _ in range(nns)]
for i in range(nns):
# check only in the range of the already selected assemblies
if selection[i]:
a = pre_pruning_assembly_r[i]['neurons']
for j in range(i + 1, nns):
if selection[j]:
b = pre_pruning_assembly_r[j]['neurons']
# check if a is included in b or vice versa
if set(a).issuperset(set(b)):
selection[j] = False
if set(b).issuperset(set(a)):
selection[i] = False
# only for the case in which significance_pruning=False
if set(a) == set(b):
selection[i] = True
selection[j] = True
assembly_r = []
# put into final_assembly only the selected ones
for i in range(nns):
if selection[i]:
assembly_r.append(pre_pruning_assembly_r[i])
assembly = list(reversed(assembly_r))
return assembly
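# Worked example for _subgroup_pruning_step (hypothetical assemblies): given
# assemblies over the neuron sets {1, 2} and {1, 2, 3}, the smaller set is
# contained in the larger one and is dropped; two assemblies over exactly
# the same set are both kept (a case that matters only when
# significance_pruning=False).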
def _raise_errors(binned_spiketrain, max_lag, alpha, min_occurrences,
size_chunks, max_spikes):
"""
Returns errors if the parameters given in input are not correct.
Parameters
----------
binned_spiketrain : BinnedSpikeTrain object
binned spike trains containing data to be analysed
max_lag : int
maximal lag to be tested. For a binning dimension of bin_size the
        method will test all pair configurations with a time
shift between -max_lag and max_lag
alpha : float
alpha level.
min_occurrences : int
minimal number of occurrences required for an assembly
(all assemblies, even if significant, with fewer occurrences
than min_occurrences are discarded).
size_chunks : int
        size (in bins) of chunks in which the spike trains are divided
        to compute the variance (to reduce non-stationarity effects
on variance estimation)
max_spikes : int
        maximal assembly order (the algorithm will return assemblies
        composed of at most max_spikes elements).
Raises
------
TypeError
if the data is not an elephant.conv.BinnedSpikeTrain object
ValueError
if the maximum lag considered is 1 or less
if the significance level is not in [0,1]
if the minimal number of occurrences for an assembly is less than 1
if the length of the chunks for the variance computation is 1 or less
if the maximal assembly order is not between 2
and the number of neurons
if the time series is too short (less than 100 bins)
"""
if not isinstance(binned_spiketrain, conv.BinnedSpikeTrain):
raise TypeError(
'data must be in BinnedSpikeTrain format')
if max_lag < 2:
        raise ValueError('max_lag value cannot be less than 2')
if alpha < 0 or alpha > 1:
raise ValueError('significance level has to be in interval [0,1]')
if min_occurrences < 1:
raise ValueError('minimal number of occurrences for an assembly '
'must be at least 1')
if size_chunks < 2:
raise ValueError('length of the chunks cannot be 1 or less')
if max_spikes < 2:
        raise ValueError('maximal assembly order must be at least 2')
if binned_spiketrain.shape[1] - max_lag < 100:
raise ValueError('The time series is too short, consider '
'taking a longer portion of spike train '
'or diminish the bin size to be tested')
# alias for the function
cad = cell_assembly_detection
|
INM-6/elephant
|
elephant/cell_assembly_detection.py
|
Python
|
bsd-3-clause
| 48,301
|
[
"NEURON"
] |
2d399a0af5d0139edb252b431019fa921ef305bebeac61d71378cb87ea3f0366
|
#Author: Carlos Eduardo Leão Elmadjian
import numpy as np
#Class to model a single neuron
#------------------------------
class Neuron():
def __init__(self, idx, eta, inputs):
self.idx = idx
self.eta = eta
self.weight = [0.0 for i in range(inputs+1)]
self.input = [1.0 for i in range(inputs+1)]
self.f_links = []
self.b_links = []
self.output = None
self.delta = None
def set_input(self, idx, x):
self.input[idx] = x
def set_weight(self, idx, w):
self.weight[idx] = w
def set_weight_list(self, new_list):
self.weight = new_list
def set_b_links(self, p):
self.b_links.append(p)
def set_f_links(self, p):
self.f_links.append(p)
def get_param(self):
sum = 0
for f in self.f_links:
sum += f.weight[self.idx] * f.delta
return sum
def calculate_delta(self, param):
self.delta = self.output * (1.0 - self.output) * param
def calculate_output(self):
y = np.dot(self.weight, self.input)
        self.output = 1 / (1 + np.exp(-y))  # logistic sigmoid; note the -y
def propagate_output(self):
for l in self.f_links:
l.set_input(self.idx, self.output)
def update_weights(self):
for i in range(len(self.weight)):
self.weight[i] += self.eta * self.delta * self.input[i]
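# Quick sanity check for the class above (hypothetical numbers): with the
# logistic output at 0.5, calculate_delta scales its argument by
# 0.5 * (1 - 0.5) = 0.25, the maximum of the sigmoid derivative; outputs
# near 0 or 1 shrink the update accordingly.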
#Calculates the BPG algorithm for exercise 01
#--------------------------------------------
def backpropagation(training_set, n_in, n_out, n_hidden, eta=0.05, epochs=1):
P_hidden = [Neuron(i+1, eta, n_in) for i in range(n_hidden)]
P_out = [Neuron(i+1, eta, n_hidden) for i in range(n_out)]
#setting links
for pi in P_hidden:
for pj in P_out:
pj.set_b_links(pi)
pi.set_f_links(pj)
#manually setting weights
P_out[0].set_weight_list([-0.1, -0.4, 0.1, 0.6])
P_out[1].set_weight_list([0.6, 0.2, -0.1, -0.2])
P_hidden[0].set_weight_list([0.1, -0.2, 0.0, 0.2])
P_hidden[1].set_weight_list([0.2, -0.2, 0.1, 0.3])
P_hidden[2].set_weight_list([0.5, 0.3, -0.4, 0.2])
#training
for i in range(epochs):
for sample in training_set:
#forwarding the input
X = [1.0] + sample[0]
Y = sample[1]
for p in P_hidden:
p.input = X
p.calculate_output()
p.propagate_output()
for p in P_out:
p.calculate_output()
#backwards propagation
            for j in range(len(P_out)):
                P_out[j].calculate_delta(Y[j] - P_out[j].output)
for p in P_hidden:
p.calculate_delta(p.get_param())
#updating network weights
[p.update_weights() for p in P_out]
[p.update_weights() for p in P_hidden]
print("printing weights after %d epochs:" % epochs)
print("\nP_hidden:")
[print(['%.4f' % w for w in p.weight]) for p in P_hidden]
print("\nP_out:")
[print(['%.4f' % w for w in p.weight]) for p in P_out]
#initial setup
#--------------
def main():
training_set = [[[0.6, 0.1, 0.2], [1, 0]], [[0.1, 0.5, 0.6], [0, 1]]]
backpropagation(training_set, 3, 2, 3, epochs=1)
#-----------------------
if __name__ == "__main__":
main()
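# Worked check (hedged addition, not part of the exercise): with the logistic
# output o = 1/(1 + e^(-w.x)) and squared error E = 1/2 * (y - o)^2, the
# gradient is dE/dw_i = -(y - o) * o * (1 - o) * x_i. The update
# w_i += eta * delta * x_i with delta = (y - o) * o * (1 - o) is therefore
# plain gradient descent on E; calculate_delta() supplies the o*(1-o)*param
# factor and update_weights() applies the step.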
|
elmadjian/pcs5735
|
aula2/exercise01.py
|
Python
|
mpl-2.0
| 3,304
|
[
"NEURON"
] |
64d8aa6f25405ca22d1a924f674ef754016e4927076d76990362ba75965b2b90
|
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__doc__ = """
Generic Taskmaster module for the SCons build engine.
This module contains the primary interface(s) between a wrapping user
interface and the SCons build engine. There are two key classes here:
Taskmaster
This is the main engine for walking the dependency graph and
calling things to decide what does or doesn't need to be built.
Task
This is the base class for allowing a wrapping interface to
decide what does or doesn't actually need to be done. The
intention is for a wrapping interface to subclass this as
appropriate for different types of behavior it may need.
The canonical example is the SCons native Python interface,
which has Task subclasses that handle its specific behavior,
like printing "`foo' is up to date" when a top-level target
doesn't need to be built, and handling the -c option by removing
targets as its "build" action. There is also a separate subclass
for suppressing this output when the -q option is used.
The Taskmaster instantiates a Task object for each (set of)
target(s) that it decides need to be evaluated and/or built.
"""
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
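# Hedged illustration (not from the SCons sources): the "wrapping interface"
# pattern described in the docstring above amounts to subclassing Task (or
# AlwaysTask, defined below) and overriding its hooks, for example:
#
#     class QuietTask(AlwaysTask):
#         def display(self, message):
#             pass    # suppress "`foo' is up to date"-style messages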
from itertools import chain
import operator
import sys
import traceback
import SCons.Errors
import SCons.Node
import SCons.Warnings
StateString = SCons.Node.StateString
NODE_NO_STATE = SCons.Node.no_state
NODE_PENDING = SCons.Node.pending
NODE_EXECUTING = SCons.Node.executing
NODE_UP_TO_DATE = SCons.Node.up_to_date
NODE_EXECUTED = SCons.Node.executed
NODE_FAILED = SCons.Node.failed
print_prepare = 0 # set by option --debug=prepare
# A subsystem for recording stats about how different Nodes are handled by
# the main Taskmaster loop. There's no external control here (no need for
# a --debug= option); enable it by changing the value of CollectStats.
CollectStats = None
class Stats(object):
"""
A simple class for holding statistics about the disposition of a
Node by the Taskmaster. If we're collecting statistics, each Node
processed by the Taskmaster gets one of these attached, in which case
the Taskmaster records its decision each time it processes the Node.
(Ideally, that's just once per Node.)
"""
def __init__(self):
"""
Instantiates a Taskmaster.Stats object, initializing all
appropriate counters to zero.
"""
self.considered = 0
self.already_handled = 0
self.problem = 0
self.child_failed = 0
self.not_built = 0
self.side_effects = 0
self.build = 0
StatsNodes = []
fmt = "%(considered)3d "\
"%(already_handled)3d " \
"%(problem)3d " \
"%(child_failed)3d " \
"%(not_built)3d " \
"%(side_effects)3d " \
"%(build)3d "
def dump_stats():
for n in sorted(StatsNodes, key=lambda a: str(a)):
print (fmt % n.stats.__dict__) + str(n)
class Task(object):
"""
Default SCons build engine task.
This controls the interaction of the actual building of node
and the rest of the engine.
This is expected to handle all of the normally-customizable
aspects of controlling a build, so any given application
*should* be able to do what it wants by sub-classing this
class and overriding methods as appropriate. If an application
    needs to customize something by sub-classing Taskmaster (or
some other build engine class), we should first try to migrate
that functionality into this class.
Note that it's generally a good idea for sub-classes to call
these methods explicitly to update state, etc., rather than
roll their own interaction with Taskmaster from scratch.
"""
def __init__(self, tm, targets, top, node):
self.tm = tm
self.targets = targets
self.top = top
self.node = node
self.exc_clear()
def trace_message(self, method, node, description='node'):
fmt = '%-20s %s %s\n'
return fmt % (method + ':', description, self.tm.trace_node(node))
def display(self, message):
"""
Hook to allow the calling interface to display a message.
This hook gets called as part of preparing a task for execution
(that is, a Node to be built). As part of figuring out what Node
        should be built next, the actual target list may be altered,
along with a message describing the alteration. The calling
interface can subclass Task and provide a concrete implementation
of this method to see those messages.
"""
pass
def prepare(self):
"""
Called just before the task is executed.
This is mainly intended to give the target Nodes a chance to
unlink underlying files and make all necessary directories before
the Action is actually called to build the targets.
"""
global print_prepare
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.prepare()', self.node))
# Now that it's the appropriate time, give the TaskMaster a
# chance to raise any exceptions it encountered while preparing
# this task.
self.exception_raise()
if self.tm.message:
self.display(self.tm.message)
self.tm.message = None
# Let the targets take care of any necessary preparations.
# This includes verifying that all of the necessary sources
# and dependencies exist, removing the target file(s), etc.
#
# As of April 2008, the get_executor().prepare() method makes
# sure that all of the aggregate sources necessary to build this
# Task's target(s) exist in one up-front check. The individual
        # target t.prepare() methods check that each target's explicit
        # or implicit dependencies exist, and also initialize the
# .sconsign info.
executor = self.targets[0].get_executor()
if executor is None:
return
executor.prepare()
for t in executor.get_action_targets():
if print_prepare:
print "Preparing target %s..."%t
for s in t.side_effects:
print "...with side-effect %s..."%s
t.prepare()
for s in t.side_effects:
if print_prepare:
print "...Preparing side-effect %s..."%s
s.prepare()
def get_target(self):
"""Fetch the target being built or updated by this task.
"""
return self.node
def needs_execute(self):
# TODO(deprecate): "return True" is the old default behavior;
# change it to NotImplementedError (after running through the
# Deprecation Cycle) so the desired behavior is explicitly
# determined by which concrete subclass is used.
#raise NotImplementedError
msg = ('Taskmaster.Task is an abstract base class; instead of\n'
'\tusing it directly, '
'derive from it and override the abstract methods.')
SCons.Warnings.warn(SCons.Warnings.TaskmasterNeedsExecuteWarning, msg)
return True
def execute(self):
"""
Called to execute the task.
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff in
prepare(), executed() or failed().
"""
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.execute()', self.node))
try:
cached_targets = []
for t in self.targets:
if not t.retrieve_from_cache():
break
cached_targets.append(t)
if len(cached_targets) < len(self.targets):
# Remove targets before building. It's possible that we
# partially retrieved targets from the cache, leaving
# them in read-only mode. That might cause the command
# to fail.
#
for t in cached_targets:
try:
t.fs.unlink(t.path)
except (IOError, OSError):
pass
self.targets[0].build()
else:
for t in cached_targets:
t.cached = 1
except SystemExit:
exc_value = sys.exc_info()[1]
raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code)
except SCons.Errors.UserError:
raise
except SCons.Errors.BuildError:
raise
except Exception, e:
buildError = SCons.Errors.convert_to_BuildError(e)
buildError.node = self.targets[0]
buildError.exc_info = sys.exc_info()
raise buildError
def executed_without_callbacks(self):
"""
Called when the task has been successfully executed
and the Taskmaster instance doesn't want to call
the Node's callback methods.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.executed_without_callbacks()',
self.node))
for t in self.targets:
if t.get_state() == NODE_EXECUTING:
for side_effect in t.side_effects:
side_effect.set_state(NODE_NO_STATE)
t.set_state(NODE_EXECUTED)
def executed_with_callbacks(self):
"""
Called when the task has been successfully executed and
the Taskmaster instance wants to call the Node's callback
methods.
This may have been a do-nothing operation (to preserve build
order), so we must check the node's state before deciding whether
it was "built", in which case we call the appropriate Node method.
In any event, we always call "visited()", which will handle any
post-visit actions that must take place regardless of whether
or not the target was an actual built target or a source Node.
"""
global print_prepare
T = self.tm.trace
if T: T.write(self.trace_message('Task.executed_with_callbacks()',
self.node))
for t in self.targets:
if t.get_state() == NODE_EXECUTING:
for side_effect in t.side_effects:
side_effect.set_state(NODE_NO_STATE)
t.set_state(NODE_EXECUTED)
if not t.cached:
t.push_to_cache()
t.built()
t.visited()
if (not print_prepare and
(not hasattr(self, 'options') or not self.options.debug_includes)):
t.release_target_info()
else:
t.visited()
executed = executed_with_callbacks
def failed(self):
"""
Default action when a task fails: stop the build.
Note: Although this function is normally invoked on nodes in
the executing state, it might also be invoked on up-to-date
nodes when using Configure().
"""
self.fail_stop()
def fail_stop(self):
"""
Explicit stop-the-build failure.
This sets failure status on the target nodes and all of
their dependent parent nodes.
Note: Although this function is normally invoked on nodes in
the executing state, it might also be invoked on up-to-date
nodes when using Configure().
"""
T = self.tm.trace
        if T: T.write(self.trace_message('Task.fail_stop()', self.node))
# Invoke will_not_build() to clean-up the pending children
# list.
self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
# Tell the taskmaster to not start any new tasks
self.tm.stop()
# We're stopping because of a build failure, but give the
# calling Task class a chance to postprocess() the top-level
# target under which the build failure occurred.
self.targets = [self.tm.current_top]
self.top = 1
def fail_continue(self):
"""
Explicit continue-the-build failure.
This sets failure status on the target nodes and all of
their dependent parent nodes.
Note: Although this function is normally invoked on nodes in
the executing state, it might also be invoked on up-to-date
nodes when using Configure().
"""
T = self.tm.trace
        if T: T.write(self.trace_message('Task.fail_continue()', self.node))
self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
def make_ready_all(self):
"""
Marks all targets in a task ready for execution.
This is used when the interface needs every target Node to be
visited--the canonical example being the "scons -c" option.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.make_ready_all()', self.node))
self.out_of_date = self.targets[:]
for t in self.targets:
t.disambiguate().set_state(NODE_EXECUTING)
for s in t.side_effects:
# add disambiguate here to mirror the call on targets above
s.disambiguate().set_state(NODE_EXECUTING)
def make_ready_current(self):
"""
Marks all targets in a task ready for execution if any target
is not current.
This is the default behavior for building only what's necessary.
"""
global print_prepare
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.make_ready_current()',
self.node))
self.out_of_date = []
needs_executing = False
for t in self.targets:
try:
t.disambiguate().make_ready()
is_up_to_date = not t.has_builder() or \
(not t.always_build and t.is_up_to_date())
except EnvironmentError, e:
raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)
if not is_up_to_date:
self.out_of_date.append(t)
needs_executing = True
if needs_executing:
for t in self.targets:
t.set_state(NODE_EXECUTING)
for s in t.side_effects:
# add disambiguate here to mirror the call on targets in first loop above
s.disambiguate().set_state(NODE_EXECUTING)
else:
for t in self.targets:
# We must invoke visited() to ensure that the node
# information has been computed before allowing the
# parent nodes to execute. (That could occur in a
# parallel build...)
t.visited()
t.set_state(NODE_UP_TO_DATE)
if (not print_prepare and
(not hasattr(self, 'options') or not self.options.debug_includes)):
t.release_target_info()
make_ready = make_ready_current
def postprocess(self):
"""
Post-processes a task after it's been executed.
This examines all the targets just built (or not, we don't care
if the build was successful, or even if there was no build
because everything was up-to-date) to see if they have any
waiting parent Nodes, or Nodes waiting on a common side effect,
that can be put back on the candidates list.
"""
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.postprocess()', self.node))
# We may have built multiple targets, some of which may have
# common parents waiting for this build. Count up how many
# targets each parent was waiting for so we can subtract the
# values later, and so we *don't* put waiting side-effect Nodes
# back on the candidates list if the Node is also a waiting
# parent.
targets = set(self.targets)
pending_children = self.tm.pending_children
parents = {}
for t in targets:
# A node can only be in the pending_children set if it has
# some waiting_parents.
if t.waiting_parents:
if T: T.write(self.trace_message(u'Task.postprocess()',
t,
'removing'))
pending_children.discard(t)
for p in t.waiting_parents:
parents[p] = parents.get(p, 0) + 1
for t in targets:
if t.side_effects is not None:
for s in t.side_effects:
if s.get_state() == NODE_EXECUTING:
s.set_state(NODE_NO_STATE)
for p in s.waiting_parents:
parents[p] = parents.get(p, 0) + 1
for p in s.waiting_s_e:
if p.ref_count == 0:
self.tm.candidates.append(p)
for p, subtract in parents.items():
p.ref_count = p.ref_count - subtract
if T: T.write(self.trace_message(u'Task.postprocess()',
p,
'adjusted parent ref count'))
if p.ref_count == 0:
self.tm.candidates.append(p)
for t in targets:
t.postprocess()
# Exception handling subsystem.
#
# Exceptions that occur while walking the DAG or examining Nodes
# must be raised, but must be raised at an appropriate time and in
# a controlled manner so we can, if necessary, recover gracefully,
# possibly write out signature information for Nodes we've updated,
# etc. This is done by having the Taskmaster tell us about the
    # exception, and letting the Task hold on to it (via exception_set())
    # until it can be re-raised at a safe point (via _exception_raise()).
def exc_info(self):
"""
Returns info about a recorded exception.
"""
return self.exception
def exc_clear(self):
"""
Clears any recorded exception.
This also changes the "exception_raise" attribute to point
to the appropriate do-nothing method.
"""
self.exception = (None, None, None)
self.exception_raise = self._no_exception_to_raise
def exception_set(self, exception=None):
"""
Records an exception to be raised at the appropriate time.
This also changes the "exception_raise" attribute to point
        to the method that will, in fact, raise the exception.
"""
if not exception:
exception = sys.exc_info()
self.exception = exception
self.exception_raise = self._exception_raise
def _no_exception_to_raise(self):
pass
def _exception_raise(self):
"""
Raises a pending exception that was recorded while getting a
Task ready for execution.
"""
exc = self.exc_info()[:]
try:
exc_type, exc_value, exc_traceback = exc
except ValueError:
exc_type, exc_value = exc
exc_traceback = None
raise exc_type, exc_value, exc_traceback
class AlwaysTask(Task):
def needs_execute(self):
"""
Always returns True (indicating this Task should always
be executed).
Subclasses that need this behavior (as opposed to the default
of only executing Nodes that are out of date w.r.t. their
dependencies) can use this as follows:
class MyTaskSubclass(SCons.Taskmaster.Task):
            needs_execute = SCons.Taskmaster.AlwaysTask.needs_execute
"""
return True
class OutOfDateTask(Task):
def needs_execute(self):
"""
Returns True (indicating this Task should be executed) if this
Task's target state indicates it needs executing, which has
already been determined by an earlier up-to-date check.
"""
return self.targets[0].get_state() == SCons.Node.executing
def find_cycle(stack, visited):
if stack[-1] in visited:
return None
visited.add(stack[-1])
for n in stack[-1].waiting_parents:
stack.append(n)
if stack[0] == stack[-1]:
return stack
if find_cycle(stack, visited):
return stack
stack.pop()
return None
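# Hedged illustration (not from the SCons sources): find_cycle() walks the
# waiting_parents links depth-first; for a cycle A -> B -> C -> A it returns
# the stack [A, B, C, A] when invoked as find_cycle([A], set()), and None
# when the walk never returns to the starting node.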
class Taskmaster(object):
"""
The Taskmaster for walking the dependency DAG.
"""
def __init__(self, targets=[], tasker=None, order=None, trace=None):
self.original_top = targets
self.top_targets_left = targets[:]
self.top_targets_left.reverse()
self.candidates = []
if tasker is None:
tasker = OutOfDateTask
self.tasker = tasker
if not order:
order = lambda l: l
self.order = order
self.message = None
self.trace = trace
self.next_candidate = self.find_next_candidate
self.pending_children = set()
def find_next_candidate(self):
"""
Returns the next candidate Node for (potential) evaluation.
The candidate list (really a stack) initially consists of all of
the top-level (command line) targets provided when the Taskmaster
was initialized. While we walk the DAG, visiting Nodes, all the
children that haven't finished processing get pushed on to the
candidate list. Each child can then be popped and examined in
turn for whether *their* children are all up-to-date, in which
case a Task will be created for their actual evaluation and
potential building.
Here is where we also allow candidate Nodes to alter the list of
Nodes that should be examined. This is used, for example, when
invoking SCons in a source directory. A source directory Node can
return its corresponding build directory Node, essentially saying,
"Hey, you really need to build this thing over here instead."
"""
try:
return self.candidates.pop()
except IndexError:
pass
try:
node = self.top_targets_left.pop()
except IndexError:
return None
self.current_top = node
alt, message = node.alter_targets()
if alt:
self.message = message
self.candidates.append(node)
self.candidates.extend(self.order(alt))
node = self.candidates.pop()
return node
def no_next_candidate(self):
"""
Stops Taskmaster processing by not returning a next candidate.
Note that we have to clean-up the Taskmaster candidate list
because the cycle detection depends on the fact all nodes have
been processed somehow.
"""
while self.candidates:
candidates = self.candidates
self.candidates = []
self.will_not_build(candidates)
return None
def _validate_pending_children(self):
"""
Validate the content of the pending_children set. Assert if an
internal error is found.
This function is used strictly for debugging the taskmaster by
checking that no invariants are violated. It is not used in
normal operation.
The pending_children set is used to detect cycles in the
dependency graph. We call a "pending child" a child that is
found in the "pending" state when checking the dependencies of
its parent node.
A pending child can occur when the Taskmaster completes a loop
through a cycle. For example, lets imagine a graph made of
three node (A, B and C) making a cycle. The evaluation starts
at node A. The taskmaster first consider whether node A's
child B is up-to-date. Then, recursively, node B needs to
check whether node C is up-to-date. This leaves us with a
dependency graph looking like:
Next candidate \
\
Node A (Pending) --> Node B(Pending) --> Node C (NoState)
^ |
| |
+-------------------------------------+
Now, when the Taskmaster examines the Node C's child Node A,
it finds that Node A is in the "pending" state. Therefore,
Node A is a pending child of node C.
Pending children indicate that the Taskmaster has potentially
loop back through a cycle. We say potentially because it could
also occur when a DAG is evaluated in parallel. For example,
consider the following graph:
Node A (Pending) --> Node B(Pending) --> Node C (Pending) --> ...
| ^
| |
+----------> Node D (NoState) --------+
/
Next candidate /
The Taskmaster first evaluates the nodes A, B, and C and
starts building some children of node C. Assuming, that the
maximum parallel level has not been reached, the Taskmaster
will examine Node D. It will find that Node C is a pending
child of Node D.
In summary, evaluating a graph with a cycle will always
involve a pending child at one point. A pending child might
indicate either a cycle or a diamond-shaped DAG. Only a
fraction of the nodes ends-up being a "pending child" of
another node. This keeps the pending_children set small in
practice.
We can differentiate between the two cases if we wait until
the end of the build. At this point, all the pending children
nodes due to a diamond-shaped DAG will have been properly
built (or will have failed to build). But, the pending
children involved in a cycle will still be in the pending
state.
The taskmaster removes nodes from the pending_children set as
soon as a pending_children node moves out of the pending
state. This also helps to keep the pending_children set small.
"""
for n in self.pending_children:
assert n.state in (NODE_PENDING, NODE_EXECUTING), \
(str(n), StateString[n.state])
assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents))
for p in n.waiting_parents:
assert p.ref_count > 0, (str(n), str(p), p.ref_count)
def trace_message(self, message):
return 'Taskmaster: %s\n' % message
def trace_node(self, node):
return '<%-10s %-3s %s>' % (StateString[node.get_state()],
node.ref_count,
repr(str(node)))
def _find_next_ready_node(self):
"""
Finds the next node that is ready to be built.
This is *the* main guts of the DAG walk. We loop through the
list of candidates, looking for something that has no un-built
children (i.e., that is a leaf Node or has dependencies that are
all leaf Nodes or up-to-date). Candidate Nodes are re-scanned
(both the target Node itself and its sources, which are always
scanned in the context of a given target) to discover implicit
dependencies. A Node that must wait for some children to be
built will be put back on the candidates list after the children
have finished building. A Node that has been put back on the
candidates list in this way may have itself (or its sources)
re-scanned, in order to handle generated header files (e.g.) and
the implicit dependencies therein.
Note that this method does not do any signature calculation or
up-to-date check itself. All of that is handled by the Task
class. This is purely concerned with the dependency graph walk.
"""
self.ready_exc = None
T = self.trace
if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate'))
while True:
node = self.next_candidate()
if node is None:
if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
return None
node = node.disambiguate()
state = node.get_state()
# For debugging only:
#
# try:
# self._validate_pending_children()
# except:
# self.ready_exc = sys.exc_info()
# return node
if CollectStats:
if not hasattr(node, 'stats'):
node.stats = Stats()
StatsNodes.append(node)
S = node.stats
S.considered = S.considered + 1
else:
S = None
if T: T.write(self.trace_message(u' Considering node %s and its children:' % self.trace_node(node)))
if state == NODE_NO_STATE:
# Mark this node as being on the execution stack:
node.set_state(NODE_PENDING)
elif state > NODE_PENDING:
# Skip this node if it has already been evaluated:
if S: S.already_handled = S.already_handled + 1
if T: T.write(self.trace_message(u' already handled (executed)'))
continue
executor = node.get_executor()
try:
children = executor.get_all_children()
except SystemExit:
exc_value = sys.exc_info()[1]
e = SCons.Errors.ExplicitExit(node, exc_value.code)
self.ready_exc = (SCons.Errors.ExplicitExit, e)
if T: T.write(self.trace_message(' SystemExit'))
return node
except Exception, e:
# We had a problem just trying to figure out the
# children (like a child couldn't be linked in to a
# VariantDir, or a Scanner threw something). Arrange to
# raise the exception when the Task is "executed."
self.ready_exc = sys.exc_info()
if S: S.problem = S.problem + 1
if T: T.write(self.trace_message(' exception %s while scanning children.\n' % e))
return node
children_not_visited = []
children_pending = set()
children_not_ready = []
children_failed = False
for child in chain(executor.get_all_prerequisites(), children):
childstate = child.get_state()
if T: T.write(self.trace_message(u' ' + self.trace_node(child)))
if childstate == NODE_NO_STATE:
children_not_visited.append(child)
elif childstate == NODE_PENDING:
children_pending.add(child)
elif childstate == NODE_FAILED:
children_failed = True
if childstate <= NODE_EXECUTING:
children_not_ready.append(child)
# These nodes have not even been visited yet. Add
# them to the list so that on some next pass we can
# take a stab at evaluating them (or their children).
children_not_visited.reverse()
self.candidates.extend(self.order(children_not_visited))
#if T and children_not_visited:
# T.write(self.trace_message(' adding to candidates: %s' % map(str, children_not_visited)))
# T.write(self.trace_message(' candidates now: %s\n' % map(str, self.candidates)))
# Skip this node if any of its children have failed.
#
# This catches the case where we're descending a top-level
# target and one of our children failed while trying to be
# built by a *previous* descent of an earlier top-level
# target.
#
# It can also occur if a node is reused in multiple
            # targets: one descent reaches it through the first
            # target, a later descent reaches it through the other.
#
# Note that we can only have failed_children if the
# --keep-going flag was used, because without it the build
# will stop before diving in the other branch.
#
# Note that even if one of the children fails, we still
# added the other children to the list of candidate nodes
# to keep on building (--keep-going).
if children_failed:
for n in executor.get_action_targets():
n.set_state(NODE_FAILED)
if S: S.child_failed = S.child_failed + 1
if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
continue
if children_not_ready:
for child in children_not_ready:
# We're waiting on one or more derived targets
# that have not yet finished building.
if S: S.not_built = S.not_built + 1
# Add this node to the waiting parents lists of
# anything we're waiting on, with a reference
# count so we can be put back on the list for
# re-evaluation when they've all finished.
node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
if T: T.write(self.trace_message(u' adjusted ref count: %s, child %s' %
(self.trace_node(node), repr(str(child)))))
if T:
for pc in children_pending:
T.write(self.trace_message(' adding %s to the pending children set\n' %
self.trace_node(pc)))
self.pending_children = self.pending_children | children_pending
continue
# Skip this node if it has side-effects that are
# currently being built:
wait_side_effects = False
for se in executor.get_action_side_effects():
if se.get_state() == NODE_EXECUTING:
se.add_to_waiting_s_e(node)
wait_side_effects = True
if wait_side_effects:
if S: S.side_effects = S.side_effects + 1
continue
# The default when we've gotten through all of the checks above:
# this node is ready to be built.
if S: S.build = S.build + 1
if T: T.write(self.trace_message(u'Evaluating %s\n' %
self.trace_node(node)))
# For debugging only:
#
# try:
# self._validate_pending_children()
# except:
# self.ready_exc = sys.exc_info()
# return node
return node
return None
def next_task(self):
"""
Returns the next task to be executed.
This simply asks for the next Node to be evaluated, and then wraps
it in the specific Task subclass with which we were initialized.
"""
node = self._find_next_ready_node()
if node is None:
return None
executor = node.get_executor()
if executor is None:
return None
tlist = executor.get_all_targets()
task = self.tasker(self, tlist, node in self.original_top, node)
try:
task.make_ready()
except:
# We had a problem just trying to get this task ready (like
# a child couldn't be linked in to a VariantDir when deciding
# whether this node is current). Arrange to raise the
# exception when the Task is "executed."
self.ready_exc = sys.exc_info()
if self.ready_exc:
task.exception_set(self.ready_exc)
self.ready_exc = None
return task
def will_not_build(self, nodes, node_func=lambda n: None):
"""
Perform clean-up about nodes that will never be built. Invokes
a user defined function on all of these nodes (including all
of their parents).
"""
T = self.trace
pending_children = self.pending_children
to_visit = set(nodes)
pending_children = pending_children - to_visit
if T:
for n in nodes:
T.write(self.trace_message(' removing node %s from the pending children set\n' %
self.trace_node(n)))
try:
while len(to_visit):
node = to_visit.pop()
node_func(node)
# Prune recursion by flushing the waiting children
# list immediately.
parents = node.waiting_parents
node.waiting_parents = set()
to_visit = to_visit | parents
pending_children = pending_children - parents
for p in parents:
p.ref_count = p.ref_count - 1
if T: T.write(self.trace_message(' removing parent %s from the pending children set\n' %
self.trace_node(p)))
except KeyError:
# The container to_visit has been emptied.
pass
        # We have to stick the pending_children set back into the
        # taskmaster because the python 1.5.2 compatibility does not
        # allow us to use in-place updates
self.pending_children = pending_children
def stop(self):
"""
Stops the current build completely.
"""
self.next_candidate = self.no_next_candidate
def cleanup(self):
"""
Check for dependency cycles.
"""
if not self.pending_children:
return
nclist = [(n, find_cycle([n], set())) for n in self.pending_children]
genuine_cycles = [
node for node,cycle in nclist
if cycle or node.get_state() != NODE_EXECUTED
]
if not genuine_cycles:
# All of the "cycles" found were single nodes in EXECUTED state,
# which is to say, they really weren't cycles. Just return.
return
desc = 'Found dependency cycle(s):\n'
for node, cycle in nclist:
if cycle:
desc = desc + " " + " -> ".join(map(str, cycle)) + "\n"
else:
desc = desc + \
" Internal Error: no cycle found for node %s (%s) in state %s\n" % \
(node, repr(node), StateString[node.get_state()])
raise SCons.Errors.UserError(desc)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
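# Hedged usage sketch (not part of this module): a minimal serial driver in
# the style of SCons' job runner, showing the intended Taskmaster/Task
# handshake. `top_nodes` is an assumed list of SCons.Node objects.
#
# tm = Taskmaster(targets=top_nodes)
# while True:
#     task = tm.next_task()
#     if task is None:
#         break
#     try:
#         task.prepare()
#         if task.needs_execute():
#             task.execute()
#     except Exception:
#         task.exception_set()
#         task.failed()
#     else:
#         task.executed()
#     task.postprocess()
# tm.cleanup()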
|
andrewyoung1991/scons
|
src/engine/SCons/Taskmaster.py
|
Python
|
mit
| 40,474
|
[
"VisIt"
] |
205bcd327588e865fc62046ce8cf8c6dab05f2c5f47438f9803eb2549aaede46
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright 2012-2020 Ronald Römer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
if sys.platform.startswith('linux'):
sys.path.extend(sys.argv[1:])
elif sys.platform.startswith('win'):
sys.path.extend([sys.argv[1],
'/'.join(sys.argv[2:])])
import vtkboolPython
vtkboolPython.vtkPolyDataBooleanFilter()
#import vtk
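# Hedged usage sketch (not part of this test): a typical boolean operation
# with the filter. The SetInputConnection/SetOperModeToDifference names follow
# the vtkbool README but should be treated as assumptions here; the sphere
# sources come from the regular vtk package.
#
# import vtk
# sphA, sphB = vtk.vtkSphereSource(), vtk.vtkSphereSource()
# sphB.SetCenter(0.5, 0.0, 0.0)
# bf = vtkboolPython.vtkPolyDataBooleanFilter()
# bf.SetInputConnection(0, sphA.GetOutputPort())
# bf.SetInputConnection(1, sphB.GetOutputPort())
# bf.SetOperModeToDifference()
# bf.Update()
# result = bf.GetOutput()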
|
zippy84/vtkbool
|
testing/test_py_module.py
|
Python
|
apache-2.0
| 893
|
[
"VTK"
] |
89b22a9e21d8696e731bdbf114e51405d41bc3152a86b81fac77b099dd9a1959
|
"""
The Job Path executor determines the chain of Optimizing Agents that must
work on the job prior to the scheduling decision.
Initially this takes jobs in the received state and starts the jobs on the
optimizer chain.
"""
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import List
from DIRAC.WorkloadManagementSystem.Executor.Base.OptimizerExecutor import OptimizerExecutor
class JobPath(OptimizerExecutor):
"""
The specific Optimizer must provide the following methods:
- optimizeJob() - the main method called for each job
and it can provide:
- initializeOptimizer() before each execution cycle
"""
@classmethod
def initializeOptimizer(cls):
cls.__voPlugins = {}
return S_OK()
def __setOptimizerChain(self, jobState, opChain):
if not isinstance(opChain, str):
opChain = ",".join(opChain)
return jobState.setOptParameter("OptimizerChain", opChain)
def __executeVOPlugin(self, voPlugin, jobState):
if voPlugin not in self.__voPlugins:
modName = List.fromChar(voPlugin, ".")[-1]
try:
module = __import__(voPlugin, globals(), locals(), [modName])
except ImportError:
self.jobLog.exception("Could not import VO plugin", voPlugin)
return S_ERROR("Could not import VO plugin")
try:
self.__voPlugins[voPlugin] = getattr(module, modName)
except AttributeError as excp:
return S_ERROR("Could not get plugin %s from module %s: %s" % (modName, voPlugin, str(excp)))
argsDict = {"JobID": jobState.jid, "JobState": jobState, "ConfigPath": self.ex_getProperty("section")}
try:
modInstance = self.__voPlugins[voPlugin](argsDict)
result = modInstance.execute()
except Exception:
self.jobLog.exception("Excp while executing", voPlugin)
return S_ERROR("Could not execute VO plugin")
if not result["OK"]:
return result
extraPath = result["Value"]
if isinstance(extraPath, str):
extraPath = List.fromChar(result["Value"])
return S_OK(extraPath)
def optimizeJob(self, jid, jobState):
result = jobState.getManifest()
if not result["OK"]:
self.jobLog.error("Failed to get job manifest", result["Message"])
return result
jobManifest = result["Value"]
opChain = jobManifest.getOption("JobPath", [])
if opChain:
self.jobLog.info("Job defines its own optimizer chain", opChain)
result = self.__setOptimizerChain(jobState, opChain)
if not result["OK"]:
self.jobLog.error("Failed to set optimizer chain", result["Message"])
return result
return self.setNextOptimizer(jobState)
# Construct path
opPath = self.ex_getOption("BasePath", ["JobPath", "JobSanity"])
voPlugin = self.ex_getOption("VOPlugin", "")
# Specific VO path
if voPlugin:
result = self.__executeVOPlugin(voPlugin, jobState)
if not result["OK"]:
return result
extraPath = result["Value"]
if extraPath:
opPath.extend(extraPath)
self.jobLog.verbose("Adding extra VO specific optimizers to path", extraPath)
else:
# Generic path: Should only rely on an input data setting in absence of VO plugin
self.jobLog.verbose("No VO specific plugin module specified")
result = jobState.getInputData()
if not result["OK"]:
self.jobLog.error("Failed to get input data", result["Message"])
return result
if result["Value"]:
# if the returned tuple is not empty it will evaluate true
self.jobLog.info("Input data requirement found")
opPath.extend(self.ex_getOption("InputData", ["InputData"]))
else:
self.jobLog.info("No input data requirement")
# End of path
opPath.extend(self.ex_getOption("EndPath", ["JobScheduling"]))
uPath = []
for opN in opPath:
if opN not in uPath:
uPath.append(opN)
opPath = uPath
self.jobLog.info("Constructed path is", "%s" % "->".join(opPath))
result = self.__setOptimizerChain(jobState, opPath)
if not result["OK"]:
self.jobLog.error("Failed to set optimizer chain", result["Message"])
return result
return self.setNextOptimizer(jobState)
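# Hedged illustration (not part of DIRAC): with the default options above and
# no VOPlugin configured, a job that declares input data ends up with the
# optimizer chain
#     JobPath -> JobSanity -> InputData -> JobScheduling
# which __setOptimizerChain() stores as the job parameter
#     OptimizerChain = "JobPath,JobSanity,InputData,JobScheduling"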
|
DIRACGrid/DIRAC
|
src/DIRAC/WorkloadManagementSystem/Executor/JobPath.py
|
Python
|
gpl-3.0
| 4,662
|
[
"DIRAC"
] |
44f17cab34f8436bda86d434eff37364a58a1c394d82bd4d55be9850da722024
|
"""
Determine which packages need updates after pinning change
"""
import enum
from itertools import chain
import logging
import re
import string
from .utils import RepoData
# FIXME: trim_build_only_deps is not exported via conda_build.api!
# Re-implement it here or ask upstream to export that functionality.
from conda_build.metadata import trim_build_only_deps
# for type checking
from typing import AbstractSet, List, Set, Tuple
from .recipe import Recipe, RecipeError
from conda_build.metadata import MetaData
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def _get_build_variants(meta: MetaData) -> Set[str]:
# This is the same behavior as in
# conda_build.metadata.Metadata.get_hash_contents but without leaving out
# "build_string_excludes" (python, r_base, etc.).
dependencies = set(meta.get_used_vars())
trim_build_only_deps(meta, dependencies)
return dependencies
def skip_for_variants(meta: MetaData, variant_keys: AbstractSet[str]) -> bool:
"""Check if the recipe uses any given variant keys
Args:
meta: Variant MetaData object
Returns:
True if any variant key from variant_keys is used
"""
dependencies = _get_build_variants(meta)
return not dependencies.isdisjoint(variant_keys)
def will_build_variant(meta: MetaData) -> bool:
"""Check if the recipe variant will be built as currently rendered
Args:
meta: Variant MetaData object
Returns:
True if all extant build numbers are smaller than the one indicated
by the variant MetaData.
"""
build_numbers = RepoData().get_package_data(
'build_number',
name=meta.name(), version=meta.version(),
platform=['linux', 'noarch'],
)
current_num = int(meta.build_number())
res = all(num < current_num for num in build_numbers)
if res:
logger.debug("Package %s=%s will be built already because %s < %s)",
meta.name(), meta.version(),
max(build_numbers) if build_numbers else "N/A",
meta.build_number())
return res
_legacy_build_string_prefixes = re.compile(
'''
^
(
(?P<numpy> np [0-9]{2,9}) |
(?P<python> py [0-9]{2,9}) |
(?P<perl> pl [0-9]{2,9}) |
(?P<lua> lua [0-9]{2,9}) |
(?P<r_base> r [0-9]{2,9}) |
(?P<mro_base> mro [0-9]{3,9})
)*
''',
re.X,
)
# TODO: clean this mess up
def _have_partially_matching_build_id(meta):
# Stupid legacy special handling:
res = RepoData().get_package_data(
'build',
name=meta.name(), version=meta.version(),
build_number=meta.build_number(),
platform=['linux', 'noarch'],
)
is_noarch = bool(meta.noarch)
current_build_id = meta.build_id()
current_matches = _legacy_build_string_prefixes.match(current_build_id)
current_prefixes = current_matches.groupdict()
    # conda-build adds "special" substrings for some packages to the build
    # string (e.g., "py38", "pl526", ...). When we use `bypass_env_check`,
    # it does not add those substrings somehow (?).
# But during the actual build, it adds those substrings even for run-only
# dependencies (see "blast" recipe with its "perl" run-dep for example).
#
# FIXME: The following is probably how it _should_ be done.
# But we still have a lot of recipes that only define a requirements/build
    # without a requirements/host section. conda-build seems to create the
    # host/build env however it sees fit then. E.g.:
# conda search 'gmap=2017.10.30[build_number=6,subdir=linux-64]'|tail -n1
# Loading channels: done
# # Name Version Build Channel
# gmap 2017.10.30 h2f06484_6 bioconda
# $ conda search 'gmap=2017.10.30[build_number=6,subdir=osx-64]'|tail -n1
# Loading channels: done
# # Name Version Build Channel
# gmap 2017.10.30 pl526h977ceac_6 bioconda
#
# build_deps = [
# dep.split()[0].replace('-', '_')
# for dep in
# chain(
# meta.get_value('requirements/build', []),
# meta.get_value('requirements/host', []),
# )
# ]
# # noarch:python is handled in have_noarch_python_build_number
# # but we might have noarch:generic recipes that use python.
# # It probably doesn't matter which python is chosen then, so
# # we also trim the "py*" prefix in that case here.
# if is_noarch and current_prefixes['python']:
# current_build_id = current_build_id.replace(current_prefixes['python'], '')
#
# def is_matching_trimmed_build_id(build_id, current_build_id):
# matches = _legacy_build_string_prefixes.match(build_id)
# trimmed_build_id = build_id
# for prefix_key, prefix in matches.groupdict().items():
# if prefix:
# if prefix_key in build_deps:
# continue
# if not (is_noarch and prefix_key == 'python'):
# continue
# trimmed_build_id = trimmed_build_id.replace(prefix, '')
# if trimmed_build_id.startswith('_'):
# # If we trimmed everything but the number, no '_' is inserted.
# trimmed_build_id = trimmed_build_id[1:]
# if trimmed_build_id == current_build_id:
# return True
# return False
def is_matching_trimmed_build_id(build_id, current_build_id):
matches = _legacy_build_string_prefixes.match(build_id)
trimmed_build_id = build_id
trimmed_current_build_id = current_build_id
for prefix_key, prefix in matches.groupdict().items():
current_prefix = current_prefixes[prefix_key]
if prefix != current_prefix:
if prefix and current_prefix:
# noarch:python is handled in have_noarch_python_build_number
# but we might have noarch:generic recipes that use python.
# It probably doesn't matter which python is chosen then, so
# we also trim the "py*" prefix in that case here.
if not (is_noarch and prefix_key == 'python'):
return False
if prefix:
trimmed_build_id = trimmed_build_id.replace(prefix, '')
if current_prefix:
trimmed_current_build_id = trimmed_current_build_id.replace(current_prefix, '')
if trimmed_build_id.startswith('_'):
# If we trimmed everything but the number, no '_' is inserted.
trimmed_build_id = trimmed_build_id[1:]
if trimmed_current_build_id.startswith('_'):
# If we trimmed everything but the number, no '_' is inserted.
trimmed_current_build_id = trimmed_current_build_id[1:]
if trimmed_build_id == trimmed_current_build_id:
return True
return False
for build_id in res:
if is_matching_trimmed_build_id(build_id, current_build_id):
logger.debug("Package %s=%s=%s exists",
meta.name(), meta.version(), build_id)
return True
return False
def have_variant(meta: MetaData) -> bool:
"""Checks if we have an exact match to name/version/buildstring
Args:
meta: Variant MetaData object
Returns:
True if the variant's build string exists already in the repodata
"""
res = RepoData().get_package_data(
name=meta.name(), version=meta.version(), build=meta.build_id(),
platform=['linux', 'noarch']
)
if res:
logger.debug("Package %s=%s=%s exists",
meta.name(), meta.version(), meta.build_id())
return True
return _have_partially_matching_build_id(meta)
def have_noarch_python_build_number(meta: MetaData) -> bool:
"""Checks if we have a noarch:python build with same version+build_number
Args:
meta: Variant MetaData object
Returns:
True if noarch:python and version+build_number exists already in repodata
"""
if meta.get_value('build/noarch') != 'python':
return False
res = RepoData().get_package_data(
name=meta.name(), version=meta.version(),
build_number=meta.build_number(),
platform=['noarch'],
)
if res:
logger.debug("Package %s=%s[build_number=%s, subdir=noarch] exists",
meta.name(), meta.version(), meta.build_number())
    return bool(res)
def will_build_only_missing(metas: List[MetaData]) -> bool:
"""Checks if only new builds will be added (no divergent build ids exist)
Args:
metas: List of Variant MetaData objects
Returns:
True if no divergent build strings exist in repodata
"""
builds = {
(meta.name(), meta.version(), meta.build_number())
for meta in metas
}
existing_builds = set()
for name, version, build_number in builds:
existing_builds.update(
map(
tuple,
RepoData().get_package_data(
["name", "version", "build"],
name=name, version=version, build_number=build_number,
platform=['linux', 'noarch'],
),
),
)
new_builds = {
(meta.name(), meta.version(), meta.build_id())
for meta in metas
}
return new_builds.issuperset(existing_builds)
class State(enum.Flag):
"""Recipe Pinning State"""
#: Recipe had a failure rendering
FAIL = enum.auto()
#: Recipe has a variant that will be skipped
SKIP = enum.auto()
#: Recipe has a variant that exists already
HAVE = enum.auto()
#: Recipe has a variant that was bumped already
BUMPED = enum.auto()
#: Recipe has a variant that needs bumping
BUMP = enum.auto()
#: Recipe has a noarch:python variant that exists already
HAVE_NOARCH_PYTHON = enum.auto()
    def needs_bump(self) -> bool:
        """Checks if the state indicates that the recipe needs to be bumped
        """
        return bool(self & self.BUMP)
    def failed(self) -> bool:
        """True if the update pinning check failed"""
        return bool(self & self.FAIL)
allowed_build_string_characters = frozenset(
string.digits + string.ascii_uppercase + string.ascii_lowercase + '_.'
)
def has_invalid_build_string(meta: MetaData) -> bool:
build_string = meta.build_id()
return not (build_string and set(build_string).issubset(allowed_build_string_characters))
def check(
recipe: Recipe,
build_config,
keep_metas=False,
skip_variant_keys: AbstractSet[str] = frozenset(),
) -> Tuple[State, Recipe]:
"""Determine if a given recipe should have its build number increments
(bumped) due to a recent change in pinnings.
Args:
recipe: The recipe to check
build_config: conda build config object
keep_metas: If true, `Recipe.conda_release` is not called
skip_variant_keys: Variant keys to skip a recipe for if they are used
Returns:
        Tuple of the resulting state and the input recipe
"""
try:
logger.debug("Calling Conda to render %s", recipe)
maybe_metas = recipe.conda_render(config=build_config)
logger.debug("Finished rendering %s", recipe)
except RecipeError as exc:
logger.error(exc)
return State.FAIL, recipe
except Exception as exc:
logger.exception("update_pinnings.check failed with exception in api.render(%s):", recipe)
return State.FAIL, recipe
if maybe_metas is None:
logger.error("Failed to render %s. Got 'None' from recipe.conda_render()", recipe)
return State.FAIL, recipe
metas = [meta for meta, _, _ in maybe_metas]
if any(has_invalid_build_string(meta) for meta in metas):
logger.error(
"Failed to get build strings for %s with bypass_env_check. "
"Probably needs build/skip instead of dep constraint.",
recipe,
)
return State.FAIL, recipe
flags = State(0)
maybe_bump = False
for meta in metas:
if meta.skip() or skip_for_variants(meta, skip_variant_keys):
flags |= State.SKIP
elif have_noarch_python_build_number(meta):
flags |= State.HAVE_NOARCH_PYTHON
elif have_variant(meta):
flags |= State.HAVE
elif will_build_variant(meta):
flags |= State.BUMPED
else:
logger.info("Package %s=%s=%s missing!",
meta.name(), meta.version(), meta.build_id())
maybe_bump = True
if maybe_bump:
# Skip bump if we only add to the build matrix.
if will_build_only_missing(metas):
flags |= State.BUMPED
else:
flags |= State.BUMP
if not keep_metas:
recipe.conda_release()
return flags, recipe
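# Hedged usage sketch (not part of bioconda-utils): driving check() over a
# set of recipes. `load_recipes` and `get_build_config` are hypothetical
# helpers standing in for bioconda-utils' recipe discovery and conda-build
# configuration; only State's own methods are used on the result.
#
# for recipe in load_recipes(recipe_folder):
#     state, recipe = check(recipe, get_build_config())
#     if state.failed():
#         logger.error("could not evaluate %s", recipe)
#     elif state.needs_bump():
#         logger.info("%s needs a build number bump", recipe)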
|
bioconda/bioconda-utils
|
bioconda_utils/update_pinnings.py
|
Python
|
mit
| 13,007
|
[
"BLAST",
"Bioconda"
] |
385038da29598c62c18cb92f8fa4f5418157ed504b48fa4125ed1fd4911f92f3
|
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import numpy, time, pickle, gzip, sys, os, copy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import logging
class MixtureDensityOutputLayer(object):
def __init__(self, rng, input, n_in, n_out, n_component):
self.input = input
W_value = rng.normal(0.0, 1.0/numpy.sqrt(n_in), size=(n_in, n_out*n_component))
self.W_mu = theano.shared(value=numpy.asarray(W_value, dtype=theano.config.floatX), name='W_mu', borrow=True)
self.W_sigma = theano.shared(value=numpy.asarray(W_value.copy(), dtype=theano.config.floatX), name='W_sigma', borrow=True)
W_mix_value = rng.normal(0.0, 1.0/numpy.sqrt(n_in), size=(n_in, n_component))
self.W_mix = theano.shared(value=numpy.asarray(W_mix_value, dtype=theano.config.floatX), name='W_mix', borrow=True)
self.mu = T.dot(self.input, self.W_mu) #assume linear output for mean vectors
self.sigma = T.nnet.softplus(T.dot(self.input, self.W_sigma)) # + 0.0001
#self.sigma = T.exp(T.dot(self.input, self.W_sigma)) # + 0.0001
self.mix = T.nnet.softmax(T.dot(self.input, self.W_mix))
self.delta_W_mu = theano.shared(value = numpy.zeros((n_in, n_out*n_component),
dtype=theano.config.floatX), name='delta_W_mu')
self.delta_W_sigma = theano.shared(value = numpy.zeros((n_in, n_out*n_component),
dtype=theano.config.floatX), name='delta_W_sigma')
self.delta_W_mix = theano.shared(value = numpy.zeros((n_in, n_component),
dtype=theano.config.floatX), name='delta_W_mix')
self.params = [self.W_mu, self.W_sigma, self.W_mix]
self.delta_params = [self.delta_W_mu, self.delta_W_sigma, self.delta_W_mix]
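# Hedged sketch (not part of merlin): the usual training criterion for the
# mixture density outputs above is the negative log-likelihood of a diagonal
# Gaussian mixture. Shapes assumed here: mu and sigma are
# (batch, n_out*n_component), mix is (batch, n_component), and y is an assumed
# (batch, n_out) target matrix; `mdn` is an assumed layer instance.
#
# mu3 = mdn.mu.reshape((-1, n_component, n_out))
# sg3 = mdn.sigma.reshape((-1, n_component, n_out))
# y3 = y.dimshuffle(0, 'x', 1)
# log_gauss = -0.5 * T.sum(T.sqr((y3 - mu3) / sg3) + 2.0 * T.log(sg3)
#                          + T.log(2.0 * numpy.pi), axis=2)
# loglik = T.log(T.sum(mdn.mix * T.exp(log_gauss), axis=1) + 1e-8)
# cost = -T.mean(loglik)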
class LinearLayer(object):
def __init__(self, rng, input, n_in, n_out, W = None, b = None):
self.input = input
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
if W is None:
W_value = rng.normal(0.0, 1.0/numpy.sqrt(n_in), size=(n_in, n_out))
W = theano.shared(value=numpy.asarray(W_value, dtype=theano.config.floatX), name='W', borrow=True)
if b is None:
b = theano.shared(value=numpy.zeros((n_out,),
dtype=theano.config.floatX),
name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
self.output = T.dot(self.input, self.W) + self.b
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def errors(self, y):
L = T.sum( (self.output-y)*(self.output-y), axis=1 )
errors = T.mean(L)
return (errors)
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
class SigmoidLayer(object):
def __init__(self, rng, input, n_in, n_out, W = None, b = None, activation = T.tanh):
self.input = input
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
if W is None:
W_value = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in),
size=(n_in, n_out)), dtype=theano.config.floatX)
W = theano.shared(value=W_value,
name='W', borrow=True)
if b is None:
b = theano.shared(value=numpy.zeros((n_out,),
dtype=theano.config.floatX),
name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
self.output = T.dot(self.input, self.W) + self.b
self.output = activation(self.output)
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def errors(self, y):
L = T.sum( (self.output-y)*(self.output-y), axis=1 )
errors = T.mean(L)
return (errors)
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
class GeneralLayer(object):
def __init__(self, rng, input, n_in, n_out, W = None, b = None, activation = 'linear'):
self.input = input
self.n_in = n_in
self.n_out = n_out
self.logger = logging.getLogger('general_layer')
# randomly initialise the activation weights based on the input size, as advised by the 'tricks of neural network book'
if W is None:
W_values = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in),
size=(n_in, n_out)), dtype=theano.config.floatX)
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
lin_output = T.dot(input, self.W) + self.b
if activation == 'sigmoid':
self.output = T.nnet.sigmoid(lin_output)
elif activation == 'tanh':
self.output = T.tanh(lin_output)
elif activation == 'linear':
self.output = lin_output
elif activation == 'ReLU': ## rectifier linear unit
self.output = T.maximum(0.0, lin_output)
        elif activation == 'ReSU': ## rectifier smooth unit (softplus)
            # use symbolic Theano ops; numpy.log/exp would fail on a Theano tensor
            self.output = T.log(1.0 + T.exp(lin_output))
        else:
            self.logger.critical('the input activation function: %s is not supported right now. Please modify layers.py to support' % (activation))
            raise ValueError('unsupported activation function: %s' % (activation))
# parameters of the model
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def errors(self, y):
errors = T.mean(T.sum((self.output-y)**2, axis=1))
return errors
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh, do_maxout = False, pool_size = 1,
do_pnorm = False, pnorm_order = 1):
""" Class for hidden layer """
self.input = input
self.n_in = n_in
self.n_out = n_out
if W is None:
W_values = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in),
size=(n_in, n_out)), dtype=theano.config.floatX)
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
lin_output = T.dot(input, self.W) + self.b
if do_maxout == True:
self.last_start = n_out - pool_size
self.tmp_output = lin_output[:,0:self.last_start+1:pool_size]
for i in range(1, pool_size):
cur = lin_output[:,i:self.last_start+i+1:pool_size]
self.tmp_output = T.maximum(cur, self.tmp_output)
self.output = activation(self.tmp_output)
elif do_pnorm == True:
self.last_start = n_out - pool_size
self.tmp_output = abs(lin_output[:,0:self.last_start+1:pool_size]) ** pnorm_order
for i in range(1, pool_size):
cur = abs(lin_output[:,i:self.last_start+i+1:pool_size]) ** pnorm_order
self.tmp_output = self.tmp_output + cur
self.tmp_output = self.tmp_output ** (1.0 / pnorm_order)
self.output = activation(self.tmp_output)
else:
self.output = (lin_output if activation is None
else activation(lin_output))
# self.output = self.rectifier_linear(lin_output)
# parameters of the model
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def rectifier_linear(self, x):
x = T.maximum(0.0, x)
return x
    def rectifier_smooth(self, x):
        x = T.log(1.0 + T.exp(x))
        return x
class dA(object):
def __init__(self, numpy_rng, theano_rng = None, input = None,
n_visible= None, n_hidden= None, W = None, bhid = None,
bvis = None, firstlayer = 0, variance = None ):
self.n_visible = n_visible
self.n_hidden = n_hidden
# create a Theano random generator that gives symbolic random values
if not theano_rng :
theano_rng = RandomStreams(numpy_rng.randint(2**30))
if not W:
initial_W = numpy.asarray( numpy_rng.uniform(
low = -4*numpy.sqrt(6./(n_hidden+n_visible)),
high = 4*numpy.sqrt(6./(n_hidden+n_visible)),
size = (n_visible, n_hidden)),
dtype = theano.config.floatX)
W = theano.shared(value = initial_W, name ='W')
if not bvis:
bvis = theano.shared(value = numpy.zeros(n_visible,
dtype = theano.config.floatX))
if not bhid:
bhid = theano.shared(value = numpy.zeros(n_hidden,
dtype = theano.config.floatX), name ='b')
self.W = W
self.b = bhid
self.b_prime = bvis
self.W_prime = self.W.T
self.theano_rng = theano_rng
        if input is None:
self.x = T.dmatrix(name = 'input')
else:
self.x = input
self.params = [self.W, self.b, self.b_prime]
# first layer, use Gaussian noise
self.firstlayer = firstlayer
if self.firstlayer == 1 :
            if variance is None:
self.var = T.vector(name = 'input')
else :
self.var = variance
else :
self.var = None
def get_corrupted_input(self, input, corruption_level):
if self.firstlayer == 0 :
return self.theano_rng.binomial(
size = input.shape,
n = 1,
p = 1 - corruption_level,
dtype=theano.config.floatX) * input
else :
noise = self.theano_rng.normal( size = input.shape,
dtype = theano.config.floatX)
denoises = noise * self.var * corruption_level
return input+denoises
def get_hidden_values(self, input):
return T.nnet.sigmoid(T.dot(input, self.W) + self.b)
def get_reconstructed_input(self, hidden ):
if self.firstlayer == 1 :
return T.dot(hidden, self.W_prime) + self.b_prime
else :
return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)
def get_cost_updates(self, corruption_level, learning_rate):
tilde_x = self.get_corrupted_input(self.x, corruption_level)
y = self.get_hidden_values( tilde_x )
z = self.get_reconstructed_input(y)
L = T.sum ( (self.x-z) * (self.x-z), axis=1 )
cost = T.mean(L) / 2
gparams = T.grad(cost, self.params)
updates = {}
for param, gparam in zip(self.params, gparams):
updates[param] = param - learning_rate*gparam
return (cost, updates)
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
def get_test_cost(self, corruption_level):
""" This function computes the cost and the updates for one trainng
step of the dA """
# tilde_x = self.get_corrupted_input(self.x, corruption_level, 0.5)
y = self.get_hidden_values( self.x )
z = self.get_reconstructed_input(y)
L = T.sum ( (self.x-z) * (self.x-z), axis=1)
cost = T.mean(L)
return cost
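# --- Hypothetical smoke test (not part of the original file) ---
# A minimal sketch, assuming Theano is installed and accepts a plain dict of
# parameter updates: exercise one layer of each kind defined above and one
# denoising-autoencoder training step on random data.
if __name__ == "__main__":
    rng = numpy.random.RandomState(0)
    x = T.matrix('x')
    data = rng.rand(10, 8).astype(theano.config.floatX)
    # A sigmoid GeneralLayer mapping 8 inputs to 4 outputs.
    layer = GeneralLayer(rng, x, n_in=8, n_out=4, activation='sigmoid')
    print(theano.function([x], layer.output)(data).shape)  # expect (10, 4)
    # A maxout HiddenLayer: 6 linear units pooled in groups of 3 -> width 2.
    hidden = HiddenLayer(rng, x, n_in=8, n_out=6, activation=T.tanh,
                         do_maxout=True, pool_size=3)
    print(theano.function([x], hidden.output)(data).shape)  # expect (10, 2)
    # One denoising-autoencoder step: corrupt, reconstruct, descend gradient.
    da = dA(rng, input=x, n_visible=8, n_hidden=4)
    cost, updates = da.get_cost_updates(corruption_level=0.3, learning_rate=0.1)
    train = theano.function([x], cost, updates=updates)
    print(train(data))  # reconstruction cost on this batch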
|
bajibabu/merlin
|
src/layers/mdn_layers.py
|
Python
|
apache-2.0
| 15,599
|
[
"Gaussian"
] |
fdeaa08f14ac4ff99a9ebae7ba9f89297fc982e60b4fd364791d525f34b05aab
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provider related utilities
"""
from libcloud.utils.misc import get_driver as _get_provider_driver
from libcloud.utils.misc import set_driver as _set_provider_driver
from libcloud.compute.types import Provider, DEPRECATED_RACKSPACE_PROVIDERS
from libcloud.compute.types import OLD_CONSTANT_TO_NEW_MAPPING
__all__ = [
"Provider",
"DRIVERS",
"get_driver"]
DRIVERS = {
Provider.AZURE:
('libcloud.compute.drivers.azure', 'AzureNodeDriver'),
Provider.DUMMY:
('libcloud.compute.drivers.dummy', 'DummyNodeDriver'),
Provider.EC2_US_EAST:
('libcloud.compute.drivers.ec2', 'EC2NodeDriver'),
Provider.EC2_EU_WEST:
('libcloud.compute.drivers.ec2', 'EC2EUNodeDriver'),
Provider.EC2_US_WEST:
('libcloud.compute.drivers.ec2', 'EC2USWestNodeDriver'),
Provider.EC2_US_WEST_OREGON:
('libcloud.compute.drivers.ec2', 'EC2USWestOregonNodeDriver'),
Provider.EC2_AP_SOUTHEAST:
('libcloud.compute.drivers.ec2', 'EC2APSENodeDriver'),
Provider.EC2_AP_NORTHEAST:
('libcloud.compute.drivers.ec2', 'EC2APNENodeDriver'),
Provider.EC2_SA_EAST:
('libcloud.compute.drivers.ec2', 'EC2SAEastNodeDriver'),
Provider.EC2_AP_SOUTHEAST2:
('libcloud.compute.drivers.ec2', 'EC2APSESydneyNodeDriver'),
Provider.ECP:
('libcloud.compute.drivers.ecp', 'ECPNodeDriver'),
Provider.ELASTICHOSTS:
('libcloud.compute.drivers.elastichosts', 'ElasticHostsNodeDriver'),
Provider.ELASTICHOSTS_UK1:
('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK1NodeDriver'),
Provider.ELASTICHOSTS_UK2:
('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK2NodeDriver'),
Provider.ELASTICHOSTS_US1:
('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS1NodeDriver'),
Provider.ELASTICHOSTS_US2:
('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS2NodeDriver'),
Provider.ELASTICHOSTS_US3:
('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS3NodeDriver'),
Provider.ELASTICHOSTS_CA1:
('libcloud.compute.drivers.elastichosts', 'ElasticHostsCA1NodeDriver'),
Provider.ELASTICHOSTS_AU1:
('libcloud.compute.drivers.elastichosts', 'ElasticHostsAU1NodeDriver'),
Provider.ELASTICHOSTS_CN1:
('libcloud.compute.drivers.elastichosts', 'ElasticHostsCN1NodeDriver'),
Provider.SKALICLOUD:
('libcloud.compute.drivers.skalicloud', 'SkaliCloudNodeDriver'),
Provider.SERVERLOVE:
('libcloud.compute.drivers.serverlove', 'ServerLoveNodeDriver'),
Provider.CLOUDSIGMA:
('libcloud.compute.drivers.cloudsigma', 'CloudSigmaNodeDriver'),
Provider.GCE:
('libcloud.compute.drivers.gce', 'GCENodeDriver'),
Provider.GOGRID:
('libcloud.compute.drivers.gogrid', 'GoGridNodeDriver'),
Provider.RACKSPACE:
('libcloud.compute.drivers.rackspace', 'RackspaceNodeDriver'),
Provider.RACKSPACE_FIRST_GEN:
('libcloud.compute.drivers.rackspace', 'RackspaceFirstGenNodeDriver'),
Provider.HPCLOUD:
('libcloud.compute.drivers.hpcloud', 'HPCloudNodeDriver'),
Provider.KILI:
('libcloud.compute.drivers.kili', 'KiliCloudNodeDriver'),
Provider.VPSNET:
('libcloud.compute.drivers.vpsnet', 'VPSNetNodeDriver'),
Provider.LINODE:
('libcloud.compute.drivers.linode', 'LinodeNodeDriver'),
Provider.RIMUHOSTING:
('libcloud.compute.drivers.rimuhosting', 'RimuHostingNodeDriver'),
Provider.VOXEL:
('libcloud.compute.drivers.voxel', 'VoxelNodeDriver'),
Provider.SOFTLAYER:
('libcloud.compute.drivers.softlayer', 'SoftLayerNodeDriver'),
Provider.EUCALYPTUS:
('libcloud.compute.drivers.ec2', 'EucNodeDriver'),
Provider.IBM:
('libcloud.compute.drivers.ibm_sce', 'IBMNodeDriver'),
Provider.OPENNEBULA:
('libcloud.compute.drivers.opennebula', 'OpenNebulaNodeDriver'),
Provider.DREAMHOST:
('libcloud.compute.drivers.dreamhost', 'DreamhostNodeDriver'),
Provider.BRIGHTBOX:
('libcloud.compute.drivers.brightbox', 'BrightboxNodeDriver'),
Provider.NIMBUS:
('libcloud.compute.drivers.ec2', 'NimbusNodeDriver'),
Provider.BLUEBOX:
('libcloud.compute.drivers.bluebox', 'BlueboxNodeDriver'),
Provider.GANDI:
('libcloud.compute.drivers.gandi', 'GandiNodeDriver'),
Provider.OPSOURCE:
('libcloud.compute.drivers.opsource', 'OpsourceNodeDriver'),
Provider.DIMENSIONDATA:
('libcloud.compute.drivers.dimensiondata', 'DimensionDataNodeDriver'),
Provider.OPENSTACK:
('libcloud.compute.drivers.openstack', 'OpenStackNodeDriver'),
Provider.NINEFOLD:
('libcloud.compute.drivers.ninefold', 'NinefoldNodeDriver'),
Provider.VCLOUD:
('libcloud.compute.drivers.vcloud', 'VCloudNodeDriver'),
Provider.TERREMARK:
('libcloud.compute.drivers.vcloud', 'TerremarkDriver'),
Provider.CLOUDSTACK:
('libcloud.compute.drivers.cloudstack', 'CloudStackNodeDriver'),
Provider.LIBVIRT:
('libcloud.compute.drivers.libvirt_driver', 'LibvirtNodeDriver'),
Provider.JOYENT:
('libcloud.compute.drivers.joyent', 'JoyentNodeDriver'),
Provider.VCL:
('libcloud.compute.drivers.vcl', 'VCLNodeDriver'),
Provider.KTUCLOUD:
('libcloud.compute.drivers.ktucloud', 'KTUCloudNodeDriver'),
Provider.HOSTVIRTUAL:
('libcloud.compute.drivers.hostvirtual', 'HostVirtualNodeDriver'),
Provider.ABIQUO:
('libcloud.compute.drivers.abiquo', 'AbiquoNodeDriver'),
Provider.DIGITAL_OCEAN:
('libcloud.compute.drivers.digitalocean', 'DigitalOceanNodeDriver'),
Provider.NEPHOSCALE:
('libcloud.compute.drivers.nephoscale', 'NephoscaleNodeDriver'),
Provider.CLOUDFRAMES:
('libcloud.compute.drivers.cloudframes', 'CloudFramesNodeDriver'),
Provider.EXOSCALE:
('libcloud.compute.drivers.exoscale', 'ExoscaleNodeDriver'),
Provider.IKOULA:
('libcloud.compute.drivers.ikoula', 'IkoulaNodeDriver'),
Provider.OUTSCALE_SAS:
('libcloud.compute.drivers.ec2', 'OutscaleSASNodeDriver'),
Provider.OUTSCALE_INC:
('libcloud.compute.drivers.ec2', 'OutscaleINCNodeDriver'),
Provider.VSPHERE:
('libcloud.compute.drivers.vsphere', 'VSphereNodeDriver'),
Provider.PROFIT_BRICKS:
('libcloud.compute.drivers.profitbricks', 'ProfitBricksNodeDriver'),
Provider.VULTR:
('libcloud.compute.drivers.vultr', 'VultrNodeDriver'),
Provider.AURORACOMPUTE:
('libcloud.compute.drivers.auroracompute', 'AuroraComputeNodeDriver'),
# Deprecated
Provider.CLOUDSIGMA_US:
('libcloud.compute.drivers.cloudsigma', 'CloudSigmaLvsNodeDriver'),
}
def get_driver(provider):
if provider in DEPRECATED_RACKSPACE_PROVIDERS:
id_to_name_map = dict([(v, k) for k, v in Provider.__dict__.items()])
old_name = id_to_name_map[provider]
new_name = id_to_name_map[OLD_CONSTANT_TO_NEW_MAPPING[provider]]
url = 'http://s.apache.org/lc0140un'
msg = ('Provider constant %s has been removed. New constant '
'is now called %s.\n'
'For more information on this change and how to modify your '
'code to work with it, please visit: %s' %
(old_name, new_name, url))
raise Exception(msg)
return _get_provider_driver(DRIVERS, provider)
def set_driver(provider, module, klass):
return _set_provider_driver(DRIVERS, provider, module, klass)
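# --- Hypothetical usage sketch (not part of the original file) ---
# A minimal sketch of resolving and instantiating a driver through the DRIVERS
# table; the dummy driver ships with libcloud and needs no real credentials.
if __name__ == "__main__":
    cls = get_driver(Provider.DUMMY)
    driver = cls(0)  # the dummy driver accepts a placeholder credential
    print(driver.list_nodes())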
|
Hybrid-Cloud/badam
|
patches_tool/aws_patch/aws_deps/libcloud/compute/providers.py
|
Python
|
apache-2.0
| 8,232
|
[
"VisIt"
] |
efe1764a556b904ad5fdb1abaee3576828973092f838b918b6ef0cc92a491e84
|
"""
Elastic basis pursuit
"""
import numpy as np
import numpy.linalg as nla
import leastsqbound as lsq
import sklearn.linear_model as lm
import scipy.optimize as opt
def err_func(params, x, y, func):
"""
Error function for fitting a function
Parameters
----------
params : tuple
A tuple with the parameters of `func` according to their order of
input
x : float array
An independent variable.
y : float array
The dependent variable.
func : function
A function with inputs: `(x, *params)`
Returns
-------
    The residuals of the fit to x/y given the params
"""
    # We ravel both, so that we can accommodate multi-d input without having
# to think about it:
return np.ravel(y) - np.ravel(func(x, params))
def gaussian_kernel(x, params):
"""
A multi-dimensional Gaussian kernel function
Useful for creating and testing EBP with simple Gaussian Mixture Models
Parameters
----------
x : ndarray
The independent variable over which the Gaussian is calculated
params : ndarray
        If this is a 1D array, it can take one of a few forms:
[mu_1, mu_2, ... mu_n, sigma_1, sigma_2, ... sigma_n]
Or:
[mu_1, mu_2, ... mu_n, var_covar_matrix]
where:
var_covar_matrix needs to be reshaped into n-by-n
"""
mu = np.asarray(params[:x.shape[0]])
if len(params) == x.shape[0] * 2:
sigma = np.diag(params[x.shape[0]:])
elif len(params) == x.shape[0] + x.shape[0] ** 2:
mu = params[:x.shape[0]]
sigma = np.reshape(params[x.shape[0]:], (x.shape[0], x.shape[0]))
else:
e_s = "Inputs to gaussian_kernel don't have the right dimensions"
raise ValueError(e_s)
dims = mu.shape[0]
while len(mu.shape) < len(x.shape):
mu = mu[..., None]
shape_tuple = x.shape[1:]
diff = (x - mu).reshape(x.shape[0], -1)
sigma_inv = nla.inv(sigma)
mult1 = np.dot(diff.T, sigma_inv)
mult2 = (np.diag(np.dot(mult1, diff))).reshape(shape_tuple)
norm_factor = 1/(np.sqrt((2*np.pi)**dims * nla.det(sigma)))
gauss = norm_factor * np.exp(-0.5 * mult2)
return gauss
def leastsq_oracle(x, y, kernel, initial=None, bounds=None):
"""
This is a generic oracle function that uses bounded least squares to find
the parameters in each iteration of EBP, and requires initial parameters.
Parameters
----------
x : ndarray
Input to the kernel function.
y : ndarray
Data to fit to.
    kernel : callable
The kernel function to be specified by this oracle.
initial : list/array
initial setting for the parameters of the function. This has to be
something that kernel knows what to do with.
"""
return lsq.leastsqbound(err_func, initial, args=(x, y, kernel),
bounds=bounds)[0]
def mixture_of_kernels(x, betas, params, kernel):
"""
Generate the signal from a mixture of kernels
Parameters
----------
x : ndarray
betas : 1D array
Coefficients for the linear summation of the kernels
params : list
A set of parameters for each one of the kernels
kernel : callable
"""
betas = np.asarray(betas)
out = np.zeros(x.shape[1:])
for i in xrange(betas.shape[0]):
out += np.dot(betas[i], kernel(x, params[i]))
return out
def kernel_err(y, x, betas, params, kernel):
"""
An error function for a mixture of kernels, each one parameterized by its
own set of params, and weighted by a beta
Note
----
For a given set of betas, params, this can be used as a within set error
function, or to estimate the cross-validation error against another set of
y, x values, sub-sampled from the whole original set, or from a left-out
portion
"""
return y - mixture_of_kernels(x, betas, params, kernel)
def parameters_to_regressors(x, kernel, params):
"""
Maps from parameters to regressors through the kernel function
Parameters
----------
x : ndarray
Input
kernel : callable
The kernel function
params : list
The parameters for each one of the kernel functions
"""
# Ravel the secondary dimensions of this:
x = x.reshape(x.shape[0], -1)
regressors = np.zeros((len(params), x.shape[-1]))
for i, p in enumerate(params):
regressors[i] = kernel(x, p)
return regressors.T
def solve_nnls(x, y, kernel=None, params=None, design=None):
"""
Solve the mixture problem using NNLS
Parameters
----------
x : ndarray
y : ndarray
kernel : callable
params : list
"""
if design is None and (kernel is None or params is None):
e_s = "Need to provide either design matrix, or kernel and list of"
e_s += "params for generating the design matrix"
raise ValueError(e_s)
if design is None:
A = parameters_to_regressors(x, kernel, params)
else:
A = design
y = y.ravel()
beta_hat, rnorm = opt.nnls(A, y)
return beta_hat, rnorm
def elastic_basis_pursuit(x, y, oracle, kernel, initial_theta=None, bounds=None,
max_iter=1000, beta_tol=10e-6):
"""
Elastic basis pursuit
    Fit a mixture model:
    .. math::
        y = \sum{w_i f_{\theta_i} (x_i)}
    with y data, f a kernel function parameterized by $\theta_i$, $w_i$ a
    non-negative weight, and x inputs to the kernel function
Parameters
----------
x : 1D/2D array
The independent variable that produces the data
y : 1D/2D darray
The data to be fit.
oracle : callable
This is a function that takes data (`x`/`y`) and a kernel function
(`kernel`) and returns the params theta for the kernel given x and
y. The oracle can use any optimization routine, and any cost function
kernel : callable
A skeleton for the oracle function to optimize. Must take something
of the dimensions of x (together with params, and with args) and return
something of the dimensions of y.
initial_theta : list/array
The initial parameter guess
    bounds : sequence, optional
        Bounds on the kernel parameters, passed through to the oracle
"""
# Divide this up into a fit set and a validation set. We'll stop fitting
# when error on the validation set starts climbing:
fit_x = x[:, ::2]
validate_x = x[:, 1::2]
fit_y = y[::2]
validate_y = y[1::2]
# Initialize a bunch of empty lists to hold the state:
theta = []
est = []
design_list = []
r = []
    err = [np.var(fit_y)]  # Start with the variance of the data as the baseline error
err_norm = []
# Initialize the residuals with the fit_data:
r.append(fit_y)
# Limit this by number of iterations
for i in range(max_iter):
theta.append(oracle(fit_x, r[-1], kernel, initial_theta,
bounds=bounds))
design = parameters_to_regressors(fit_x, kernel, theta)
beta_hat, rnorm = solve_nnls(fit_x, fit_y, design=design)
# Here comes the "elastic" bit. We exclude kernels with insignificant
# contributions:
keep_idx = np.where(beta_hat > beta_tol)
# We want this to still be a list (so we can 'append'):
theta = list(np.array(theta)[keep_idx])
beta_hat = beta_hat[keep_idx]
design = design[:, keep_idx[0]]
# Move on with the shrunken basis set:
est.append(np.dot(design, beta_hat))
r.append(fit_y - est[-1])
# Cross-validation:
xval_design = parameters_to_regressors(validate_x, kernel, theta)
xval_est = np.dot(xval_design, beta_hat)
xval_r = validate_y - xval_est
err.append(np.dot(xval_r, xval_r))
# If error just grew, we bail:
if err[i+1] > err[i]:
break
return theta, err, r
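# --- Hypothetical smoke test (not part of the original module) ---
# A minimal sketch, run under Python 2 (the module uses xrange): with known
# kernel parameters, solve_nnls should recover the mixture weights of a
# two-component 1-D Gaussian mixture built with mixture_of_kernels.
if __name__ == "__main__":
    x = np.linspace(-5, 5, 200).reshape(1, -1)
    params = [np.array([-1.0, 0.5]), np.array([2.0, 0.8])]  # [mu, variance] each
    betas = np.array([1.0, 0.6])
    y = mixture_of_kernels(x, betas, params, gaussian_kernel)
    beta_hat, rnorm = solve_nnls(x, y, kernel=gaussian_kernel, params=params)
    print(beta_hat)  # expect approximately [1.0, 0.6]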
|
vistalab/elastic_basis_pursuit
|
ebp/elastic_basis_pursuit.py
|
Python
|
mit
| 8,059
|
[
"Gaussian"
] |
b2718bd1c43d5f77ab46391b38f36cca5874a96aef4512b0e36f7d5ae8942182
|
# Test
import bvp
dbi = bvp.DBInterface()
db_update_script = """
import bvp
import bpy
import numpy as np
# Parameters
n_samples = 5
# Set scene to particular action
scn = bpy.data.scenes["{act_name}"]
bvp.utils.blender.set_scene(scn.name)
# Make sure armature object for action is selected
grp = bpy.data.groups["{act_name}"]
bvp.utils.blender.grab_only(grp)
ob = bpy.context.object
# Following is mostly lifted from modifications we made to Action.from_blender()
# Get action
act = ob.animation_data.action
ob_list = [ob] + list(ob.children)
st = int(np.floor(act.frame_range[0]))
fin = int(np.ceil(act.frame_range[1]))
# Loop over all frames in action
mn = []
mx = []
for fr in range(st, fin):
# Update scene frame
scn.frame_set(fr)
scn.update()
# Re-visit me
mntmp, mxtmp = bvp.utils.blender.get_group_bounding_box(ob_list)
mn.append(mntmp)
mx.append(mxtmp)
min_xyz = np.min(np.vstack(mn), axis=0).tolist()
max_xyz = np.max(np.vstack(mx), axis=0).tolist()
# Select specific frames
idx = np.floor(np.linspace(st, fin, n_samples)).astype(np.int)
idx[-1] -= 1
min_xyz_trajectory = [mn[ii] for ii in idx]
max_xyz_trajectory = [mx[ii] for ii in idx]
# Get database interface object
dbi = bvp.DBInterface()
# Update the document in the database for this action with the new fields we need
# This is sufficient information to identify an action in the database
act_doc = dict(name="{act_name}")
# Check on what you're about to do
print(dbi.query(**act_doc))
print(min_xyz_trajectory)
print(max_xyz_trajectory)
# Uncomment these lines to make update for real
#dbi._update_value(act_doc, "min_xyz_trajectory", min_xyz_trajectory)
#dbi._update_value(act_doc, "max_xyz_trajectory", max_xyz_trajectory)
"""
act_list = dbi.query(type='Action')
for act in act_list: # This is the pythonic way to iterate over a list
#act = act_list[i] # This is a very matlab / C way to program =-)
script = db_update_script.format(act_name=act.name)
stdout, stderr = bvp.blend(script, blend_file=act.fpath)
try:
# python 3
print(str(stdout,'utf-8'))
print(str(stderr,'utf-8'))
except:
# python 2
print(stdout)
print(stderr)
|
marklescroart/bvp
|
Scripts/update_val_script.py
|
Python
|
bsd-2-clause
| 2,207
|
[
"VisIt"
] |
7b44519ff67cdb320e53037a503e309a8447a8f534f7b47d086716c3b2c5cc9f
|
"""
objectstore package, abstraction for storing blobs of data for use in Galaxy,
all providers ensure that data can be accessed on the filesystem for running
tools
"""
import os
import random
import shutil
import logging
import threading
from xml.etree import ElementTree
from galaxy.util import umask_fix_perms, force_symlink
from galaxy.exceptions import ObjectInvalid, ObjectNotFound
from galaxy.util.sleeper import Sleeper
from galaxy.util.directory_hash import directory_hash_id
from galaxy.util.odict import odict
try:
from sqlalchemy.orm import object_session
except ImportError:
object_session = None
NO_SESSION_ERROR_MESSAGE = "Attempted to 'create' object store entity in configuration with no database session present."
log = logging.getLogger( __name__ )
class ObjectStore(object):
"""
ObjectStore abstract interface
"""
def __init__(self, config, config_xml=None, **kwargs):
self.running = True
self.extra_dirs = {}
def shutdown(self):
self.running = False
def exists(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
Returns True if the object identified by `obj` exists in this file
store, False otherwise.
FIELD DESCRIPTIONS (these apply to all the methods in this class):
:type obj: object
:param obj: A Galaxy object with an assigned database ID accessible via
the .id attribute.
:type base_dir: string
:param base_dir: A key in self.extra_dirs corresponding to the base
directory in which this object should be created, or
None to specify the default directory.
:type dir_only: bool
:param dir_only: If True, check only the path where the file
identified by `obj` should be located, not the dataset
itself. This option applies to `extra_dir` argument as
well.
:type extra_dir: string
:param extra_dir: Append `extra_dir` to the directory structure where
the dataset identified by `obj` should be located.
(e.g., 000/extra_dir/obj.id)
:type extra_dir_at_root: bool
:param extra_dir_at_root: Applicable only if `extra_dir` is set.
If True, the `extra_dir` argument is placed at
root of the created directory structure rather
than at the end (e.g., extra_dir/000/obj.id
vs. 000/extra_dir/obj.id)
:type alt_name: string
:param alt_name: Use this name as the alternative name for the created
dataset rather than the default.
"""
raise NotImplementedError()
def file_ready(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
""" A helper method that checks if a file corresponding to a dataset
is ready and available to be used. Return True if so, False otherwise."""
return True
def create(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
Mark the object identified by `obj` as existing in the store, but with
no content. This method will create a proper directory structure for
the file if the directory does not already exist.
See `exists` method for the description of other fields.
"""
raise NotImplementedError()
def empty(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
Test if the object identified by `obj` has content.
If the object does not exist raises `ObjectNotFound`.
See `exists` method for the description of the fields.
"""
raise NotImplementedError()
def size(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
Return size of the object identified by `obj`.
If the object does not exist, return 0.
See `exists` method for the description of the fields.
"""
raise NotImplementedError()
def delete(self, obj, entire_dir=False, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
Deletes the object identified by `obj`.
See `exists` method for the description of other fields.
:type entire_dir: bool
:param entire_dir: If True, delete the entire directory pointed to by
extra_dir. For safety reasons, this option applies
only for and in conjunction with the extra_dir option.
"""
raise NotImplementedError()
def get_data(self, obj, start=0, count=-1, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
Fetch `count` bytes of data starting at offset `start` from the
object identified uniquely by `obj`.
If the object does not exist raises `ObjectNotFound`.
See `exists` method for the description of other fields.
:type start: int
:param start: Set the position to start reading the dataset file
:type count: int
:param count: Read at most `count` bytes from the dataset
"""
raise NotImplementedError()
def get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
Get the expected filename (including the absolute path) which can be used
to access the contents of the object uniquely identified by `obj`.
See `exists` method for the description of the fields.
"""
raise NotImplementedError()
def update_from_file(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, file_name=None, create=False):
"""
Inform the store that the file associated with the object has been
updated. If `file_name` is provided, update from that file instead
of the default.
If the object does not exist raises `ObjectNotFound`.
See `exists` method for the description of other fields.
:type file_name: string
:param file_name: Use file pointed to by `file_name` as the source for
updating the dataset identified by `obj`
:type create: bool
:param create: If True and the default dataset does not exist, create it first.
"""
raise NotImplementedError()
def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
If the store supports direct URL access, return a URL. Otherwise return
None.
        Note: need to be careful not to bypass dataset security with this.
See `exists` method for the description of the fields.
"""
raise NotImplementedError()
def get_store_usage_percent(self):
"""
Return the percentage indicating how full the store is
"""
raise NotImplementedError()
## def get_staging_command( id ):
## """
## Return a shell command that can be prepended to the job script to stage the
## dataset -- runs on worker nodes.
##
## Note: not sure about the interface here. Should this return a filename, command
## tuple? Is this even a good idea, seems very useful for S3, other object stores?
## """
class DiskObjectStore(ObjectStore):
"""
Standard Galaxy object store, stores objects in files under a specific
directory on disk.
>>> from galaxy.util.bunch import Bunch
>>> import tempfile
>>> file_path=tempfile.mkdtemp()
>>> obj = Bunch(id=1)
>>> s = DiskObjectStore(Bunch(umask=077, job_working_directory=file_path, new_file_path=file_path, object_store_check_old_style=False), file_path=file_path)
>>> s.create(obj)
>>> s.exists(obj)
True
>>> assert s.get_filename(obj) == file_path + '/000/dataset_1.dat'
"""
def __init__(self, config, config_xml=None, file_path=None, extra_dirs=None):
super(DiskObjectStore, self).__init__(config, config_xml=None, file_path=file_path, extra_dirs=extra_dirs)
self.file_path = file_path or config.file_path
self.config = config
self.check_old_style = config.object_store_check_old_style
self.extra_dirs['job_work'] = config.job_working_directory
self.extra_dirs['temp'] = config.new_file_path
#The new config_xml overrides universe settings.
if config_xml is not None:
for e in config_xml:
if e.tag == 'files_dir':
self.file_path = e.get('path')
else:
self.extra_dirs[e.tag] = e.get('path')
if extra_dirs is not None:
self.extra_dirs.update( extra_dirs )
def _get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""Class method that returns the absolute path for the file corresponding
to the `obj`.id regardless of whether the file exists.
"""
path = self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name, old_style=True)
# For backward compatibility, check the old style root path first; otherwise,
# construct hashed path
        if not os.path.exists(path):
            return self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name)
        return path
# TODO: rename to _disk_path or something like that to avoid conflicts with children that'll use the local_extra_dirs decorator, e.g. S3
def _construct_path(self, obj, old_style=False, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, **kwargs):
""" Construct the expected absolute path for accessing the object
identified by `obj`.id.
:type base_dir: string
:param base_dir: A key in self.extra_dirs corresponding to the base
directory in which this object should be created, or
None to specify the default directory.
:type dir_only: bool
:param dir_only: If True, check only the path where the file
identified by `obj` should be located, not the
dataset itself. This option applies to `extra_dir`
argument as well.
:type extra_dir: string
:param extra_dir: Append the value of this parameter to the expected path
used to access the object identified by `obj`
(e.g., /files/000/<extra_dir>/dataset_10.dat).
:type alt_name: string
:param alt_name: Use this name as the alternative name for the returned
dataset rather than the default.
:type old_style: bool
        :param old_style: This option is used for backward compatibility. If True
the composed directory structure does not include a hash id
(e.g., /files/dataset_10.dat (old) vs. /files/000/dataset_10.dat (new))
"""
base = self.extra_dirs.get(base_dir, self.file_path)
if old_style:
if extra_dir is not None:
path = os.path.join(base, extra_dir)
else:
path = base
else:
# Construct hashed path
rel_path = os.path.join(*directory_hash_id(obj.id))
# Optionally append extra_dir
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
path = os.path.join(base, rel_path)
if not dir_only:
path = os.path.join(path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
return os.path.abspath(path)
def exists(self, obj, **kwargs):
if self.check_old_style:
path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility, check root path first; otherwise, construct
# and check hashed path
if os.path.exists(path):
return True
return os.path.exists(self._construct_path(obj, **kwargs))
def create(self, obj, **kwargs):
if not self.exists(obj, **kwargs):
path = self._construct_path(obj, **kwargs)
dir_only = kwargs.get('dir_only', False)
# Create directory if it does not exist
dir = path if dir_only else os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
# Create the file if it does not exist
if not dir_only:
open(path, 'w').close() # Should be rb?
umask_fix_perms(path, self.config.umask, 0666)
def empty(self, obj, **kwargs):
return os.path.getsize(self.get_filename(obj, **kwargs)) == 0
def size(self, obj, **kwargs):
if self.exists(obj, **kwargs):
try:
return os.path.getsize(self.get_filename(obj, **kwargs))
except OSError:
return 0
else:
return 0
def delete(self, obj, entire_dir=False, **kwargs):
path = self.get_filename(obj, **kwargs)
extra_dir = kwargs.get('extra_dir', None)
try:
if entire_dir and extra_dir:
shutil.rmtree(path)
return True
if self.exists(obj, **kwargs):
os.remove(path)
return True
except OSError, ex:
log.critical('%s delete error %s' % (self._get_filename(obj, **kwargs), ex))
return False
def get_data(self, obj, start=0, count=-1, **kwargs):
data_file = open(self.get_filename(obj, **kwargs), 'r') # Should be rb?
data_file.seek(start)
content = data_file.read(count)
data_file.close()
return content
def get_filename(self, obj, **kwargs):
if self.check_old_style:
path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility, check root path first; otherwise, construct
# and return hashed path
if os.path.exists(path):
return path
return self._construct_path(obj, **kwargs)
def update_from_file(self, obj, file_name=None, create=False, **kwargs):
""" `create` parameter is not used in this implementation """
preserve_symlinks = kwargs.pop( 'preserve_symlinks', False )
#FIXME: symlinks and the object store model may not play well together
#these should be handled better, e.g. registering the symlink'd file as an object
if create:
self.create(obj, **kwargs)
if file_name and self.exists(obj, **kwargs):
try:
if preserve_symlinks and os.path.islink( file_name ):
force_symlink( os.readlink( file_name ), self.get_filename( obj, **kwargs ) )
else:
shutil.copy( file_name, self.get_filename( obj, **kwargs ) )
except IOError, ex:
log.critical('Error copying %s to %s: %s' % (file_name,
self._get_filename(obj, **kwargs), ex))
raise ex
def get_object_url(self, obj, **kwargs):
return None
def get_store_usage_percent(self):
st = os.statvfs(self.file_path)
return ( float( st.f_blocks - st.f_bavail ) / st.f_blocks ) * 100
class CachingObjectStore(ObjectStore):
"""
Object store that uses a directory for caching files, but defers and writes
back to another object store.
"""
def __init__(self, path, backend):
        super(CachingObjectStore, self).__init__(path, backend)
class NestedObjectStore(ObjectStore):
"""
Base for ObjectStores that use other ObjectStores
(DistributedObjectStore, HierarchicalObjectStore)
"""
def __init__(self, config, config_xml=None):
super(NestedObjectStore, self).__init__(config, config_xml=config_xml)
self.backends = {}
def shutdown(self):
for store in self.backends.values():
store.shutdown()
super(NestedObjectStore, self).shutdown()
def exists(self, obj, **kwargs):
return self.__call_method('exists', obj, False, False, **kwargs)
def file_ready(self, obj, **kwargs):
return self.__call_method('file_ready', obj, False, False, **kwargs)
def create(self, obj, **kwargs):
random.choice(self.backends.values()).create(obj, **kwargs)
def empty(self, obj, **kwargs):
return self.__call_method('empty', obj, True, False, **kwargs)
def size(self, obj, **kwargs):
return self.__call_method('size', obj, 0, False, **kwargs)
def delete(self, obj, **kwargs):
return self.__call_method('delete', obj, False, False, **kwargs)
def get_data(self, obj, **kwargs):
return self.__call_method('get_data', obj, ObjectNotFound, True, **kwargs)
def get_filename(self, obj, **kwargs):
return self.__call_method('get_filename', obj, ObjectNotFound, True, **kwargs)
def update_from_file(self, obj, **kwargs):
if kwargs.get('create', False):
self.create(obj, **kwargs)
kwargs['create'] = False
return self.__call_method('update_from_file', obj, ObjectNotFound, True, **kwargs)
def get_object_url(self, obj, **kwargs):
return self.__call_method('get_object_url', obj, None, False, **kwargs)
def __call_method(self, method, obj, default, default_is_exception, **kwargs):
"""
Check all children object stores for the first one with the dataset
"""
for key, store in self.backends.items():
if store.exists(obj, **kwargs):
return store.__getattribute__(method)(obj, **kwargs)
if default_is_exception:
raise default( 'objectstore, __call_method failed: %s on %s, kwargs: %s'
% ( method, str( obj ), str( kwargs ) ) )
else:
return default
class DistributedObjectStore(NestedObjectStore):
"""
ObjectStore that defers to a list of backends, for getting objects the
first store where the object exists is used, objects are created in a
store selected randomly, but with weighting.
"""
def __init__(self, config, config_xml=None, fsmon=False):
super(DistributedObjectStore, self).__init__(config, config_xml=config_xml)
if config_xml is None:
self.distributed_config = config.distributed_object_store_config_file
assert self.distributed_config is not None, "distributed object store ('object_store = distributed') " \
"requires a config file, please set one in " \
"'distributed_object_store_config_file')"
self.backends = {}
self.weighted_backend_ids = []
self.original_weighted_backend_ids = []
self.max_percent_full = {}
self.global_max_percent_full = 0.0
random.seed()
self.__parse_distributed_config(config, config_xml)
self.sleeper = None
if fsmon and ( self.global_max_percent_full or filter( lambda x: x != 0.0, self.max_percent_full.values() ) ):
self.sleeper = Sleeper()
self.filesystem_monitor_thread = threading.Thread(target=self.__filesystem_monitor)
self.filesystem_monitor_thread.setDaemon( True )
self.filesystem_monitor_thread.start()
log.info("Filesystem space monitor started")
def __parse_distributed_config(self, config, config_xml=None):
if config_xml is None:
root = ElementTree.parse(self.distributed_config).getroot()
log.debug('Loading backends for distributed object store from %s' % self.distributed_config)
else:
root = config_xml.find('backends')
log.debug('Loading backends for distributed object store from %s' % config_xml.get('id'))
self.global_max_percent_full = float(root.get('maxpctfull', 0))
for elem in [ e for e in root if e.tag == 'backend' ]:
id = elem.get('id')
weight = int(elem.get('weight', 1))
maxpctfull = float(elem.get('maxpctfull', 0))
            if elem.get('type', 'disk') == 'disk':
path = None
extra_dirs = {}
for sub in elem:
if sub.tag == 'files_dir':
path = sub.get('path')
elif sub.tag == 'extra_dir':
type = sub.get('type')
extra_dirs[type] = sub.get('path')
self.backends[id] = DiskObjectStore(config, file_path=path, extra_dirs=extra_dirs)
self.max_percent_full[id] = maxpctfull
log.debug("Loaded disk backend '%s' with weight %s and file_path: %s" % (id, weight, path))
if extra_dirs:
log.debug(" Extra directories:")
for type, dir in extra_dirs.items():
log.debug(" %s: %s" % (type, dir))
for i in range(0, weight):
# The simplest way to do weighting: add backend ids to a
# sequence the number of times equalling weight, then randomly
# choose a backend from that sequence at creation
self.weighted_backend_ids.append(id)
self.original_weighted_backend_ids = self.weighted_backend_ids
def shutdown(self):
super(DistributedObjectStore, self).shutdown()
if self.sleeper is not None:
self.sleeper.wake()
def __filesystem_monitor(self):
while self.running:
new_weighted_backend_ids = self.original_weighted_backend_ids
for id, backend in self.backends.items():
maxpct = self.max_percent_full[id] or self.global_max_percent_full
pct = backend.get_store_usage_percent()
if pct > maxpct:
new_weighted_backend_ids = filter(lambda x: x != id, new_weighted_backend_ids)
self.weighted_backend_ids = new_weighted_backend_ids
self.sleeper.sleep(120) # Test free space every 2 minutes
def create(self, obj, **kwargs):
"""
create() is the only method in which obj.object_store_id may be None
"""
if obj.object_store_id is None or not self.exists(obj, **kwargs):
if obj.object_store_id is None or obj.object_store_id not in self.weighted_backend_ids:
try:
obj.object_store_id = random.choice(self.weighted_backend_ids)
except IndexError:
raise ObjectInvalid( 'objectstore.create, could not generate obj.object_store_id: %s, kwargs: %s'
% ( str( obj ), str( kwargs ) ) )
create_object_in_session( obj )
log.debug("Selected backend '%s' for creation of %s %s" % (obj.object_store_id, obj.__class__.__name__, obj.id))
else:
log.debug("Using preferred backend '%s' for creation of %s %s" % (obj.object_store_id, obj.__class__.__name__, obj.id))
self.backends[obj.object_store_id].create(obj, **kwargs)
def __call_method(self, method, obj, default, default_is_exception, **kwargs):
object_store_id = self.__get_store_id_for(obj, **kwargs)
if object_store_id is not None:
return self.backends[object_store_id].__getattribute__(method)(obj, **kwargs)
if default_is_exception:
raise default( 'objectstore, __call_method failed: %s on %s, kwargs: %s'
% ( method, str( obj ), str( kwargs ) ) )
else:
return default
def __get_store_id_for(self, obj, **kwargs):
if obj.object_store_id is not None and obj.object_store_id in self.backends:
return obj.object_store_id
else:
# if this instance has been switched from a non-distributed to a
# distributed object store, or if the object's store id is invalid,
# try to locate the object
log.warning('The backend object store ID (%s) for %s object with ID %s is invalid' % (obj.object_store_id, obj.__class__.__name__, obj.id))
for id, store in self.backends.items():
if store.exists(obj, **kwargs):
log.warning('%s object with ID %s found in backend object store with ID %s' % (obj.__class__.__name__, obj.id, id))
obj.object_store_id = id
create_object_in_session( obj )
return id
return None
class HierarchicalObjectStore(NestedObjectStore):
"""
ObjectStore that defers to a list of backends, for getting objects the
first store where the object exists is used, objects are always created
in the first store.
"""
def __init__(self, config, config_xml=None, fsmon=False):
super(HierarchicalObjectStore, self).__init__(config, config_xml=config_xml)
self.backends = odict()
for b in sorted(config_xml.find('backends'), key=lambda b: int(b.get('order'))):
self.backends[int(b.get('order'))] = build_object_store_from_config(config, fsmon=fsmon, config_xml=b)
def exists(self, obj, **kwargs):
"""
Exists must check all child object stores
"""
for store in self.backends.values():
if store.exists(obj, **kwargs):
return True
return False
def create(self, obj, **kwargs):
"""
Create will always be called by the primary object_store
"""
self.backends[0].create(obj, **kwargs)
def build_object_store_from_config(config, fsmon=False, config_xml=None):
"""
Depending on the configuration setting, invoke the appropriate object store
"""
if config_xml is None and os.path.exists( config.object_store_config_file ):
# This is a top level invocation of build_object_store_from_config, and
# we have an object_store_conf.xml -- read the .xml and build
# accordingly
root = ElementTree.parse(config.object_store_config_file).getroot()
store = root.get('type')
config_xml = root
elif config_xml is not None:
store = config_xml.get('type')
else:
store = config.object_store
if store == 'disk':
return DiskObjectStore(config=config, config_xml=config_xml)
elif store == 's3':
from .s3 import S3ObjectStore
return S3ObjectStore(config=config, config_xml=config_xml)
elif store == 'swift':
from .s3 import SwiftObjectStore
return SwiftObjectStore(config=config, config_xml=config_xml)
elif store == 'distributed':
return DistributedObjectStore(config=config, fsmon=fsmon, config_xml=config_xml)
elif store == 'hierarchical':
return HierarchicalObjectStore(config=config, config_xml=config_xml)
elif store == 'irods':
from .rods import IRODSObjectStore
return IRODSObjectStore(config=config, config_xml=config_xml)
elif store == 'pulsar':
from .pulsar import PulsarObjectStore
return PulsarObjectStore(config=config, config_xml=config_xml)
else:
log.error("Unrecognized object store definition: {0}".format(store))
def local_extra_dirs( func ):
""" A decorator for non-local plugins to utilize local directories for their extra_dirs (job_working_directory and temp).
"""
def wraps( self, *args, **kwargs ):
if kwargs.get( 'base_dir', None ) is None:
return func( self, *args, **kwargs )
else:
for c in self.__class__.__mro__:
if c.__name__ == 'DiskObjectStore':
return getattr( c, func.__name__ )( self, *args, **kwargs )
raise Exception( "Could not call DiskObjectStore's %s method, does your Object Store plugin inherit from DiskObjectStore?" % func.__name__ )
return wraps
def convert_bytes(bytes):
""" A helper function used for pretty printing disk usage """
if bytes is None:
bytes = 0
bytes = float(bytes)
if bytes >= 1099511627776:
terabytes = bytes / 1099511627776
size = '%.2fTB' % terabytes
elif bytes >= 1073741824:
gigabytes = bytes / 1073741824
size = '%.2fGB' % gigabytes
elif bytes >= 1048576:
megabytes = bytes / 1048576
size = '%.2fMB' % megabytes
elif bytes >= 1024:
kilobytes = bytes / 1024
size = '%.2fKB' % kilobytes
else:
size = '%.2fb' % bytes
return size
def create_object_in_session( obj ):
session = object_session( obj ) if object_session is not None else None
if session is not None:
object_session( obj ).add( obj )
object_session( obj ).flush()
else:
raise Exception( NO_SESSION_ERROR_MESSAGE )
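# --- Hypothetical sanity check (not part of the original module) ---
# A quick sketch of the pretty-printer used for disk-usage reporting, plus the
# DistributedObjectStore weighting trick in miniature: repeating each backend
# id by its weight lets random.choice pick proportionally.
if __name__ == "__main__":
    for n in (512, 2048, 3 * 1048576, 5 * 1073741824):
        print(convert_bytes(n))  # 512.00b, 2.00KB, 3.00MB, 5.00GB
    weighted_ids = ['backend_a'] * 3 + ['backend_b'] * 1
    print(random.choice(weighted_ids))  # 'backend_a' about 3 times in 4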
|
jmchilton/pulsar
|
galaxy/objectstore/__init__.py
|
Python
|
apache-2.0
| 29,544
|
[
"Galaxy"
] |
f3b95dd654d87aa1d9838c867f06727c716978634e8ac159bb038ac5bffc5916
|
from django.urls import reverse
from django.test import TestCase
from .views import error
class SimpleTest(TestCase):
def test_home(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_error_route(self):
visit = lambda: self.client.get(reverse('error'))
self.assertRaises(Exception, error)
self.assertRaises(Exception, visit)
|
gregazevedo/gregazevedo
|
gregazevedo/home/tests.py
|
Python
|
mit
| 417
|
[
"VisIt"
] |
451b4df3f876a2e83cc6b227b722c68f6a71b4ae3717635d6debfb74977cf855
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyAse(PythonPackage):
"""The Atomic Simulation Environment (ASE) is a set of tools
and Python modules for setting up, manipulating, running,
visualizing and analyzing atomistic simulations."""
homepage = "https://wiki.fysik.dtu.dk/ase/"
url = "https://pypi.io/packages/source/a/ase/ase-3.13.0.tar.gz"
version('3.13.0', 'e946a0addc5b61e5e2e75857e0f99b89')
depends_on('python@2.6:')
depends_on('py-numpy', type=('build', 'run'))
|
wscullin/spack
|
var/spack/repos/builtin/packages/py-ase/package.py
|
Python
|
lgpl-2.1
| 1,729
|
[
"ASE"
] |
49936d7e320c265799dfd8be13519451463c94e7915a3a0da439b63b8d68c598
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from brew.grains import Grain
from brew.grains import GrainAddition
from brew.hops import Hop
from brew.hops import HopAddition
from brew.recipes import Recipe
from brew.yeasts import Yeast
"""
Build a recipe from known ingredients.
"""
def main():
# Define Grains
pale = Grain(u"Pale Malt (2 Row) US", color=1.8, ppg=37)
pale_add = GrainAddition(pale, weight=13.96)
crystal = Grain(u"Caramel/Crystal Malt - 20L", color=20.0, ppg=35)
crystal_add = GrainAddition(crystal, weight=0.78)
grain_additions = [pale_add, crystal_add]
# Define Hops
centennial = Hop(u"Centennial", percent_alpha_acids=0.14)
centennial_add = HopAddition(centennial, weight=0.57, boil_time=60.0)
cascade = Hop(u"Cascade (US)", percent_alpha_acids=0.07)
cascade_add = HopAddition(cascade, weight=0.76, boil_time=5.0)
hop_additions = [centennial_add, cascade_add]
# Define Yeast
yeast = Yeast(u"Wyeast 1056")
# Define Recipe
beer = Recipe(
u"pale ale",
grain_additions=grain_additions,
hop_additions=hop_additions,
yeast=yeast,
brew_house_yield=0.70, # %
start_volume=7.0, # G
final_volume=5.0, # G
)
print(beer.format())
if __name__ == "__main__":
main()
|
chrisgilmerproj/brewday
|
examples/pale_ale_recipe.py
|
Python
|
mit
| 1,317
|
[
"CRYSTAL"
] |
5ffc2f2aa22b63457fcf6bf4a353c529108b7fddf884d95f77077a48f2a38e4f
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from lib.meos import MEoS
from lib import unidades
class N2(MEoS):
"""Multiparamente equation of state for nitrogen"""
name = "nitrogen"
CASNumber = "7727-37-9"
formula = "N2"
synonym = "R-728"
rhoc = unidades.Density(313.3)
Tc = unidades.Temperature(126.192)
Pc = unidades.Pressure(3395.8, "kPa")
M = 28.01348 # g/mol
Tt = unidades.Temperature(63.151)
Tb = unidades.Temperature(77.355)
f_acent = 0.0372
momentoDipolar = unidades.DipoleMoment(0.0, "Debye")
id = 46
_Tr = unidades.Temperature(122.520245)
_rhor = unidades.Density(316.134310)
_w = 0.043553140
Fi1 = {"ao_log": [1, 2.5],
"pow": [0, 1, -1, -2, -3],
"ao_pow": [-12.76952708, -0.00784163, -1.934819e-4,
-1.247742e-5, 6.678326e-8],
"ao_exp": [1.012941],
"titao": [26.65788]}
Fi2 = {"ao_log": [1, 2.50031],
"pow": [0, 1],
"ao_pow": [11.083407489, -22.202102428],
"ao_exp": [], "titao": [],
"ao_hyp": [0.13732, -0.1466, 0.90066, 0],
"hyp": [5.25182262, -5.393067706, 13.788988208, 0]}
CP1 = {"ao": 3.5,
"an": [3.066469e-6, 4.70124e-9, -3.987984e-13], "pow": [1, 2, 3],
"ao_exp": [1.012941], "exp": [3364.011],
"ao_hyp": [], "hyp": []}
CP2 = {"ao": 3.50418363823,
"an": [-0.837079888737e3, 0.379147114487e2, -0.601737844275,
-0.874955653028e-5, 0.148958507239e-7, -0.256370354277e-11],
"pow": [-3, -2, -1.001, 1, 2, 3],
"ao_exp": [1.00773735767], "exp": [3353.4061],
"ao_hyp": [], "hyp": []}
CP3 = {"ao": 3.50031,
"an": [], "pow": [],
"ao_exp": [], "exp": [],
"ao_hyp": [0.13732, -0.1466, 0.90066, 0],
"hyp": [5.251822620*Tc, -5.393067706*Tc, 13.788988208*Tc, 0],
"R": 8.31451}
helmholtz1 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for nitrogen of Span et al. (2000).",
"__doi__": {"autor": "Span, R., Lemmon, E.W., Jacobsen, R.T, Wagner, W., Yokozeki, A.",
"title": "A Reference Equation of State for the Thermodynamic Properties of Nitrogen for Temperatures from 63.151 to 1000 K and Pressures to 2200 MPa",
"ref": "J. Phys. Chem. Ref. Data 29, 1361 (2000)",
"doi": "10.1063/1.1349047"},
"__test__":
# Pag 1403
"""
>>> st=N2(T=63.151, x=0.5)
>>> print "%0.6g %0.6f %0.5g %0.4g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g %0.4g %0.4g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.kJkmol, st.Gas.hM.kJkmol, \
st.Liquido.sM.kJkmolK, st.Gas.sM.kJkmolK, st.Liquido.cvM.kJkmolK, st.Gas.cvM.kJkmolK, \
st.Liquido.cpM.kJkmolK, st.Gas.cpM.kJkmolK, st.Liquido.w, st.Gas.w)
63.151 0.012523 30.957 0.02407 -4222.6 1814.7 67.951 163.55 32.95 21.01 56.03 29.65 995.3 161.1
>>> st=N2(T=70, x=0.5)
>>> print "%0.6g %0.4g %0.5g %0.4g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g %0.4g %0.4g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.kJkmol, st.Gas.hM.kJkmol, \
st.Liquido.sM.kJkmolK, st.Gas.sM.kJkmolK, st.Liquido.cvM.kJkmolK, st.Gas.cvM.kJkmolK, \
st.Liquido.cpM.kJkmolK, st.Gas.cpM.kJkmolK, st.Liquido.w, st.Gas.w)
70 0.03854 29.933 0.06768 -3837 1991.7 73.735 157 31.65 21.24 56.43 30.3 925.7 168.4
>>> st=N2(T=82, x=0.5)
>>> print "%0.6g %0.5g %0.5g %0.4g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g %0.4g %0.4g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.kJkmol, st.Gas.hM.kJkmol, \
st.Liquido.sM.kJkmolK, st.Gas.sM.kJkmolK, st.Liquido.cvM.kJkmolK, st.Gas.cvM.kJkmolK, \
st.Liquido.cpM.kJkmolK, st.Gas.cpM.kJkmolK, st.Liquido.w, st.Gas.w)
82 0.16947 28.006 0.265 -3149.6 2254.2 82.736 148.64 29.65 21.92 57.93 32.59 803.7 178
>>> st=N2(T=100, x=0.5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g %0.4g %0.4g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.kJkmol, st.Gas.hM.kJkmol, \
st.Liquido.sM.kJkmolK, st.Gas.sM.kJkmolK, st.Liquido.cvM.kJkmolK, st.Gas.cvM.kJkmolK, \
st.Liquido.cpM.kJkmolK, st.Gas.cpM.kJkmolK, st.Liquido.w, st.Gas.w)
100 0.77827 24.608 1.1409 -2050.8 2458.6 94.576 139.67 27.54 23.95 64.93 42.09 605.2 183.3
>>> st=N2(T=120, x=0.5)
>>> print "%0.6g %0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g %0.4g %0.4g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.kJkmol, st.Gas.hM.kJkmol, \
st.Liquido.sM.kJkmolK, st.Gas.sM.kJkmolK, st.Liquido.cvM.kJkmolK, st.Gas.cvM.kJkmolK, \
st.Liquido.cpM.kJkmolK, st.Gas.cpM.kJkmolK, st.Liquido.w, st.Gas.w)
120 2.51058 18.682 4.4653 -500.6 2077.8 107.89 129.38 28.31 30.77 126.3 129.7 317.3 172.6
>>> st=N2(T=122, x=0.5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g %0.4g %0.4g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.kJkmol, st.Gas.hM.kJkmol, \
st.Liquido.sM.kJkmolK, st.Gas.sM.kJkmolK, st.Liquido.cvM.kJkmolK, st.Gas.cvM.kJkmolK, \
st.Liquido.cpM.kJkmolK, st.Gas.cpM.kJkmolK, st.Liquido.w, st.Gas.w)
122 2.7727 17.633 5.2696 -277.39 1933.5 109.62 127.74 29.38 32.78 163.7 187.6 276.5 169.5
>>> st=N2(T=124, x=0.5)
>>> print "%0.6g %0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g %0.4g %0.4g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.kJkmol, st.Gas.hM.kJkmol, \
st.Liquido.sM.kJkmolK, st.Gas.sM.kJkmolK, st.Liquido.cvM.kJkmolK, st.Gas.cvM.kJkmolK, \
st.Liquido.cpM.kJkmolK, st.Gas.cpM.kJkmolK, st.Liquido.w, st.Gas.w)
124 3.05618 16.23 6.4301 -3.0925 1714.7 111.71 125.56 31.83 36.12 271.2 356.6 227 164.7
>>> st=N2(T=126, x=0.5)
>>> print "%0.6g %0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g %0.4g %0.4g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.kJkmol, st.Gas.hM.kJkmol, \
st.Liquido.sM.kJkmolK, st.Gas.sM.kJkmolK, st.Liquido.cvM.kJkmolK, st.Gas.cvM.kJkmolK, \
st.Liquido.cpM.kJkmolK, st.Gas.cpM.kJkmolK, st.Liquido.w, st.Gas.w)
126 3.36453 13.281 9.1106 492.37 1194.9 115.5 121.08 43.4 47.44 3138 4521 151 148.4
"""
# Pag 1410
"""
>>> st=N2(T=63.170, P=1e5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g" % (\
st.T, st.rhoM, st.uM.kJkmol, st.hM.kJkmol, st.sM.kJkmolK, st.cvM.kJkmolK, st.cpM.kJkmolK, st.w)
63.17 30.96 -4222.8 -4219.6 67.955 32.95 56.02 995.6
>>> st=N2(T=250, P=2e5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g" % (\
st.T, st.rhoM, st.uM.kJkmol, st.hM.kJkmol, st.sM.kJkmolK, st.cvM.kJkmolK, st.cpM.kJkmolK, st.w)
250 0.096369 5174.6 7250 180.66 20.82 29.25 322.4
>>> st=N2(T=100, P=5e5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g" % (\
st.T, st.rhoM, st.uM.kJkmol, st.hM.kJkmol, st.sM.kJkmolK, st.cvM.kJkmolK, st.cpM.kJkmolK, st.w)
100 0.67319 1898.9 2641.7 144.67 22.4 35.23 191.8
>>> st=N2(T=100, P=1e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g" % (\
st.T, st.rhoM, st.uM.kJkmol, st.hM.kJkmol, st.sM.kJkmolK, st.cvM.kJkmolK, st.cpM.kJkmolK, st.w)
100 24.658 -2090.7 -2050.1 94.493 27.55 64.56 609.4
>>> st=N2(T=115, P=2e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g" % (\
st.T, st.rhoM, st.uM.kJkmol, st.hM.kJkmol, st.sM.kJkmolK, st.cvM.kJkmolK, st.cpM.kJkmolK, st.w)
115 20.704 -1063.9 -967.29 104.14 27.25 89.82 405.8
>>> st=N2(T=115, P=2.5e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g" % (\
st.T, st.rhoM, st.uM.kJkmol, st.hM.kJkmol, st.sM.kJkmolK, st.cvM.kJkmolK, st.cpM.kJkmolK, st.w)
115 21.031 -1112.9 -994.04 103.7 27.02 83.78 428.3
>>> st=N2(T=120, P=3e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g" % (\
st.T, st.rhoM, st.uM.kJkmol, st.hM.kJkmol, st.sM.kJkmolK, st.cvM.kJkmolK, st.cpM.kJkmolK, st.w)
120 19.312 -723.85 -568.5 107.11 27.49 104 355
>>> st=N2(T=125, P=3e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g" % (\
st.T, st.rhoM, st.uM.kJkmol, st.hM.kJkmol, st.sM.kJkmolK, st.cvM.kJkmolK, st.cpM.kJkmolK, st.w)
125 5.378 1479.3 2037.1 128.23 30.16 143.5 177.1
>>> st=N2(T=125, P=3.5e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g" % (\
st.T, st.rhoM, st.uM.kJkmol, st.hM.kJkmol, st.sM.kJkmolK, st.cvM.kJkmolK, st.cpM.kJkmolK, st.w)
125 16.765 -230.88 -22.11 111.34 29.23 174.4 265
>>> st=N2(T=300, P=5e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g" % (\
st.T, st.rhoM, st.uM.kJkmol, st.hM.kJkmol, st.sM.kJkmolK, st.cvM.kJkmolK, st.cpM.kJkmolK, st.w)
300 2.0113 5944.8 8430.7 158.34 21.14 31.38 363.4
>>> st=N2(T=600, P=2e7)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.4g %0.4g %0.4g" % (\
st.T, st.rhoM, st.uM.kJkmol, st.hM.kJkmol, st.sM.kJkmolK, st.cvM.kJkmolK, st.cpM.kJkmolK, st.w)
600 3.6638 12183 17641 167.51 22.12 31.58 553.9
""",
"R": 8.31451,
"cp": Fi1,
"ref": {"Tref": 298.15, "Pref": 101325., "ho": 8670, "so": 191.5},
"Tmin": Tt, "Tmax": 2000.0, "Pmax": 2200000.0, "rhomax": 53.15,
"Pmin": 12.5198, "rhomin": 30.957,
"nr1": [0.924803575275, -0.492448489428, 0.661883336938,
-0.192902649201e1, -0.622469309629e-1, 0.349943957581],
"d1": [1, 1, 2, 2, 3, 3],
"t1": [0.25, 0.875, 0.5, 0.875, 0.375, 0.75],
"nr2": [0.564857472498, -0.161720005987e1, -0.481395031883,
0.421150636384, -0.161962230825e-1, 0.172100994165,
0.735448924933e-2, 0.168077305479e-1, -0.107626664179e-2,
-0.137318088513e-1, 0.635466899859e-3, 0.304432279419e-2,
-0.435762336045e-1, -0.723174889316e-1, 0.389644315272e-1,
-0.212201363910e-1, 0.408822981509e-2, -0.551990017984e-4,
-0.462016716479e-1, -0.300311716011e-2, 0.368825891208e-1,
-0.255856846220e-2, 0.896915264558e-2, -0.441513370350e-2,
0.133722924858e-2, 0.264832491957e-3],
"d2": [1, 1, 1, 3, 3, 4, 6, 6, 7, 7, 8, 8, 1, 2, 3, 4, 5, 8, 4, 5, 5,
8, 3, 5, 6, 9],
"t2": [0.5, 0.75, 2., 1.25, 3.5, 1., 0.5, 3., 0., 2.75, 0.75, 2.5, 4.,
6., 6., 3., 3., 6., 16., 11., 15., 12., 12., 7., 4., 16.],
"c2": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3,
3, 4, 4, 4, 4],
"gamma2": [1]*26,
"nr3": [0.196688194015e2, -0.209115600730e2, 0.167788306989e-1,
0.262767566274e4],
"d3": [1, 1, 3, 2],
"t3": [0., 1., 2., 3.],
"alfa3": [20, 20, 15, 25],
"beta3": [325, 325, 300, 275],
"gamma3": [1.16, 1.16, 1.13, 1.25],
"epsilon3": [1]*4,
"nr4": []}
MBWR = {
"__type__": "MBWR",
"__name__": "MBWR equation of state for nitrogen of Younglove (1982).",
"__doi__": {"autor": "Younglove, B.A.",
"title": "Thermophysical Properties of Fluids. I. Argon, Ethylene, Parahydrogen, Nitrogen, Nitrogen Trifluoride, and Oxygen",
"ref": "J. Phys. Chem. Ref. Data, Vol. 11, Suppl. 1, pp. 1-11, 1982.",
"doi": ""},
"R": 8.31434,
"cp": CP1,
"Tmin": Tt, "Tmax": 1900.0, "Pmax": 1013000.0, "rhomax": 30.977,
"Pmin": 12.463, "rhomin": 30.977,
"b": [None, 0.1380297474657e-2, 0.1084506501349, -0.2471324064362e1,
0.3455257980807e2, -0.4279707690666e4, 0.1064911566998e-3,
-0.1140867079735e-1, 0.1444902497287e-3, 0.1871457567553e5,
0.8218876886831e-7, 0.2360990493348e-2, -0.5144803081201,
0.4914545013668e-4, -0.1151627162399e-2, -0.7168037246650,
0.7616667619500e-4, -0.1130930066213e-5, 0.3736831166831e-3,
-0.2039851507581e-5, -0.1719662008990e5, -0.1213055199748e6,
-0.9881399141428e2, 0.5619886893511e5, -0.1823043964118,
-0.2599826498477e1, -0.4191893423157e-3, -0.2596406670530,
-0.1258683201921e-6, 0.1049286599400e-4, -0.5458369305152e-9,
-0.7674511670597e-8, 0.5931232870994e-7]}
GERG = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for nitrogen of Kunz and Wagner (2004).",
"__doi__": {"autor": "Kunz, O., Wagner, W.",
"title": "The GERG-2008 Wide-Range Equation of State for \
Natural Gases and Other Mixtures: An Expansion of GERG-2004",
"ref": "J. Chem. Eng. Data, 2012, 57 (11), pp 3032–3091",
"doi": "10.1021/je300655b"},
"R": 8.314472,
"cp": Fi2,
"ref": "OTO",
"Tmin": Tt, "Tmax": 2000.0, "Pmax": 2200000.0, "rhomax": 53.15,
# "Pmin": 73.476, "rhomin": 29.249,
"nr1": [0.59889711801201, -0.16941557480731e1, 0.24579736191718,
-0.23722456755175, 0.17954918715141e-1, 0.14592875720215e-1],
"d1": [1, 1, 2, 2, 4, 4],
"t1": [0.125, 1.125, 0.375, 1.125, 0.625, 1.5],
"nr2": [0.10008065936206, 0.73157115385532, -0.88372272336366,
0.31887660246708, 0.20766491728799, -0.19379315454158e-1,
-0.16936641554983, 0.13546846041701, -0.33066712095307e-1,
-0.60690817018557e-1, 0.12797548292871e-1, 0.58743664107299e-2,
-0.18451951971969e-1, 0.47226622042472e-2, -0.52024079680599e-2,
0.43563505956635e-1, -0.36251690750939e-1, -0.28974026866543e-2],
"d2": [1, 1, 1, 2, 3, 6, 2, 3, 3, 4, 4, 2, 3, 4, 5, 6, 6, 7],
"t2": [0.625, 2.625, 2.75, 2.125, 2, 1.75, 4.5, 4.75, 5, 4, 4.5, 7.5,
14, 11.5, 26, 28, 30, 16],
"c2": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 6, 6, 6, 6],
"gamma2": [1]*18,
"nr3": [],
"nr4": []}
helmholtz3 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for nitrogen of Jacobsen et al. (1986).",
"__doi__": {"autor": "Jacobsen, R.T, Stewart, R.B., and Jahangiri, M.",
"title": "Thermodynamic properties of nitrogen from the freezing line to 2000 K at pressures to 1000 MPa",
"ref": "J. Phys. Chem. Ref. Data, 15(2):735-909, 1986",
"doi": "10.1007/BF00502385"},
"__test__":
# Table 21, Page 795
"""
>>> st=N2(T=63.15, x=0.5, eq=3)
>>> print "%0.6g %0.4g %0.5g %0.5g %0.5g %0.5g %0.4g %0.5g %0.4g %0.4g %0.3g %0.3g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, \
st.Liquido.cpM.JmolK, st.Liquido.w, st.Gas.w)
63.15 0.01253 31.046 0.02412 -4227.5 1806.3 67.89 163.43 31.29 23.94 56.56 33.27 1022 159
>>> st=N2(T=70, x=0.5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.4g %0.5g %0.4g %0.4g %0.3g %0.3g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, \
st.Liquido.cpM.JmolK, st.Liquido.w, st.Gas.w)
70 0.03857 29.98 0.06784 -3840.3 1980.5 73.70 156.85 30.64 25.24 56.46 35.36 933 166
>>> st=N2(T=80, x=0.5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.4g %0.5g %0.4g %0.4g %0.3g %0.3g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, \
st.Liquido.cpM.JmolK, st.Liquido.w, st.Gas.w)
80 0.13699 28.351 0.21801 -3268.9 2202.7 81.28 149.67 29.63 26.52 57.65 38.34 821 174
>>> st=N2(T=90, x=0.5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.4g %0.5g %0.4g %0.4g %0.3g %0.3g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, \
st.Liquido.cpM.JmolK, st.Liquido.w, st.Gas.w)
90 0.36066 26.581 0.53967 -2677.4 2368.8 88.15 144.22 28.64 27.02 60.18 41.59 713 179
>>> st=N2(T=100, x=0.5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.4g %0.5g %0.4g %0.4g %0.3g %0.3g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, \
st.Liquido.cpM.JmolK, st.Liquido.w, st.Gas.w)
100 0.77881 24.584 1.1436 -2050.5 2451.0 94.58 139.59 27.84 27.43 65.09 47.46 601 181
>>> st=N2(T=110, x=0.5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.4g %0.5g %0.4g %0.4g %0.3g %0.3g" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, \
st.Liquido.cpM.JmolK, st.Liquido.w, st.Gas.w)
110 1.4672 22.172 2.2377 -1357.1 2401.3 100.9 135.07 27.55 28.57 76.90 62.45 473 178
>>> st=N2(T=120, x=0.5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.1f %0.1f %0.2f %0.2f %0.2f %0.2f %0.2f %0.2f %0.0f %0.0f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, st.Gas.cvM.JmolK,\
st.Liquido.cpM.JmolK, st.Gas.cpM.JmolK, st.Liquido.w, st.Gas.w)
120 2.5125 18.643 4.4632 -493.19 2082.1 107.95 129.41 29.06 31.78 128.9 131.2 309 171
>>> st=N2(T=126, x=0.5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.1f %0.1f %0.2f %0.2f %0.2f %0.2f %0.0f %0.0f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.hM.Jmol, st.Gas.hM.Jmol, \
st.Liquido.sM.JmolK, st.Gas.sM.JmolK, st.Liquido.cvM.JmolK, st.Gas.cvM.JmolK, st.Liquido.w, st.Gas.w)
126 3.3664 13.304 9.1698 495.38 1194.9 115.53 121.08 37.66 39.64 168 159
"""
# Table 22, Page 799
"""
>>> st=N2(T=84, P=2e4, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
84 0.02882 1729.5 2423.6 168.03 20.84 29.33 186
>>> st=N2(T=1200, P=8e4, eq=3)
>>> print "%0.6g %0.5f %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
1200 0.00802 26800 36780 236.08 25.41 33.73 688
>>> st=N2(T=70, P=1e5, eq=3)
>>> print "%0.6g %0.5f %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
70 29.984 -3842.3 -3839.0 73.68 30.64 56.45 934
>>> st=N2(T=150, P=1.5e5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
150 0.12133 3085.8 4322.1 168.09 20.87 29.50 249
>>> st=N2(T=300, P=5e5, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
300 0.20064 6199.8 8691.9 178.31 20.85 29.35 354
>>> st=N2(T=102, P=1e6, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
102 24.173 -1960.1 -1918.8 95.79 27.71 66.39 580
>>> st=N2(T=150, P=2e6, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
150 1.8268 2764.8 3859.6 144.38 21.98 36.31 237
>>> st=N2(T=122, P=3e6, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
122 18.077 -492.64 -326.68 109.11 29.03 136.2 297
>>> st=N2(T=150, P=3e6, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.2f %0.2f %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
150 2.9663 2557.5 3568.8 139.59 22.73 42.46 232
>>> st=N2(T=144, P=4e6, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.4g %0.4g %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
144 4.8655 2080.2 2902.4 133.24 24.38 61.11 217
>>> st=N2(T=150, P=5e6, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.4g %0.4g %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
150 6.0248 2030.6 2860.5 131.7 24.53 66.06 227
>>> st=N2(T=1000, P=1e7, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.4g %0.4g %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
1000 1.1622 21730 30334 189.8 24.47 32.95 654
>>> st=N2(T=80, P=5e7, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.4g %0.4g %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
80 31.566 -3758.4 -2174.4 74.29 32.88 51.17 1117
>>> st=N2(T=150, P=1e8, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.4g %0.4g %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
150 28.212 -1230.3 2314.4 99.76 28.02 44.74 1053
>>> st=N2(T=700, P=5e8, eq=3)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.2f %0.4g %0.4g %0.0f" % (\
st.T, st.rhoM, st.uM.Jmol, st.hM.Jmol, st.sM.JmolK, st.cvM.JmolK, st.cpM.JmolK, st.w)
700 26.085 13376 32544 143.38 26.67 35.08 1544
""",
"R": 8.31434,
"cp": CP2,
"ref": {"Tref": 298.15, "Pref": 101.325, "ho": 8669, "so": 191.502},
"Tc": 126.193, "Pc": 3397.8, "rhoc": 11.177, "Tt": 63.148, "M": 28.0134,
"Tmin": Tt, "Tmax": 2000.0, "Pmax": 1000000.0, "rhomax": 30.96,
"Pmin": 12.52, "rhomin": 31.046,
"nr1": [0.9499541827, 0.2481718513, -0.2046287122, -0.1748429008,
0.6387017148, -0.5272986168, -0.2049741504e1, 0.5551383553e-1,
-0.8191106396e-3, -0.5032519699e-1, 0.2650110798, 0.7311459372e-1,
-0.2813080718e-1, 0.1659823569e-2, 0.6012817812e-1,
-0.3785445194, 0.1895290433, -0.7001895093e-2],
"d1": [1, 2, 3, 2, 3, 3, 1, 4, 6, 2, 1, 2, 4, 6, 2, 1, 2, 4],
"t1": [0.25, 0.25, 0.25, 0.5, 0.5, 0.75, 1, 1, 1, 1, 1.5, 2, 2, 2, 2,
3, 3, 3],
"nr2": [-0.4927710927e-1, 0.6512013679e-1, 0.113812194200,
-0.955140963197e-1, 0.2118354140e-1, -0.1100721771e-1,
0.1284432210e-1, -0.1054474910e-1, -0.1484600538e-3,
-0.5806483467e-2],
"d2": [1, 4, 1, 2, 4, 2, 4, 4, 2, 3],
"t2": [3, 4, 4, 5, 6, 8, 14, 18, 20, 22],
"c2": [3, 2, 3, 2, 2, 4, 4, 4, 4, 3],
"gamma2": [1]*10,
"nr3": [],
"nr4": []}
helmholtz4 = {
"__type__": "Helmholtz",
"__name__": "short Helmholtz equation of state for nitrogen of Span and Wagner (2003).",
"__doi__": {"autor": "Span, R., Wagner, W.",
"title": "Equations of state for technical applications. II. Results for nonpolar fluids.",
"ref": "Int. J. Thermophys. 24 (2003), 41 – 109.",
"doi": "10.1023/A:1022310214958"},
"__test__": """
>>> st=N2(T=700, rho=200, eq=4)
>>> print "%0.4f %0.3f %0.4f" % (st.cp0.kJkgK, st.P.MPa, st.cp.kJkgK)
1.0979 51.268 1.1719
>>> st2=N2(T=750, rho=100, eq=4)
>>> print "%0.2f %0.5f" % (st2.h.kJkg-st.h.kJkg, st2.s.kJkgK-st.s.kJkgK)
41.82 0.31052
""", # Table III, Pag 46
"R": 8.31451,
"cp": Fi1,
"ref": {"Tref": 298.15, "Pref": 101325., "ho": 8670, "so": 191.5},
"Tmin": Tt, "Tmax": 600.0, "Pmax": 100000.0, "rhomax": 53.15,
"Pmin": 12.566, "rhomin": 30.935,
"nr1": [0.92296567, -0.25575012e1, 0.64482463, 0.1083102e-1,
0.73924167e-1, 0.23532962e-3],
"d1": [1, 1, 1, 2, 3, 7],
"t1": [0.25, 1.125, 1.5, 1.375, 0.25, 0.875],
"nr2": [0.18024854, -0.45660299e-1, -0.1552106, -0.3811149e-1,
-0.31962422e-1, 0.15513532e-1],
"d2": [2, 5, 1, 4, 3, 4],
"t2": [0.625, 1.75, 3.625, 3.625, 14.5, 12],
"c2": [1, 1, 2, 2, 3, 3],
"gamma2": [1]*6,
"nr3": [],
"nr4": []}
helmholtz5 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for nitrogen of Sun and Ely (2004)",
"__doi__": {"autor": "Sun, L. and Ely, J.F.",
"title": "Universal equation of state for engineering application: Algorithm and application to non-polar and polar fluids",
"ref": "Fluid Phase Equilib., 222-223:107-118, 2004.",
"doi": "10.1016/j.fluid.2004.06.028"},
"R": 8.31451,
"cp": Fi1,
"ref": {"Tref": 298.15, "Pref": 101325., "ho": 8670, "so": 191.5},
"Tmin": Tt, "Tmax": 620.0, "Pmax": 800000.0, "rhomax": 40.,
"Pmin": 0.1, "rhomin": 40.,
"nr1": [9.57664698e-1, 8.68692283e-1, -2.88536117, 6.12953165e-2,
2.55919463e-4, 1.69423647e-2],
"d1": [1, 1, 1, 3, 7, 2],
"t1": [1.5, 0.25, 1.25, 0.25, 0.875, 1.375],
"nr2": [-4.43639900e-2, 1.37987734e-1, 2.77148365e-1, -1.44381707e-2,
-1.69955805e-1, 5.46894457e-3, -2.87747274e-2, -2.38630424e-2],
"d2": [1, 1, 2, 5, 1, 1, 4, 2],
"t2": [0, 2.375, 2., 2.125, 3.5, 6.5, 4.75, 12.5],
"c2": [1, 1, 1, 1, 2, 2, 2, 3],
"gamma2": [1]*8}
eq = helmholtz1, MBWR, GERG, helmholtz3, helmholtz4, helmholtz5
_PR = -0.004032
_surface = {"sigma": [0.02898], "exp": [1.246]}
_dielectric = {"eq": 3, "Tref": 273.16, "rhoref": 1000.,
"a0": [], "expt0": [], "expd0": [],
"a1": [4.3872, 0.00226], "expt1": [0, 1], "expd1": [1, 1],
"a2": [2.206, 1.135, -169., -35.83],
"expt2": [0, 1, 0, 1], "expd2": [2, 2, 3.1, 3.1]}
_melting = {"eq": 1, "Tref": Tt, "Pref": 12.523,
"Tmin": Tt, "Tmax": 2000.0,
"a1": [1, 12798.61, -12798.61], "exp1": [0, 1.78963, 0],
"a2": [], "exp2": [], "a3": [], "exp3": []}
_sublimation = {"eq": 3, "Tref": Tt, "Pref": 12.523,
"Tmin": Tt, "Tmax": Tt,
"a1": [], "exp1": [],
"a2": [-13.088692], "exp2": [1],
"a3": [], "exp3": []}
_vapor_Pressure = {
"eq": 6,
"ao": [-0.612445284e1, 0.126327220e1, -0.765910082, -0.177570564e1],
"exp": [2, 3, 5, 10]}
_liquid_Density = {
"eq": 4,
"ao": [0.148654237e1, -0.280476066, 0.894143085e-1, -0.119879866],
"exp": [0.9882, 2, 8, 17.5]}
_vapor_Density = {
"eq": 6,
"ao": [-0.170127164e1, -0.370402649e1, 0.129859383e1, -0.561424977,
-0.268505381e1],
"exp": [1.02, 2.5, 3.5, 6.5, 14]}
visco0 = {"eq": 1, "omega": 1,
"__name__": "Lemmon (2004)",
"__doi__": {"autor": "Lemmon, E.W. and Jacobsen, R.T.",
"title": "Viscosity and Thermal Conductivity Equations for Nitrogen, Oxygen, Argon, and Air",
"ref": "Int. J. Thermophys., 25:21-69, 2004.",
"doi": "10.1023/B:IJOT.0000022327.04529.f3"},
"__test__": """
>>> st=N2(T=100, rhom=0)
>>> print "%0.5f" % st.mu.muPas
6.90349
>>> st=N2(T=300, rhom=0)
>>> print "%0.4f" % st.mu.muPas
17.8771
>>> st=N2(T=100, rhom=28)
>>> print "%0.3f" % st.mu.muPas
79.7418
>>> st=N2(T=200, rhom=10)
>>> print "%0.4f" % st.mu.muPas
21.0810
>>> st=N2(T=300, rhom=5)
>>> print "%0.4f" % st.mu.muPas
20.7430
>>> st=N2(T=132.64, rhom=10.4)
>>> print "%0.4f" % st.mu.muPas
18.2978
""", # Table V, Pag 28
"Tref": 1., "etaref": 1, "rhoref": 1.*M,
"ek": 98.94, "sigma": 0.3656,
"Tref_res": 126.192, "rhoref_res": 11.1839*M, "etaref_res": 1,
"n_poly": [10.72, 0.03989, 0.001208, -7.402, 4.62],
"t_poly": [.1, .25, 3.2, .9, 0.3],
"d_poly": [2, 10, 12, 2, 1],
"g_poly": [0, 1, 1, 1, 1],
"c_poly": [0, 1, 1, 2, 3]}
visco1 = {"eq": 2, "omega": 2,
"collision": [-136.985150760851, 734.241371453542, -1655.39131952744,
2062.67809686969, -1579.52439123889, 777.942880032361,
-232.996787901831, 40.0691427576552, -2.99482706239363],
"__name__": "Younglove (1982)",
"__doi__": {"autor": "Younglove, B.A.",
"title": "Thermophysical Properties of Fluids. I. Argon, Ethylene, Parahydrogen, Nitrogen, Nitrogen Trifluoride, and Oxygen",
"ref": "J. Phys. Chem. Ref. Data, Vol. 11, Suppl. 1, pp. 1-11, 1982.",
"doi": ""},
"ek": 118., "sigma": 0.354,
"n_chapman": 0.141286429751707,
"t_chapman": 0.0,
"F": [-3.14276193277e-3, 9.22071479907e-4, 1.4, 118],
"E": [-12.128154129, 68.46443564, 11.2569594404402, -565.76279020055,
9.56677570672e-2, -.355533724265011, 618.536783201947],
"rhoc": 11.2435750999429}
visco2 = {"eq": 1, "omega": 1,
"collision": [0.46649, -0.57015, 0.19164, -0.03708, 0.00241],
"__name__": "Stephan (1987)",
"__doi__": {"autor": "Stephan, K., Krauss, R., and Laesecke, A.",
"title": "Viscosity and Thermal Conductivity of Nitrogen for a Wide Range of Fluid States",
"ref": "J. Phys. Chem. Ref. Data, 16(4):993-1023, 1987.",
"doi": "10.1063/1.555798"},
"__test__": """
>>> st=N2(T=80, P=1e5, visco=2)
>>> print "%0.2f" % st.mu.muPas
5.24
>>> st=N2(T=80, P=1e7, visco=2)
>>> print "%0.2f" % st.mu.muPas
153.45
>>> st=N2(T=300, P=1e6, visco=2)
>>> print "%0.2f" % st.mu.muPas
18.03
>>> st=N2(T=1100, P=1e7, visco=2)
>>> print "%0.2f" % st.mu.muPas
44.67
>>> st=N2(T=100, P=4.5e7, visco=2)
>>> print "%0.2f" % st.mu.muPas
155.58
>>> st=N2(T=80, P=2e7, visco=2)
>>> print "%0.2f" % st.mu.muPas
28.37
>>> st=N2(T=200, P=5e7, visco=2)
>>> print "%0.2f" % st.mu.muPas
49.34
>>> st=N2(T=1100, P=1e8, visco=2)
>>> print "%0.2f" % st.mu.muPas
50.20
""", # Table A1, Pag 1013
"Tref": 1., "etaref": 1,
"ek": 100.01654, "sigma": 0.36502496,
"n_chapman": 0.141290/M**0.5,
"Tref_res": 1, "rhoref_res": 11.2088889*M, "etaref_res": 14.,
"n_poly": [-5.8470232, -1.4470051, -0.27766561e-1, -0.21662362],
"t_poly": [0, 0, 0, 0],
"d_poly": [0, 1, 2, 3],
"g_poly": [0, 0, 0, 0],
"c_poly": [0, 0, 0, 0],
"n_num": [-20.09997],
"t_num": [0],
"d_num": [0],
"g_num": [0],
"c_num": [0],
"n_den": [1.0, -3.4376416],
"t_den": [0, 0],
"d_den": [1, 0],
"g_den": [0, 0],
"c_den": [0, 0]}
_viscosity = visco0, visco1, visco2
thermo0 = {"eq": 1,
"__name__": "Lemmon (2004)",
"__doi__": {"autor": "Lemmon, E.W. and Jacobsen, R.T.",
"title": "Viscosity and Thermal Conductivity Equations for Nitrogen, Oxygen, Argon, and Air",
"ref": "Int. J. Thermophys., 25:21-69, 2004.",
"doi": "10.1023/B:IJOT.0000022327.04529.f3"},
"__test__": """
>>> st=N2(T=100, rhom=0)
>>> print "%0.5f" % st.k.mWmK
9.27749
>>> st=N2(T=300, rhom=0)
>>> print "%0.4f" % st.k.mWmK
25.9361
>>> st=N2(T=100, rhom=25)
>>> print "%0.3f" % st.k.mWmK
103.834
>>> st=N2(T=200, rhom=10)
>>> print "%0.4f" % st.k.mWmK
36.0099
>>> st=N2(T=300, rhom=5)
>>> print "%0.4f" % st.k.mWmK
32.7694
>>> st=N2(T=126.195, rhom=11.18)
>>> print "%0.4f" % st.k.mWmK
675.8
""", # Table V, Pag 28
"Tref": 126.192, "kref": 1e-3,
"no": [1.511, 2.117, -3.332],
"co": [-97, -1, -0.7],
"Trefb": 126.192, "rhorefb": 11.1839, "krefb": 1e-3,
"nb": [8.862, 31.11, -73.13, 20.03, -0.7096, 0.2672],
"tb": [0, 0.03, 0.2, 0.8, 0.6, 1.9],
"db": [1, 2, 3, 4, 8, 10],
"cb": [0, 0, 1, 2, 2, 2],
"critical": 3,
"gnu": 0.63, "gamma": 1.2415, "R0": 1.01,
"Xio": 0.17e-9, "gam0": 0.055, "qd": 0.40e-9, "Tcref": 252.384}
thermo1 = {"eq": 3,
"__name__": "Younglove (1982)",
"__doi__": {"autor": "Younglove, B.A.",
"title": "Thermophysical Properties of Fluids. I. Argon, Ethylene, Parahydrogen, Nitrogen, Nitrogen Trifluoride, and Oxygen",
"ref": "J. Phys. Chem. Ref. Data, Vol. 11, Suppl. 1, pp. 1-11, 1982.",
"doi": ""},
"ek": 118, "sigma": 0.354,
"Nchapman": 0.141286429751707,
"tchapman": 0,
"b": [-.15055520615565, 0.183477124982509, 1.45008451566007,
-4.88031780663869, 6.68390592664363, -4.90242883649539,
2.02630917877999, -.439826733340102, 3.91906706514e-2],
"F": [1.50938067650e-3, 1.70975795748e-4, 1.2, 118],
"E": [-38.613291627, -31.826109485, 26.0197970589236,
-27.2869897441495, 0, 0, 0],
"rhoc": 35.6938892061679,
"ff": 1.67108,
"rm": 0.00000003933}
thermo2 = {"eq": 1, "critical": 0,
"__name__": "Stephan (1987)",
"__doi__": {"autor": "Stephan, K., Krauss, R., and Laesecke, A.",
"title": "Viscosity and Thermal Conductivity of Nitrogen for a Wide Range of Fluid States",
"ref": "J. Phys. Chem. Ref. Data, 16(4):993-1023, 1987.",
"doi": "10.1063/1.555798"},
"__test__": """
>>> st=N2(T=80, P=1e5, thermo=2)
>>> print "%0.2f" % st.k.mWmK
7.73
>>> st=N2(T=80, P=1e7, thermo=2)
>>> print "%0.2f" % st.k.mWmK
153.7
>>> st=N2(T=300, P=1e6, thermo=2)
>>> print "%0.2f" % st.k.mWmK
26.51
>>> st=N2(T=1100, P=1e7, thermo=2)
>>> print "%0.2f" % st.k.mWmK
72.32
>>> st=N2(T=100, P=4.5e7, thermo=2)
>>> print "%0.2f" % st.k.mWmK
166.13
>>> st=N2(T=80, P=2e7, thermo=2)
>>> print "%0.2f" % st.k.mWmK
162.75
>>> st=N2(T=200, P=5e7, thermo=2)
>>> print "%0.2f" % st.k.mWmK
80.58
>>> st=N2(T=1100, P=1e8, thermo=2)
>>> print "%0.2f" % st.k.mWmK
83.68
""", # Table B1, Pag 1018
"Tref": 1, "kref": 1e-3,
"no": [0.6950401, 0.03643102],
"co": [-97, -98],
"Trefb": 1, "rhorefb": 11.2088889, "krefb": 4.17e-3,
"nb": [3.3373542, 0.37098251, 0.89913456, 0.16972505],
"tb": [0, 0, 0, 0],
"db": [1, 2, 3, 4],
"cb": [0, 0, 0, 0]}
_thermal = thermo0, thermo1, thermo2
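# Usage sketch (illustrative, not part of the original file). The keyword
# names eq/visco and the unit attributes below are taken from this file's
# own doctests; the state values are invented:
#   st = N2(T=300, P=1e5)            # default equation of state
#   st = N2(T=300, P=1e5, eq=3)      # Jacobsen et al. (1986) formulation
#   st = N2(T=300, P=1e6, visco=2)   # Stephan (1987) viscosity correlation
#   print st.rhoM, st.mu.muPas, st.k.mWmK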
|
edusegzy/pychemqt
|
lib/mEoS/N2.py
|
Python
|
gpl-3.0
| 39,327
|
[
"Jmol"
] |
33ffa27ae167d1669d7ff79b932bdafdef70f344e813ebe4f8a82ecf384659d7
|
# Copyright 2009 by Osvaldo Zagordi. All rights reserved.
# Revisions copyright 2010 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Command line wrapper for the short read aligner Novoalign by Novocraft."""
from __future__ import print_function
from Bio.Application import _Option, AbstractCommandline
class NovoalignCommandline(AbstractCommandline):
"""Command line wrapper for novoalign by Novocraft.
See www.novocraft.com - novoalign is a short read alignment program.
Example:
>>> from Bio.Sequencing.Applications import NovoalignCommandline
>>> novoalign_cline = NovoalignCommandline(database='some_db',
... readfile='some_seq.txt')
>>> print(novoalign_cline)
novoalign -d some_db -f some_seq.txt
As with all the Biopython application wrappers, you can also add or
change options after creating the object:
>>> novoalign_cline.format = 'PRBnSEQ'
>>> novoalign_cline.r_method='0.99' # limited valid values
>>> novoalign_cline.fragment = '250 20' # must be given as a string
>>> novoalign_cline.miRNA = 100
>>> print(novoalign_cline)
novoalign -d some_db -f some_seq.txt -F PRBnSEQ -r 0.99 -i 250 20 -m 100
You would typically run the command line with novoalign_cline() or via
the Python subprocess module, as described in the Biopython tutorial.
Last checked against version: 2.05.04
"""
def __init__(self, cmd="novoalign", **kwargs):
READ_FORMAT = ['FA', 'SLXFQ', 'STDFQ', 'ILMFQ', 'PRB', 'PRBnSEQ']
REPORT_FORMAT = ['Native', 'Pairwise', 'SAM']
REPEAT_METHOD = ['None', 'Random', 'All', 'Exhaustive', '0.99']
self.parameters = [
_Option(["-d", "database"],
"database filename",
filename=True,
equate=False),
_Option(["-f", "readfile"],
"read file",
filename=True,
equate=False),
_Option(["-F", "format"],
"Format of read files.\n\nAllowed values: %s"
% ", ".join(READ_FORMAT),
checker_function=lambda x: x in READ_FORMAT,
equate=False),
# Alignment scoring options
_Option(["-t", "threshold"],
"Threshold for alignment score",
checker_function=lambda x: isinstance(x, int),
equate=False),
_Option(["-g", "gap_open"],
"Gap opening penalty [default: 40]",
checker_function=lambda x: isinstance(x, int),
equate=False),
_Option(["-x", "gap_extend"],
"Gap extend penalty [default: 15]",
checker_function=lambda x: isinstance(x, int),
equate=False),
_Option(["-u", "unconverted"],
"Experimental: unconverted cytosines penalty in bisulfite mode\n\n"
"Default: no penalty",
checker_function=lambda x: isinstance(x, int),
equate=False),
# Quality control and read filtering
_Option(["-l", "good_bases"],
"Minimum number of good quality bases [default: log(N_g, 4) + 5]",
checker_function=lambda x: isinstance(x, int),
equate=False),
_Option(["-h", "homopolymer"],
"Homopolymer read filter [default: 20; disable: negative value]",
checker_function=lambda x: isinstance(x, int),
equate=False),
# Read preprocessing options
_Option(["-a", "adapter3"],
"Strips a 3' adapter sequence prior to alignment.\n\n"
"With paired ends two adapters can be specified",
checker_function=lambda x: isinstance(x, str),
equate=False),
_Option(["-n", "truncate"],
"Truncate to specific length before alignment",
checker_function=lambda x: isinstance(x, int),
equate=False),
_Option(["-s", "trimming"],
"If fail to align, trim by s bases until they map or become shorter than l.\n\n"
"Ddefault: 2",
checker_function=lambda x: isinstance(x, int),
equate=False),
_Option(["-5", "adapter5"],
"Strips a 5' adapter sequence.\n\n"
"Similar to -a (adaptor3), but on the 5' end.",
checker_function=lambda x: isinstance(x, str),
equate=False),
# Reporting options
_Option(["-o", "report"],
"Specifies the report format.\n\nAllowed values: %s\nDefault: Native"
% ", ".join(REPORT_FORMAT),
checker_function=lambda x: x in REPORT_FORMAT,
equate=False),
_Option(["-Q", "quality"],
"Lower threshold for an alignment to be reported [default: 0]",
checker_function=lambda x: isinstance(x, int),
equate=False),
_Option(["-R", "repeats"],
"If score difference is higher, report repeats.\n\n"
"Otherwise -r read method applies [default: 5]",
checker_function=lambda x: isinstance(x, int),
equate=False),
_Option(["-r", "r_method"],
"Methods to report reads with multiple matches.\n\n"
"Allowed values: %s\n"
"'All' and 'Exhaustive' accept limits."
% ", ".join(REPEAT_METHOD),
checker_function=lambda x: x.split()[0] in REPEAT_METHOD,
equate=False),
_Option(["-e", "recorded"],
"Alignments recorded with score equal to the best.\n\n"
"Default: 1000 in default read method, otherwise no limit.",
checker_function=lambda x: isinstance(x, int),
equate=False),
_Option(["-q", "qual_digits"],
"Decimal digits for quality scores [default: 0]",
checker_function=lambda x: isinstance(x, int),
equate=False),
# Paired end options
_Option(["-i", "fragment"],
"Fragment length (2 reads + insert) and standard deviation [default: 250 30]",
checker_function=lambda x: len(x.split()) == 2,
equate=False),
_Option(["-v", "variation"],
"Structural variation penalty [default: 70]",
checker_function=lambda x: isinstance(x, int),
equate=False),
# miRNA mode
_Option(["-m", "miRNA"],
"Sets miRNA mode and optionally sets a value for the region scanned [default: off]",
checker_function=lambda x: isinstance(x, int),
equate=False),
# Multithreading
_Option(["-c", "cores"],
"Number of threads, disabled on free versions [default: number of cores]",
checker_function=lambda x: isinstance(x, int),
equate=False),
# Quality calibrations
_Option(["-k", "read_cal"],
"Read quality calibration from file (mismatch counts)",
checker_function=lambda x: isinstance(x, str),
equate=False),
_Option(["-K", "write_cal"],
"Accumulate mismatch counts and write to file",
checker_function=lambda x: isinstance(x, str),
equate=False),
]
AbstractCommandline.__init__(self, cmd, **kwargs)
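# Illustrative sketch (not original code): a built command line can be run
# directly, since Biopython AbstractCommandline instances are callable and
# return (stdout, stderr); novoalign must be on the PATH:
#   cline = NovoalignCommandline(database="some_db", readfile="some_seq.txt")
#   stdout, stderr = cline()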
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
|
zjuchenyuan/BioWeb
|
Lib/Bio/Sequencing/Applications/_Novoalign.py
|
Python
|
mit
| 8,246
|
[
"Biopython"
] |
a1be22cab51f2419d77abea4f39e55444d54b9a93fa0e7233ad11eb892ead50f
|
#!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import pymysql
import dbInfo
import optparse
import smtplib
from email.message import EmailMessage
from smtplib import SMTPRecipientsRefused
import time
from datetime import timedelta, datetime
import mailInfo
emailIDs = ['spawns', 'activity']
def ghConn():
conn = pymysql.connect(host = dbInfo.DB_HOST,
db = dbInfo.DB_NAME,
user = dbInfo.DB_USER,
passwd = dbInfo.DB_PASS)
conn.autocommit(True)
return conn
def sendAlertMail(conn, userID, msgText, link, alertID, alertTitle, emailIndex):
# Don't try to send mail if we exceeded quota within last hour
lastFailureTime = datetime(2000, 1, 1, 12)
currentTime = datetime.fromtimestamp(time.time())
timeSinceFailure = currentTime - lastFailureTime
try:
f = open("last_notification_failure_" + emailIDs[emailIndex] + ".txt")
lastFailureTime = datetime.strptime(f.read().strip(), "%Y-%m-%d %H:%M:%S")
f.close()
timeSinceFailure = currentTime - lastFailureTime
except IOError as e:
sys.stdout.write("No last failure time\n")
if timeSinceFailure.days < 1 and timeSinceFailure.seconds < 3660:
sys.stderr.write(str(timeSinceFailure.seconds) + " seconds since last failure (< 3660); not sending mail.\n")
return 1
# look up the user email
cursor = conn.cursor()
cursor.execute("SELECT emailAddress FROM tUsers WHERE userID='" + userID + "';")
row = cursor.fetchone()
if row is None:
result = "bad username"
else:
email = row[0]
if (email.find("@") > -1 and email.find(".") > -1):
# send message
message = EmailMessage()
message['From'] = "\"Galaxy Harvester Alerts\" <" + emailIDs[emailIndex] + "@galaxyharvester.net>"
message['To'] = email
message['Subject'] = "".join(("Galaxy Harvester ", alertTitle))
message.set_content("".join(("Hello ", userID, ",\n\n", msgText, "\n\n", link, "\n\n You can manage your alerts at http://galaxyharvester.net/myAlerts.py\n")))
message.add_alternative("".join(("<div><img src='http://galaxyharvester.net/images/ghLogoLarge.png'/></div><p>Hello ", userID, ",</p><br/><p>", msgText.replace("\n", "<br/>"), "</p><p><a style='text-decoration:none;' href='", link, "'><div style='width:170px;font-size:18px;font-weight:600;color:#feffa1;background-color:#003344;padding:8px;margin:4px;border:1px solid black;'>View in Galaxy Harvester</div></a><br/>or copy and paste link: ", link, "</p><br/><p>You can manage your alerts at <a href='http://galaxyharvester.net/myAlerts.py'>http://galaxyharvester.net/myAlerts.py</a></p><p>-Galaxy Harvester Administrator</p>")), subtype='html')
mailer = smtplib.SMTP(mailInfo.MAIL_HOST)
mailer.login(emailIDs[emailIndex] + "@galaxyharvester.net", mailInfo.MAIL_PASS)
try:
mailer.send_message(message)
result = 'email sent'
except SMTPRecipientsRefused as e:
result = 'email failed'
sys.stderr.write('Email failed - ' + str(e))
trackEmailFailure(datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S"), emailIndex)
mailer.quit()
# update alert status
if ( result == 'email sent' ):
cursor.execute('UPDATE tAlerts SET alertStatus=1, statusChanged=NOW() WHERE alertID=' + str(alertID) + ';')
else:
result = 'Invalid email.'
cursor.close()
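# Editorial sketch (not original code): the SELECT above interpolates userID
# directly into the SQL string; pymysql also supports parameterized queries:
#   cursor.execute("SELECT emailAddress FROM tUsers WHERE userID=%s", (userID,))
# which sidesteps quoting and injection issues.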
def main():
emailIndex = 0
# check for command line argument for email to use
if len(sys.argv) > 1:
emailIndex = int(sys.argv[1])
conn = ghConn()
# try sending any backed up alert mails
retryPendingMail(conn, emailIndex)
def trackEmailFailure(failureTime, emailIndex):
# Update tracking file
try:
f = open("last_notification_failure_" + emailIDs[emailIndex] + ".txt", "w")
f.write(failureTime)
f.close()
except IOError as e:
sys.stderr.write("Could not write email failure tracking file")
def retryPendingMail(conn, emailIndex):
# open email alerts that have not been successfully sent and are less than 4 days old
minTime = datetime.fromtimestamp(time.time()) - timedelta(days=4)
cursor = conn.cursor()
cursor.execute("SELECT userID, alertTime, alertMessage, alertLink, alertID FROM tAlerts WHERE alertType=2 AND alertStatus=0 and alertTime > '" + minTime.strftime("%Y-%m-%d %H:%M:%S") + "' and alertMessage LIKE '% - %';")
row = cursor.fetchone()
# try to send as long as not exceeding quota
while row is not None:
fullText = row[2]
splitPos = fullText.find(" - ")
alertTitle = fullText[:splitPos]
alertBody = fullText[splitPos+3:]
result = sendAlertMail(conn, row[0], alertBody, row[3], row[4], alertTitle, emailIndex)
if result == 1:
sys.stderr.write("Delayed retrying rest of mail since quota reached.\n")
break
row = cursor.fetchone()
cursor.close()
if __name__ == "__main__":
main()
|
pwillworth/galaxyharvester
|
catchupMail.py
|
Python
|
gpl-3.0
| 5,380
|
[
"Galaxy"
] |
64a2ca6a9f6606be227bbb060ee2364869e39ef1ee84c00359bbe5f1e1aa337f
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
import traceback
import uuid
import webob
from oslo_config import cfg
from pecan.hooks import PecanHook
from six.moves.urllib import parse as urlparse
from webob import exc
from st2common import log as logging
from st2common.persistence.auth import User
from st2common.exceptions import db as db_exceptions
from st2common.exceptions import auth as auth_exceptions
from st2common.exceptions import rbac as rbac_exceptions
from st2common.exceptions.apivalidation import ValueValidationException
from st2common.util.jsonify import json_encode
from st2common.util.auth import validate_token
from st2common.constants.api import REQUEST_ID_HEADER
from st2common.constants.auth import HEADER_ATTRIBUTE_NAME
from st2common.constants.auth import QUERY_PARAM_ATTRIBUTE_NAME
LOG = logging.getLogger(__name__)
# A list of method names for which we don't want to log the result / response
RESPONSE_LOGGING_METHOD_NAME_BLACKLIST = [
'get_all'
]
# A list of controller classes for which we don't want to log the result / response
RESPONSE_LOGGING_CONTROLLER_NAME_BLACKLIST = [
'ActionExecutionChildrenController', # action executions can be big
'ActionExecutionAttributeController', # result can be big
'ActionExecutionsController' # action executions can be big
]
class CorsHook(PecanHook):
def after(self, state):
headers = state.response.headers
origin = state.request.headers.get('Origin')
origins = set(cfg.CONF.api.allow_origin)
# Build a list of the default allowed origins
public_api_url = cfg.CONF.auth.api_url
# Default gulp development server WebUI URL
origins.add('http://localhost:3000')
# By default WebUI simple http server listens on 8080
origins.add('http://localhost:8080')
origins.add('http://127.0.0.1:8080')
if public_api_url:
# Public API URL
origins.add(public_api_url)
if origin:
if '*' in origins:
origin_allowed = '*'
else:
# See http://www.w3.org/TR/cors/#access-control-allow-origin-response-header
origin_allowed = origin if origin in origins else 'null'
else:
origin_allowed = list(origins)[0]
methods_allowed = ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS']
request_headers_allowed = ['Content-Type', 'Authorization', 'X-Auth-Token',
REQUEST_ID_HEADER]
response_headers_allowed = ['Content-Type', 'X-Limit', 'X-Total-Count',
REQUEST_ID_HEADER]
headers['Access-Control-Allow-Origin'] = origin_allowed
headers['Access-Control-Allow-Methods'] = ','.join(methods_allowed)
headers['Access-Control-Allow-Headers'] = ','.join(request_headers_allowed)
headers['Access-Control-Expose-Headers'] = ','.join(response_headers_allowed)
if not headers.get('Content-Length') \
and not headers.get('Content-type', '').startswith('text/event-stream'):
headers['Content-Length'] = str(len(state.response.body))
def on_error(self, state, e):
if state.request.method == 'OPTIONS':
return webob.Response()
class AuthHook(PecanHook):
def before(self, state):
# OPTIONS requests doesn't need to be authenticated
if state.request.method == 'OPTIONS':
return
token_db = self._validate_token(request=state.request)
try:
user_db = User.get(token_db.user)
except ValueError:
# User doesn't exist - we should probably also invalidate token if
# this happens
user_db = None
# Store token and related user object in the context
# Note: We also store token outside of auth dict for backward compatibility
state.request.context['token'] = token_db
state.request.context['auth'] = {
'token': token_db,
'user': user_db
}
if QUERY_PARAM_ATTRIBUTE_NAME in state.arguments.keywords:
del state.arguments.keywords[QUERY_PARAM_ATTRIBUTE_NAME]
def on_error(self, state, e):
if isinstance(e, auth_exceptions.TokenNotProvidedError):
LOG.exception('Token is not provided.')
return self._abort_unauthorized()
if isinstance(e, auth_exceptions.TokenNotFoundError):
LOG.exception('Token is not found.')
return self._abort_unauthorized()
if isinstance(e, auth_exceptions.TokenExpiredError):
LOG.exception('Token has expired.')
return self._abort_unauthorized()
@staticmethod
def _abort_unauthorized():
body = json_encode({
'faultstring': 'Unauthorized'
})
headers = {}
headers['Content-Type'] = 'application/json'
status = httplib.UNAUTHORIZED
return webob.Response(body=body, status=status, headers=headers)
@staticmethod
def _abort_other_errors():
body = json_encode({
'faultstring': 'Internal Server Error'
})
headers = {}
headers['Content-Type'] = 'application/json'
status = httplib.INTERNAL_SERVER_ERROR
return webob.Response(body=body, status=status, headers=headers)
@staticmethod
def _validate_token(request):
"""
Validate token provided either in headers or query parameters.
"""
headers = request.headers
query_string = request.query_string
query_params = dict(urlparse.parse_qsl(query_string))
token_in_headers = headers.get(HEADER_ATTRIBUTE_NAME, None)
token_in_query_params = query_params.get(QUERY_PARAM_ATTRIBUTE_NAME, None)
return validate_token(token_in_headers=token_in_headers,
token_in_query_params=token_in_query_params)
class JSONErrorResponseHook(PecanHook):
"""
Handle all the errors and respond with JSON.
"""
def on_error(self, state, e):
if hasattr(e, 'body') and isinstance(e.body, dict):
body = e.body
else:
body = {}
if isinstance(e, exc.HTTPException):
status_code = state.response.status
message = str(e)
elif isinstance(e, db_exceptions.StackStormDBObjectNotFoundError):
status_code = httplib.NOT_FOUND
message = str(e)
elif isinstance(e, db_exceptions.StackStormDBObjectConflictError):
status_code = httplib.CONFLICT
message = str(e)
body['conflict-id'] = e.conflict_id
elif isinstance(e, rbac_exceptions.AccessDeniedError):
status_code = httplib.FORBIDDEN
message = str(e)
elif isinstance(e, (ValueValidationException, ValueError)):
status_code = httplib.BAD_REQUEST
message = getattr(e, 'message', str(e))
else:
status_code = httplib.INTERNAL_SERVER_ERROR
message = 'Internal Server Error'
# Log the error
is_internal_server_error = status_code == httplib.INTERNAL_SERVER_ERROR
error_msg = getattr(e, 'comment', str(e))
extra = {
'exception_class': e.__class__.__name__,
'exception_message': str(e),
'exception_data': e.__dict__
}
if is_internal_server_error:
LOG.exception('API call failed: %s', error_msg, extra=extra)
LOG.exception(traceback.format_exc())
else:
LOG.debug('API call failed: %s', error_msg, extra=extra)
if cfg.CONF.debug:
LOG.debug(traceback.format_exc())
body['faultstring'] = message
response_body = json_encode(body)
headers = state.response.headers or {}
headers['Content-Type'] = 'application/json'
headers['Content-Length'] = str(len(response_body))
return webob.Response(response_body, status=status_code, headers=headers)
class LoggingHook(PecanHook):
"""
Logs all incoming requests and outgoing responses
"""
def before(self, state):
# Note: We use getattr since in some places (tests) request is mocked
method = getattr(state.request, 'method', None)
path = getattr(state.request, 'path', None)
remote_addr = getattr(state.request, 'remote_addr', None)
# Log the incoming request
values = {'method': method, 'path': path, 'remote_addr': remote_addr}
values['filters'] = state.arguments.keywords
request_id = state.request.headers.get(REQUEST_ID_HEADER, None)
values['request_id'] = request_id
LOG.info('%(request_id)s - %(method)s %(path)s with filters=%(filters)s' %
values, extra=values)
def after(self, state):
# Note: We use getattr since in some places (tests) request is mocked
method = getattr(state.request, 'method', None)
path = getattr(state.request, 'path', None)
remote_addr = getattr(state.request, 'remote_addr', None)
request_id = state.request.headers.get(REQUEST_ID_HEADER, None)
# Log the outgoing response
values = {'method': method, 'path': path, 'remote_addr': remote_addr}
values['status_code'] = state.response.status
values['request_id'] = request_id
if hasattr(state.controller, 'im_self'):
function_name = state.controller.im_func.__name__
controller_name = state.controller.im_class.__name__
log_result = True
log_result &= function_name not in RESPONSE_LOGGING_METHOD_NAME_BLACKLIST
log_result &= controller_name not in RESPONSE_LOGGING_CONTROLLER_NAME_BLACKLIST
else:
log_result = False
if log_result:
values['result'] = state.response.body
log_msg = '%(request_id)s - %(method)s %(path)s result=%(result)s' % values
else:
# Note: We don't want to include a result for some
# methods which have a large result
log_msg = '%(request_id)s - %(method)s %(path)s' % values
LOG.info(log_msg, extra=values)
class RequestIDHook(PecanHook):
"""
If a request ID header isn't present, this hook adds one.
"""
def before(self, state):
headers = getattr(state.request, 'headers', None)
if headers:
req_id_header = getattr(headers, REQUEST_ID_HEADER, None)
if not req_id_header:
req_id = str(uuid.uuid4())
state.request.headers[REQUEST_ID_HEADER] = req_id
def after(self, state):
req_headers = getattr(state.request, 'headers', None)
resp_headers = getattr(state.response, 'headers', None)
if req_headers and resp_headers:
req_id_header = req_headers.get(REQUEST_ID_HEADER, None)
if req_id_header:
resp_headers[REQUEST_ID_HEADER] = req_id_header
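# Wiring sketch (hypothetical; the real registration happens in the st2 app
# setup, not in this module). Pecan hooks are passed when the WSGI app is
# built, along these lines:
#   import pecan
#   app = pecan.make_app(root_controller, hooks=[
#       RequestIDHook(), LoggingHook(), CorsHook(),
#       AuthHook(), JSONErrorResponseHook(),
#   ])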
|
Itxaka/st2
|
st2common/st2common/hooks.py
|
Python
|
apache-2.0
| 11,784
|
[
"GULP"
] |
01be3c135be2088b077d877103932cc3fd3973ac07aeacde92de98a08c7c239b
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
# import funkcí z jiného adresáře
import os
import os.path
import pytest
path_to_script = os.path.dirname(os.path.abspath(__file__))
import unittest
import numpy as np
import sys
try:
import skelet3d
data3d = np.ones([3, 7, 9])
data3d[:, 3, 3:6] = 0
skelet3d.skelet3d(data3d)
skelet3d_installed = True
# skelet3d
except Exception:
skelet3d_installed = False
logger.warning("skelet3d is not working")
try:
import larcc
larcc_installed = True
except Exception:
larcc_installed = False
logger.warning("larcc is not working")
import teigen.tree
from teigen.tree import TreeBuilder
# There is some problem with VTK. Code seems to be fine but it fails
# Generic Warning: In /tmp/vtk20150408-2435-1y7p97u/VTK-6.2.0/Common/Core/vtkObjectBase.cxx, line 93
# Trying to delete object with non-zero reference count.
# ERROR: In /tmp/vtk20150408-2435-1y7p97u/VTK-6.2.0/Common/Core/vtkObject.cxx, line 156
# vtkObject (0x11a26e760): Trying to delete object with non-zero reference count.
VTK_MALLOC_PROBLEM = True
#
class TubeTreeTest(unittest.TestCase):
def setUp(self):
self.interactiveTests = False
# self.interactiveTests = True
@pytest.mark.LAR
@unittest.skipIf(not ("larcc" in sys.modules), "larcc is not installed")
def test_vessel_tree_lar(self):
import teigen.tb_lar
tvg = TreeBuilder(teigen.tb_lar.TBLar)
yaml_path = os.path.join(path_to_script, "./hist_stats_test.yaml")
tvg.importFromYaml(yaml_path)
tvg.voxelsize_mm = [1, 1, 1]
tvg.shape = [100, 100, 100]
output = tvg.buildTree() # noqa
if self.interactiveTests:
tvg.show()
@unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
def test_nothing(self):
logger.debug("skelet3d_installed", skelet3d_installed)
# import ipdb; ipdb.set_trace()
self.assertTrue(False)
# @unittest.skip("test debug")
@unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
def test_vessel_tree_vtk(self):
tvg = TreeBuilder('vtk')
yaml_path = os.path.join(path_to_script, "./hist_stats_test.yaml")
tvg.importFromYaml(yaml_path)
tvg.voxelsize_mm = [1, 1, 1]
tvg.shape = [100, 100, 100]
output = tvg.buildTree() # noqa
# tvg.show()
# tvg.saveToFile("tree_output.vtk")
@unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
@unittest.skipIf(not ("skelet3d" in sys.modules), "skelet3d is not installed")
@unittest.skipIf(not skelet3d_installed, "skelet3d is not installed")
def test_vessel_tree_vtk_from_skeleton(self):
logger.debug("skelet3d_installed", skelet3d_installed)
import skelet3d
import skelet3d.skeleton_analyser
import shutil
fn_out = 'tree.vtk'
if os.path.exists(fn_out):
os.remove(fn_out)
volume_data = np.zeros([3, 7, 9], dtype=int)
volume_data[:, :, 1:3] = 1
volume_data[:, 5, 2:9] = 1
volume_data[:, 0:7, 5] = 1
skelet = skelet3d.skelet3d(volume_data)
skan = skelet3d.skeleton_analyser.SkeletonAnalyser(skelet, volume_data=volume_data, voxelsize_mm=[1, 1, 1])
stats = skan.skeleton_analysis()
tvg = TreeBuilder('vtk')
tvg.voxelsize_mm = [1, 1, 1]
tvg.shape = [100, 100, 100]
tvg.tree_data = stats
output = tvg.buildTree() # noqa
tvg.saveToFile(fn_out)
os.path.exists(fn_out)
# TODO finish this test
@unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
def test_vessel_tree_vol(self):
import teigen.tb_volume
tvg = TreeBuilder(teigen.tb_volume.TBVolume)
yaml_path = os.path.join(path_to_script, "./hist_stats_test.yaml")
tvg.importFromYaml(yaml_path)
tvg.voxelsize_mm = [1, 1, 1]
tvg.shape = [100, 100, 100]
output = tvg.buildTree() # noqa
# tvg.show()
# if self.interactiveTests:
# tvg.show()
@unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
def test_import_new_vt_format(self):
tvg = TreeBuilder()
yaml_path = os.path.join(path_to_script, "vt_biodur.yaml")
tvg.importFromYaml(yaml_path)
tvg.voxelsize_mm = [1, 1, 1]
tvg.shape = [150, 150, 150]
data3d = tvg.buildTree()
@unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
def test_cylinders_generator(self):
from teigen.generators.cylinders import CylinderGenerator
cg = CylinderGenerator()
cg.run()
@unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
def test_vtk_tree(self):
import numpy as np
tree_data = {
}
element_number = 1
area_size = 100
radius = 5
np.random.seed(0)
pts1 = np.random.random([element_number, 3]) * (area_size - 4 * radius) + 2 * radius
pts2 = np.random.random([element_number, 3]) * (area_size - 4 * radius) + 2 * radius
for i in range(element_number):
edge = {
# "nodeA_ZYX_mm": vor3.vertices[simplex],
# "nodeB_ZYX_mm": vor3.vertices[simplex],
"nodeA_ZYX_mm": pts1[i],
"nodeB_ZYX_mm": pts2[i],
"radius_mm": radius
}
tree_data[i] = edge
tvg = TreeBuilder('vtk')
yaml_path = os.path.join(path_to_script, "./hist_stats_test.yaml")
# tvg.importFromYaml(yaml_path)
tvg.voxelsize_mm = [1, 1, 1]
tvg.shape = [area_size, area_size, area_size]
tvg.tree_data = tree_data
output = tvg.buildTree() # noqa
# tvg.show()
tvg.saveToFile("test_tree_output.vtk")
# @unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
@unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
def test_tree_generator(self):
import numpy as np
tree_data = {
}
element_number = 6
np.random.seed(0)
pts = np.random.random([element_number, 3]) * 100
# construct voronoi
import scipy.spatial
import itertools
vor3 = scipy.spatial.Voronoi(pts)
# for i, two_points in enumerate(vor3.ridge_points):
for i, simplex in enumerate(vor3.ridge_vertices):
simplex = np.asarray(simplex)
# the following line removes all ridges with outliers
simplex = simplex[simplex > 0]
if np.all(simplex >= 0):
x = vor3.vertices[simplex, 0]
y = vor3.vertices[simplex, 1]
z = vor3.vertices[simplex, 2]
for two_points in itertools.combinations(simplex, 2):
edge = {
# "nodeA_ZYX_mm": vor3.vertices[simplex],
# "nodeB_ZYX_mm": vor3.vertices[simplex],
"nodeA_ZYX_mm": vor3.vertices[two_points[0]],
"nodeB_ZYX_mm": vor3.vertices[two_points[1]],
"radius_mm": 2
}
tree_data[i] = edge
else:
pass
show_input_points = False
if show_input_points:
length = len(tree_data)
for i in range(element_number):
edge = {
# #"nodeA_ZYX_mm": np.random.random(3) * 100,
"nodeA_ZYX_mm": pts[i - 1],
"nodeB_ZYX_mm": pts[i],
# "nodeB_ZYX_mm": np.random.random(3) * 100,
"radius_mm": 1
}
tree_data[i + length] = edge
tvg = TreeBuilder('vtk')
yaml_path = os.path.join(path_to_script, "./hist_stats_test.yaml")
# tvg.importFromYaml(yaml_path)
tvg.voxelsize_mm = [1, 1, 1]
tvg.shape = [100, 100, 100]
tvg.tree_data = tree_data
output = tvg.buildTree() # noqa
# tvg.show()
tvg.saveToFile("test_tree_output.vtk")
tvgvol = TreeBuilder('vol')
tvgvol.voxelsize_mm = [1, 1, 1]
tvgvol.shape = [100, 100, 100]
tvgvol.tree_data = tree_data
outputvol = tvgvol.buildTree()
tvgvol.saveToFile("tree_volume.pklz")
# self.assertTrue(False)
# @unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
def test_io3d(self):
import io3d
data3d = np.zeros([10, 10, 10])
segmentation = np.zeros([10, 10, 10])
data3d[2:7, :3:5, :6] = 100
datap = {
"data3d": data3d,
# "segmentation": segmentation,
"voxelsize_mm": [1, 1, 1]
}
io3d.write(datap, "file1.pklz")
# @unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
def test_skimage_io_imsave(self):
import skimage.io
data3d = np.zeros([10, 10, 10])
segmentation = np.zeros([10, 10, 10])
data3d[2:7, :3:5, :6] = 100
datap = {
"data3d": data3d,
# "segmentation": segmentation,
"voxelsize_mm": [1, 1, 1]
}
skimage.io.imsave("skiamge.png", data3d[0])
if __name__ == "__main__":
unittest.main()
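# Running sketch (illustrative): the suite works under plain unittest or
# pytest; the LAR marker defined above lets pytest deselect the
# larcc-dependent test:
#   pytest tests/tube_tree_test.py -m "not LAR"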
|
mjirik/teigen
|
tests/tube_tree_test.py
|
Python
|
apache-2.0
| 9,295
|
[
"VTK"
] |
465f62296756b6d822a2cd602e3452a28a84b3a21d0877fbd40f1e8a91e4caf1
|
from rest_framework import serializers
from elixir.models import *
from elixir.validators import *
class LegacyDownloadSerializer(serializers.ModelSerializer):
url = serializers.CharField(allow_blank=False, validators=[IsURLFTPValidator], required=True)
type = serializers.CharField(allow_blank=True, max_length=300, min_length=1, required=True)
comment = serializers.CharField(allow_blank=True, min_length=10, max_length=1000, validators=[IsStringTypeValidator], required=False, source='note')
class Meta:
model = Download
fields = ('url', 'type', 'comment')
def validate_type(self, attrs):
enum = ENUMValidator([u'API specification', u'Biological data', u'Binaries', u'Binary package', u'Command-line specification', u'Container file', u'CWL file', u'Icon', u'Ontology', u'Screenshot', u'Source code', u'Source package', u'Test data', u'Test script', u'Tool wrapper (galaxy)', u'Tool wrapper (taverna)', u'Tool wrapper (other)', u'VM image'])
attrs = enum(attrs)
return attrs
# TODO: add to interfaces
# cmd == "A useful command pertinent to the download, e.g. for getting or installing a tool."
# version == "Version information (typically a version number) of the software applicable to this download."
def validate_version(self, attrs):
# make sure the version matches the regular expression
#p = re.compile('^[\p{Zs}A-Za-z0-9+\.,\-_:;()]*$', re.IGNORECASE | re.UNICODE)
# this is ok, only allow spaces
p = re.compile('^[ A-Za-z0-9+\.,\-_:;()]*$', re.IGNORECASE | re.UNICODE)
if not p.search(attrs):
raise serializers.ValidationError('This field can only contain letters, numbers, spaces or these characters: + . , - _ : ; ( )')
return attrs
def get_pk_field(self, model_field):
return None
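# Usage sketch (hypothetical payload; standard DRF validation flow):
#   s = LegacyDownloadSerializer(data={"url": "ftp://example.org/tool.tgz",
#                                      "type": "Source code",
#                                      "comment": "Primary source archive."})
#   s.is_valid()  # runs validate_type and the field validators above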
|
bio-tools/biotoolsregistry
|
backend/elixir/serialization/resource_serialization/v2/download.py
|
Python
|
gpl-3.0
| 1,739
|
[
"Galaxy"
] |
b7973b0aabadcd1d5387d3df2e8e822b7218ddc10360884a3275cc566806bd87
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module for interfacing with phonopy, see https://atztogo.github.io/phonopy/
"""
from typing import Any, Dict, List
import numpy as np
from monty.dev import requires
from monty.serialization import loadfn
from scipy.interpolate import InterpolatedUnivariateSpline
from pymatgen.core import Lattice, Structure
from pymatgen.phonon.bandstructure import (
PhononBandStructure,
PhononBandStructureSymmLine,
)
from pymatgen.phonon.dos import CompletePhononDos, PhononDos
from pymatgen.phonon.gruneisen import (
GruneisenParameter,
GruneisenPhononBandStructureSymmLine,
)
from pymatgen.symmetry.bandstructure import HighSymmKpath
try:
from phonopy import Phonopy
from phonopy.file_IO import write_disp_yaml
from phonopy.structure.atoms import PhonopyAtoms
except ImportError:
Phonopy = None
write_disp_yaml = None
PhonopyAtoms = None
@requires(Phonopy, "phonopy not installed!") # type: ignore
def get_pmg_structure(phonopy_structure):
"""
Convert a PhonopyAtoms object to pymatgen Structure object.
Args:
phonopy_structure (PhonopyAtoms): A phonopy structure object.
"""
lattice = phonopy_structure.get_cell()
frac_coords = phonopy_structure.get_scaled_positions()
symbols = phonopy_structure.get_chemical_symbols()
masses = phonopy_structure.get_masses()
mms = phonopy_structure.get_magnetic_moments()
mms = mms or [0] * len(symbols)
return Structure(
lattice,
symbols,
frac_coords,
site_properties={"phonopy_masses": masses, "magnetic_moments": mms},
)
@requires(Phonopy, "phonopy not installed!") # type: ignore
def get_phonopy_structure(pmg_structure):
"""
Convert a pymatgen Structure object to a PhonopyAtoms object.
Args:
pmg_structure (pymatgen Structure): A Pymatgen structure object.
"""
symbols = [site.specie.symbol for site in pmg_structure]
return PhonopyAtoms(
symbols=symbols,
cell=pmg_structure.lattice.matrix,
scaled_positions=pmg_structure.frac_coords,
)
def get_structure_from_dict(d):
"""
Extracts a structure from the dictionary extracted from the output
files of phonopy like phonopy.yaml or band.yaml.
Adds "phonopy_masses" in the site_properties of the structures.
Compatible with older phonopy versions.
"""
species = []
frac_coords = []
masses = []
if "points" in d:
for p in d["points"]:
species.append(p["symbol"])
frac_coords.append(p["coordinates"])
masses.append(p["mass"])
elif "atoms" in d:
for p in d["atoms"]:
species.append(p["symbol"])
frac_coords.append(p["position"])
masses.append(p["mass"])
else:
raise ValueError("The dict does not contain structural information")
return Structure(d["lattice"], species, frac_coords, site_properties={"phonopy_masses": masses})
def eigvec_to_eigdispl(v, q, frac_coords, mass):
r"""
Converts a single eigenvector to an eigendisplacement in the primitive cell
according to the formula::
exp(2*pi*i*(frac_coords \\dot q)) / sqrt(mass) * v
Compared to the modulation option in phonopy, here all the additional
multiplicative and phase factors are set to 1.
Args:
v: the vector that should be converted. A 3D complex numpy array.
q: the q point in fractional coordinates
frac_coords: the fractional coordinates of the atom
mass: the mass of the atom
"""
c = np.exp(2j * np.pi * np.dot(frac_coords, q)) / np.sqrt(mass)
return c * v
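# Tiny worked example (invented numbers): with q = [0.5, 0, 0],
# frac_coords = [0.25, 0, 0] and mass = 4.0, the prefactor is
# exp(2j*pi*0.125) / 2, so
#   eigvec_to_eigdispl(np.array([1 + 0j, 0, 0]), [0.5, 0, 0], [0.25, 0, 0], 4.0)
# returns that prefactor times the eigenvector.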
def get_ph_bs_symm_line_from_dict(bands_dict, has_nac=False, labels_dict=None):
r"""
Creates a pymatgen PhononBandStructure object from the dictionary
extracted by the band.yaml file produced by phonopy. The labels
will be extracted from the dictionary, if present. If the 'eigenvector'
key is found the eigendisplacements will be calculated according to the
formula::
exp(2*pi*i*(frac_coords \\dot q)) / sqrt(mass) * v
and added to the object.
Args:
bands_dict: the dictionary extracted from the band.yaml file
has_nac: True if the data have been obtained with the option
--nac option. Default False.
labels_dict: dict that links a qpoint in frac coords to a label.
Its value will replace the data contained in the band.yaml.
"""
structure = get_structure_from_dict(bands_dict)
qpts = []
frequencies = []
eigendisplacements = []
phonopy_labels_dict = {}
for p in bands_dict["phonon"]:
q = p["q-position"]
qpts.append(q)
bands = []
eig_q = []
for b in p["band"]:
bands.append(b["frequency"])
if "eigenvector" in b:
eig_b = []
for i, eig_a in enumerate(b["eigenvector"]):
v = np.zeros(3, complex)
for x in range(3):
v[x] = eig_a[x][0] + eig_a[x][1] * 1j
eig_b.append(
eigvec_to_eigdispl(
v,
q,
structure[i].frac_coords,
structure.site_properties["phonopy_masses"][i],
)
)
eig_q.append(eig_b)
frequencies.append(bands)
if "label" in p:
phonopy_labels_dict[p["label"]] = p["q-position"]
if eig_q:
eigendisplacements.append(eig_q)
qpts = np.array(qpts)
# transpose to match the convention in PhononBandStructure
frequencies = np.transpose(frequencies)
if eigendisplacements:
eigendisplacements = np.transpose(eigendisplacements, (1, 0, 2, 3))
rec_latt = Lattice(bands_dict["reciprocal_lattice"])
labels_dict = labels_dict or phonopy_labels_dict
ph_bs = PhononBandStructureSymmLine(
qpts,
frequencies,
rec_latt,
has_nac=has_nac,
labels_dict=labels_dict,
structure=structure,
eigendisplacements=eigendisplacements,
)
return ph_bs
def get_ph_bs_symm_line(bands_path, has_nac=False, labels_dict=None):
r"""
Creates a pymatgen PhononBandStructure from a band.yaml file.
The labels will be extracted from the dictionary, if present.
If the 'eigenvector' key is found the eigendisplacements will be
calculated according to the formula:
\\exp(2*pi*i*(frac_coords \\dot q)) / sqrt(mass) * v
and added to the object.
Args:
bands_path: path to the band.yaml file
has_nac: True if the data have been obtained with the --nac option.
Default False.
labels_dict: dict that links a qpoint in frac coords to a label.
"""
return get_ph_bs_symm_line_from_dict(loadfn(bands_path), has_nac, labels_dict)
def get_ph_dos(total_dos_path):
"""
Creates a pymatgen PhononDos from a total_dos.dat file.
Args:
total_dos_path: path to the total_dos.dat file.
"""
a = np.loadtxt(total_dos_path)
return PhononDos(a[:, 0], a[:, 1])
def get_complete_ph_dos(partial_dos_path, phonopy_yaml_path):
"""
Creates a pymatgen CompletePhononDos from a partial_dos.dat and
phonopy.yaml files.
The second is produced when generating a Dos and is needed to extract
the structure.
Args:
partial_dos_path: path to the partial_dos.dat file.
phonopy_yaml_path: path to the phonopy.yaml file.
"""
a = np.loadtxt(partial_dos_path).transpose()
d = loadfn(phonopy_yaml_path)
structure = get_structure_from_dict(d["primitive_cell"])
total_dos = PhononDos(a[0], a[1:].sum(axis=0))
pdoss = {}
for site, pdos in zip(structure, a[1:]):
pdoss[site] = pdos.tolist()
return CompletePhononDos(structure, total_dos, pdoss)
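# Usage sketch (illustrative): the two DOS readers above expect the standard
# phonopy output files; the file names below are the phonopy defaults and are
# assumptions here.
def _example_load_dos():
    total = get_ph_dos("total_dos.dat")
    complete = get_complete_ph_dos("partial_dos.dat", "phonopy.yaml")
    return total, complete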
@requires(Phonopy, "phonopy not installed!")
def get_displaced_structures(pmg_structure, atom_disp=0.01, supercell_matrix=None, yaml_fname=None, **kwargs):
r"""
Generate a set of symmetrically inequivalent displaced structures for
phonon calculations.
Args:
pmg_structure (Structure): A pymatgen structure object.
atom_disp (float): Atomic displacement. Default is 0.01 $\\AA$.
supercell_matrix (3x3 array): Scaling matrix for supercell.
yaml_fname (string): If not None, the full path to the output
displacement yaml file, e.g. disp.yaml.
**kwargs: Parameters used in Phonopy.generate_displacement method.
Return:
A list of symmetrically inequivalent structures with displacements, in
which the first element is the perfect supercell structure.
"""
is_plusminus = kwargs.get("is_plusminus", "auto")
is_diagonal = kwargs.get("is_diagonal", True)
is_trigonal = kwargs.get("is_trigonal", False)
ph_structure = get_phonopy_structure(pmg_structure)
if supercell_matrix is None:
supercell_matrix = np.eye(3) * np.array((1, 1, 1))
phonon = Phonopy(unitcell=ph_structure, supercell_matrix=supercell_matrix)
phonon.generate_displacements(
distance=atom_disp,
is_plusminus=is_plusminus,
is_diagonal=is_diagonal,
is_trigonal=is_trigonal,
)
if yaml_fname is not None:
displacements = phonon.get_displacements()
write_disp_yaml(
displacements=displacements,
supercell=phonon.get_supercell(),
filename=yaml_fname,
)
# Supercell structures with displacement
disp_supercells = phonon.get_supercells_with_displacements()
# Perfect supercell structure
init_supercell = phonon.get_supercell()
# Structure list to be returned
structure_list = [get_pmg_structure(init_supercell)]
for c in disp_supercells:
if c is not None:
structure_list.append(get_pmg_structure(c))
return structure_list
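# Usage sketch (illustrative, requires phonopy): build displaced 2x2x2
# supercells for a given pymatgen Structure. The supercell size and the
# displacement amplitude are assumptions for demonstration.
def _example_displaced_structures(pmg_structure):
    structures = get_displaced_structures(
        pmg_structure, atom_disp=0.01, supercell_matrix=np.eye(3) * 2
    )
    # structures[0] is the perfect supercell; the rest carry displacements
    return structures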
@requires(Phonopy, "phonopy is required to calculate phonon density of states")
def get_phonon_dos_from_fc(
structure: Structure,
supercell_matrix: np.ndarray,
force_constants: np.ndarray,
mesh_density: float = 100.0,
num_dos_steps: int = 200,
**kwargs,
) -> CompletePhononDos:
"""
Get a projected phonon density of states from phonopy force constants.
Args:
structure: A structure.
supercell_matrix: The supercell matrix used to generate the force
constants.
force_constants: The force constants in phonopy format.
mesh_density: The density of the q-point mesh. See the docstring
for the ``mesh`` argument in Phonopy.init_mesh() for more details.
num_dos_steps: Number of frequency steps in the energy grid.
**kwargs: Additional kwargs passed to the Phonopy constructor.
Returns:
The density of states.
"""
structure_phonopy = get_phonopy_structure(structure)
phonon = Phonopy(structure_phonopy, supercell_matrix=supercell_matrix, **kwargs)
phonon.set_force_constants(force_constants)
phonon.run_mesh(
mesh_density,
is_mesh_symmetry=False,
with_eigenvectors=True,
is_gamma_center=True,
)
# get min, max, step frequency
frequencies = phonon.get_mesh_dict()["frequencies"]
freq_min = frequencies.min()
freq_max = frequencies.max()
freq_pitch = (freq_max - freq_min) / num_dos_steps
phonon.run_projected_dos(freq_min=freq_min, freq_max=freq_max, freq_pitch=freq_pitch)
dos_raw = phonon.projected_dos.get_partial_dos()
pdoss = dict(zip(structure, dos_raw[1]))
total_dos = PhononDos(dos_raw[0], dos_raw[1].sum(axis=0))
return CompletePhononDos(structure, total_dos, pdoss)
@requires(Phonopy, "phonopy is required to calculate phonon band structures")
def get_phonon_band_structure_from_fc(
structure: Structure,
supercell_matrix: np.ndarray,
force_constants: np.ndarray,
mesh_density: float = 100.0,
**kwargs,
) -> PhononBandStructure:
"""
Get a uniform phonon band structure from phonopy force constants.
Args:
structure: A structure.
supercell_matrix: The supercell matrix used to generate the force
constants.
force_constants: The force constants in phonopy format.
mesh_density: The density of the q-point mesh. See the docstring
for the ``mesh`` argument in Phonopy.init_mesh() for more details.
**kwargs: Additional kwargs passed to the Phonopy constructor.
Returns:
The uniform phonon band structure.
"""
structure_phonopy = get_phonopy_structure(structure)
phonon = Phonopy(structure_phonopy, supercell_matrix=supercell_matrix, **kwargs)
phonon.set_force_constants(force_constants)
phonon.run_mesh(mesh_density, is_mesh_symmetry=False, is_gamma_center=True)
mesh = phonon.get_mesh_dict()
return PhononBandStructure(mesh["qpoints"], mesh["frequencies"], structure.lattice)
@requires(Phonopy, "phonopy is required to calculate phonon band structures")
def get_phonon_band_structure_symm_line_from_fc(
structure: Structure,
supercell_matrix: np.ndarray,
force_constants: np.ndarray,
line_density: float = 20.0,
symprec: float = 0.01,
**kwargs,
) -> PhononBandStructureSymmLine:
"""
Get a phonon band structure along a high symmetry path from phonopy force
constants.
Args:
structure: A structure.
supercell_matrix: The supercell matrix used to generate the force
constants.
force_constants: The force constants in phonopy format.
line_density: The density along the high symmetry path.
symprec: Symmetry precision passed to phonopy and used for determining
the band structure path.
**kwargs: Additional kwargs passed to the Phonopy constructor.
Returns:
The line mode band structure.
"""
structure_phonopy = get_phonopy_structure(structure)
phonon = Phonopy(structure_phonopy, supercell_matrix=supercell_matrix, symprec=symprec, **kwargs)
phonon.set_force_constants(force_constants)
kpath = HighSymmKpath(structure, symprec=symprec)
kpoints, labels = kpath.get_kpoints(line_density=line_density, coords_are_cartesian=False)
phonon.run_qpoints(kpoints)
frequencies = phonon.qpoints.get_frequencies().T
labels_dict = {a: k for a, k in zip(labels, kpoints) if a != ""}
return PhononBandStructureSymmLine(kpoints, frequencies, structure.lattice, labels_dict=labels_dict)
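# Usage sketch (illustrative, requires phonopy): the three *_from_fc helpers
# above share the same core inputs. `fc` is assumed to be a force-constant
# array in phonopy format computed for a 2x2x2 supercell of `struct`.
def _example_fc_workflow(struct, fc):
    sc = np.eye(3) * 2
    dos = get_phonon_dos_from_fc(struct, sc, fc)
    bs_uniform = get_phonon_band_structure_from_fc(struct, sc, fc)
    bs_line = get_phonon_band_structure_symm_line_from_fc(struct, sc, fc)
    return dos, bs_uniform, bs_line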
def get_gruneisenparameter(gruneisen_path, structure=None, structure_path=None) -> GruneisenParameter:
"""
Get Gruneisen object from gruneisen.yaml file, as obtained from phonopy (Frequencies in THz!).
The order is structure > structure path > structure from gruneisen dict.
Newer versions of phonopy include the structure in the yaml file,
the structure/structure_path is kept for compatibility.
Args:
gruneisen_path: Path to gruneisen.yaml file (frequencies have to be in THz!)
structure: pymatgen Structure object
structure_path: path to structure in a file (e.g., POSCAR)
Returns: GruneisenParameter object
"""
gruneisen_dict = loadfn(gruneisen_path)
# Precedence (as documented above): structure > structure_path > yaml dict
if structure is None:
    if structure_path:
        structure = Structure.from_file(structure_path)
    else:
        try:
            structure = get_structure_from_dict(gruneisen_dict)
        except ValueError:
            raise ValueError("\nPlease provide a structure.\n")
qpts, multiplicities, frequencies, gruneisen = ([] for _ in range(4))
phonopy_labels_dict = {}
for p in gruneisen_dict["phonon"]:
q = p["q-position"]
qpts.append(q)
if "multiplicity" in p:
m = p["multiplicity"]
else:
m = 1
multiplicities.append(m)
bands, gruneisenband = ([] for _ in range(2))
for b in p["band"]:
bands.append(b["frequency"])
if "gruneisen" in b:
gruneisenband.append(b["gruneisen"])
frequencies.append(bands)
gruneisen.append(gruneisenband)
if "label" in p:
phonopy_labels_dict[p["label"]] = p["q-position"]
qpts_np = np.array(qpts)
multiplicities_np = np.array(multiplicities)
# transpose to match the convention in PhononBandStructure
frequencies_np = np.transpose(frequencies)
gruneisen_np = np.transpose(gruneisen)
return GruneisenParameter(
gruneisen=gruneisen_np,
qpoints=qpts_np,
multiplicities=multiplicities_np,
frequencies=frequencies_np,
structure=structure,
)
def get_gs_ph_bs_symm_line_from_dict(
gruneisen_dict, structure=None, structure_path=None, labels_dict=None, fit=False
) -> GruneisenPhononBandStructureSymmLine:
r"""
Creates a pymatgen GruneisenPhononBandStructure object from the dictionary
extracted by the gruneisen.yaml file produced by phonopy. The labels
will be extracted from the dictionary, if present. If the 'eigenvector'
key is found the eigendisplacements will be calculated according to the
formula::
exp(2*pi*i*(frac_coords \\dot q)) / sqrt(mass) * v
and added to the object. A fit algorithm can be used to replace diverging
Gruneisen values close to gamma.
Args:
gruneisen_dict (dict): the dictionary extracted from the gruneisen.yaml file
structure (Structure): pymatgen structure object
structure_path: path to structure file
labels_dict (dict): dict that links a qpoint in frac coords to a label.
Its value will replace the data contained in the band.yaml.
fit (bool): Substitute Grueneisen parameters close to the gamma point
with points obtained from a fit to a spline if they deviate from
a smooth curve (i.e. if the slope changes by more than 200% in the
range of 10% around the gamma point).
These deviations occur because of very small frequencies
(and therefore numerical inaccuracies) close to gamma.
"""
# Use the explicitly given structure first, then structure_path, then the yaml dict
if structure is None:
    if structure_path:
        structure = Structure.from_file(structure_path)
    else:
        try:
            structure = get_structure_from_dict(gruneisen_dict)
        except ValueError:
            raise ValueError("\nPlease provide a structure.\n")
qpts, frequencies, gruneisenparameters = ([] for _ in range(3))
phonopy_labels_dict = {} # type: Dict[Any,Any]
if fit:
for pa in gruneisen_dict["path"]:
phonon = pa["phonon"] # This is a list
start = pa["phonon"][0]
end = pa["phonon"][-1]
if start["q-position"] == [0, 0, 0]: # Gamma at start of band
qpts_temp, frequencies_temp, gruneisen_temp, distance = (
[] for _ in range(4)
) # type: List[Any],List[Any],List[Any],List[Any]
for i in range(pa["nqpoint"]):
bands, gruneisenband = ([] for _ in range(2)) # type: List[Any], List[Any]
for b in phonon[pa["nqpoint"] - i - 1]["band"]:
bands.append(b["frequency"])
# Fraction of leftover points in current band
gruen = _extrapolate_grun(b, distance, gruneisen_temp, gruneisenband, i, pa)
gruneisenband.append(gruen)
q = phonon[pa["nqpoint"] - i - 1]["q-position"]
qpts_temp.append(q)
d = phonon[pa["nqpoint"] - i - 1]["distance"]
distance.append(d)
frequencies_temp.append(bands)
gruneisen_temp.append(gruneisenband)
if "label" in phonon[pa["nqpoint"] - i - 1]:
phonopy_labels_dict[phonon[pa["nqpoint"] - i - 1]["label"]] = phonon[pa["nqpoint"] - i - 1][
    "q-position"
]
qpts.extend(list(reversed(qpts_temp)))
frequencies.extend(list(reversed(frequencies_temp)))
gruneisenparameters.extend(list(reversed(gruneisen_temp)))
elif end["q-position"] == [0, 0, 0]: # Gamma at end of band
distance = []
for i in range(pa["nqpoint"]):
bands, gruneisenband = ([] for _ in range(2))
for b in phonon[i]["band"]:
bands.append(b["frequency"])
gruen = _extrapolate_grun(b, distance, gruneisenparameters, gruneisenband, i, pa)
gruneisenband.append(gruen)
q = phonon[i]["q-position"]
qpts.append(q)
d = phonon[i]["distance"]
distance.append(d)
frequencies.append(bands)
gruneisenparameters.append(gruneisenband)
if "label" in phonon[i]:
phonopy_labels_dict[phonon[i]["label"]] = phonon[i]["q-position"]
else: # No Gamma in band
distance = []
for i in range(pa["nqpoint"]):
bands, gruneisenband = ([] for _ in range(2))
for b in phonon[i]["band"]:
bands.append(b["frequency"])
gruneisenband.append(b["gruneisen"])
q = phonon[i]["q-position"]
qpts.append(q)
d = phonon[i]["distance"]
distance.append(d)
frequencies.append(bands)
gruneisenparameters.append(gruneisenband)
if "label" in phonon[i]:
phonopy_labels_dict[phonon[i]["label"]] = phonon[i]["q-position"]
else:
for pa in gruneisen_dict["path"]:
for p in pa["phonon"]:
q = p["q-position"]
qpts.append(q)
bands, gruneisen_bands = ([] for _ in range(2))
for b in p["band"]:
bands.append(b["frequency"])
gruneisen_bands.append(b["gruneisen"])
frequencies.append(bands)
gruneisenparameters.append(gruneisen_bands)
if "label" in p:
phonopy_labels_dict[p["label"]] = p["q-position"]
qpts_np = np.array(qpts)
# transpose to match the convention in PhononBandStructure
frequencies_np = np.transpose(frequencies)
gruneisenparameters_np = np.transpose(gruneisenparameters)
rec_latt = structure.lattice.reciprocal_lattice
labels_dict = labels_dict or phonopy_labels_dict
return GruneisenPhononBandStructureSymmLine(
qpoints=qpts_np,
frequencies=frequencies_np,
gruneisenparameters=gruneisenparameters_np,
lattice=rec_latt,
labels_dict=labels_dict,
structure=structure,
eigendisplacements=None,
)
def _extrapolate_grun(b, distance, gruneisenparameter, gruneisenband, i, pa):
leftover_fraction = (pa["nqpoint"] - i - 1) / pa["nqpoint"]
if leftover_fraction < 0.1:
diff = abs(b["gruneisen"] - gruneisenparameter[-1][len(gruneisenband)]) / abs(
gruneisenparameter[-2][len(gruneisenband)] - gruneisenparameter[-1][len(gruneisenband)]
)
if diff > 2:
x = list(range(len(distance)))
y = [i[len(gruneisenband)] for i in gruneisenparameter]
y = y[-len(x) :] # Only elements of current band
extrapolator = InterpolatedUnivariateSpline(x, y, k=5)
g_extrapolated = extrapolator(len(distance))
gruen = float(g_extrapolated)
else:
gruen = b["gruneisen"]
else:
gruen = b["gruneisen"]
return gruen
def get_gruneisen_ph_bs_symm_line(gruneisen_path, structure=None, structure_path=None, labels_dict=None, fit=False):
r"""
Creates a pymatgen GruneisenPhononBandStructure from a gruneisen.yaml file.
The labels will be extracted from the dictionary, if present.
If the 'eigenvector' key is found the eigendisplacements will be
calculated according to the formula:
\\exp(2*pi*i*(frac_coords \\dot q)) / sqrt(mass) * v
and added to the object.
Args:
gruneisen_path: path to the gruneisen.yaml file
structure: pymatgen Structure object
structure_path: path to a structure file (e.g., POSCAR)
labels_dict: dict that links a qpoint in frac coords to a label.
fit: Substitute Grueneisen parameters close to the gamma point
with points obtained from a fit to a spline if they deviate from
a smooth curve (i.e. if the slope changes by more than 200% in the
range of 10% around the gamma point).
These deviations occur because of very small frequencies
(and therefore numerical inaccuracies) close to gamma.
"""
return get_gs_ph_bs_symm_line_from_dict(loadfn(gruneisen_path), structure, structure_path, labels_dict, fit)
|
materialsproject/pymatgen
|
pymatgen/io/phonopy.py
|
Python
|
mit
| 25,305
|
[
"phonopy",
"pymatgen"
] |
a8513f349d31cf16c06f4d4c015dc041f0c1f02ef8d2172a433ecf4c8d134b1a
|
#
# This file is part of Healpy.
#
# Healpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Healpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Healpy; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# For more information about Healpy, see http://code.google.com/p/healpy
#
import warnings
import numpy as np
import six
import astropy.io.fits as pf
from scipy.integrate import trapz
from astropy.utils import data
DATAURL = "https://healpy.github.io/healpy-data/"
DATAURL_MIRROR = "https://github.com/healpy/healpy-data/releases/download/"
from . import _healpy_sph_transform_lib as sphtlib
from . import _sphtools as _sphtools
from . import cookbook as cb
import os.path
from . import pixelfunc
from .pixelfunc import maptype, UNSEEN, ma_to_array, accept_ma
class FutureChangeWarning(UserWarning):
pass
DATAPATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
MAX_NSIDE = (
8192 # The maximum nside up to which most operations (e.g. map2alm) will work
)
# Spherical harmonics transformation
def anafast(
map1,
map2=None,
nspec=None,
lmax=None,
mmax=None,
iter=3,
alm=False,
pol=True,
use_weights=False,
datapath=None,
gal_cut=0,
use_pixel_weights=False,
):
"""Computes the power spectrum of a Healpix map, or the cross-spectrum
between two maps if *map2* is given.
No removal of monopole or dipole is performed. The input maps must be
in ring-ordering.
Spherical harmonics transforms in HEALPix are always on the full sky;
if the map is masked, those pixels are set to 0. It is recommended to
remove the monopole from the map before running `anafast` to reduce
boundary effects.
Parameters
----------
map1 : float, array-like shape (Npix,) or (3, Npix)
Either an array representing a map, or a sequence of 3 arrays
representing I, Q, U maps. Must be in ring ordering.
map2 : float, array-like shape (Npix,) or (3, Npix)
Either an array representing a map, or a sequence of 3 arrays
representing I, Q, U maps. Must be in ring ordering.
nspec : None or int, optional
The number of spectra to return. If None, returns all, otherwise
returns cls[:nspec]
lmax : int, scalar, optional
Maximum l of the power spectrum (default: 3*nside-1)
mmax : int, scalar, optional
Maximum m of the alm (default: lmax)
iter : int, scalar, optional
Number of iterations (default: 3)
alm : bool, scalar, optional
If True, returns both cl and alm, otherwise only cl is returned
pol : bool, optional
If True, assumes input maps are TQU. Output will be TEB cl's and
correlations (input must be 1 or 3 maps).
If False, maps are assumed to be described by spin 0 spherical harmonics.
(input can be any number of maps)
If there is only one input map, it has no effect. Default: True.
use_weights: bool, scalar, optional
If True, use the ring weighting. Default: False.
datapath : None or str, optional
If given, the directory where to find the weights data.
gal_cut : float [degrees]
pixels at latitude in [-gal_cut;+gal_cut] are not taken into account
use_pixel_weights: bool, optional
If True, use pixel by pixel weighting, healpy will automatically download the weights, if needed
See the map2alm docs for details about weighting
Returns
-------
res : array or sequence of arrays
If *alm* is False, returns cl or a list of cl's (TT, EE, BB, TE, EB, TB for
polarized input map)
Otherwise, returns a tuple (cl, alm), where cl is as above and
alm is the spherical harmonic transform or a list of almT, almE, almB
for polarized input
"""
map1 = ma_to_array(map1)
alms1 = map2alm(
map1,
lmax=lmax,
mmax=mmax,
pol=pol,
iter=iter,
use_weights=use_weights,
datapath=datapath,
gal_cut=gal_cut,
use_pixel_weights=use_pixel_weights,
)
if map2 is not None:
map2 = ma_to_array(map2)
alms2 = map2alm(
map2,
lmax=lmax,
mmax=mmax,
pol=pol,
iter=iter,
use_weights=use_weights,
datapath=datapath,
gal_cut=gal_cut,
use_pixel_weights=use_pixel_weights,
)
else:
alms2 = None
cls = alm2cl(alms1, alms2=alms2, lmax=lmax, mmax=mmax, lmax_out=lmax, nspec=nspec)
if alm:
if map2 is not None:
return (cls, alms1, alms2)
else:
return (cls, alms1)
else:
return cls
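# Usage sketch (illustrative): round-trip a flat input spectrum through
# synfast and anafast. nside and lmax are arbitrary assumptions.
def _example_anafast():
    nside, lmax = 32, 64
    cl_in = np.ones(lmax + 1)
    m = synfast(cl_in, nside, lmax=lmax, new=True, verbose=False)
    return anafast(m, lmax=lmax)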
def map2alm(
maps,
lmax=None,
mmax=None,
iter=3,
pol=True,
use_weights=False,
datapath=None,
gal_cut=0,
use_pixel_weights=False,
verbose=True,
):
"""Computes the alm of a Healpix map. The input maps must all be
in ring ordering.
Pixel values are weighted before applying the transform:
* when you don't specify any weights, the uniform weight value 4*pi/n_pix is used
* with ring weights enabled (use_weights=True), pixels in every ring
are weighted with a uniform value similar to the one above, ring weights are
included in healpy
* with pixel weights (use_pixel_weights=True), every pixel gets an individual weight
Pixel weights provide the most accurate transform, so you should always use them if
possible. However they are not included in healpy and will be automatically downloaded
and cached in ~/.astropy the first time you compute a transform at a specific nside.
If datapath is specified, healpy will first check that local folder before downloading
the weights.
The easiest way to set up the folder is to clone the healpy-data repository:
git clone --depth 1 https://github.com/healpy/healpy-data
and add the NSIDE 8192 weights from https://github.com/healpy/healpy-data/releases
and set datapath to the root of the repository.
Parameters
----------
maps : array-like, shape (Npix,) or (n, Npix)
The input map or a list of n input maps. Must be in ring ordering.
lmax : int, scalar, optional
Maximum l of the power spectrum. Default: 3*nside-1
mmax : int, scalar, optional
Maximum m of the alm. Default: lmax
iter : int, scalar, optional
Number of iterations (default: 3)
pol : bool, optional
If True, assumes input maps are TQU. Output will be TEB alm's.
(input must be 1 or 3 maps)
If False, apply spin 0 harmonic transform to each map.
(input can be any number of maps)
If there is only one input map, it has no effect. Default: True.
use_weights: bool, scalar, optional
If True, use the ring weighting. Default: False.
datapath : None or str, optional
If given, the directory where to find the weights data.
gal_cut : float [degrees]
pixels at latitude in [-gal_cut;+gal_cut] are not taken into account
use_pixel_weights: bool, optional
If True, use pixel by pixel weighting, healpy will automatically download the weights, if needed
verbose : bool, optional
If True prints diagnostic information. Default: True
Returns
-------
alms : array or tuple of array
alm or a tuple of 3 alm (almT, almE, almB) if polarized input.
Notes
-----
The pixels which have the special `UNSEEN` value are replaced by zeros
before spherical harmonic transform. They are converted back to `UNSEEN`
value, so that the input maps are not modified. Each map has its own,
independent mask.
"""
maps = ma_to_array(maps)
info = maptype(maps)
nside = pixelfunc.get_nside(maps)
check_max_nside(nside)
pixel_weights_filename = None
if use_pixel_weights:
if use_weights:
raise RuntimeError("Either use pixel or ring weights")
filename = "full_weights/healpix_full_weights_nside_%04d.fits" % nside
if datapath is not None:
pixel_weights_filename = os.path.join(datapath, filename)
if os.path.exists(pixel_weights_filename):
if verbose:
warnings.warn(
"Accessing pixel weights from {}".format(pixel_weights_filename)
)
else:
raise RuntimeError(
    "You specified datapath but the pixel weights file "
    "is missing at {}".format(pixel_weights_filename)
)
if pixel_weights_filename is None:
with data.conf.set_temp("dataurl", DATAURL), data.conf.set_temp(
"dataurl_mirror", DATAURL_MIRROR
), data.conf.set_temp("remote_timeout", 30):
pixel_weights_filename = data.get_pkg_data_filename(
filename, package="healpy"
)
if pol or info in (0, 1):
alms = _sphtools.map2alm(
maps,
niter=iter,
datapath=datapath,
use_weights=use_weights,
lmax=lmax,
mmax=mmax,
gal_cut=gal_cut,
pixel_weights_filename=pixel_weights_filename,
)
else:
# info >= 2 and pol is False : spin 0 spht for each map
alms = [
_sphtools.map2alm(
mm,
niter=iter,
datapath=datapath,
use_weights=use_weights,
lmax=lmax,
mmax=mmax,
gal_cut=gal_cut,
pixel_weights_filename=pixel_weights_filename,
)
for mm in maps
]
return np.array(alms)
def alm2map(
alms,
nside,
lmax=None,
mmax=None,
pixwin=False,
fwhm=0.0,
sigma=None,
pol=True,
inplace=False,
verbose=True,
):
"""Computes a Healpix map given the alm.
The alm are given as a complex array. You can specify lmax
and mmax, or they will be computed from array size (assuming
lmax==mmax).
Parameters
----------
alms : complex, array or sequence of arrays
A complex array or a sequence of complex arrays.
Each array must have a size of the form: mmax * (2 * lmax + 1 - mmax) / 2 + lmax + 1
nside : int, scalar
The nside of the output map.
lmax : None or int, scalar, optional
Explicitly define lmax (needed if mmax!=lmax)
mmax : None or int, scalar, optional
Explicitly define mmax (needed if mmax!=lmax)
pixwin : bool, optional
Smooth the alm using the pixel window functions. Default: False.
fwhm : float, scalar, optional
The fwhm of the Gaussian used to smooth the map (applied on alm)
[in radians]
sigma : float, scalar, optional
The sigma of the Gaussian used to smooth the map (applied on alm)
[in radians]
pol : bool, optional
If True, assumes input alms are TEB. Output will be TQU maps.
(input must be 1 or 3 alms)
If False, apply spin 0 harmonic transform to each alm.
(input can be any number of alms)
If there is only one input alm, it has no effect. Default: True.
inplace : bool, optional
If True, input alms may be modified by pixel window function and beam
smoothing (if alm(s) are complex128 contiguous arrays).
Otherwise, input alms are not modified. A copy is made if needed to
apply beam smoothing or pixel window.
Returns
-------
maps : array or list of arrays
A Healpix map in RING scheme at nside or a list of T,Q,U maps (if
polarized input)
Notes
-----
Running map2alm then alm2map will not return exactly the same map if the
discretized field you construct on the sphere is not band-limited (for
example, if you have a map containing pixel-based noise rather than
beam-smoothed noise). If you need a band-limited map, you have to start
with random numbers in lm space and transform these via alm2map. With
such an input, the accuracy of map2alm->alm2map should be quite good,
depending on your choices of lmax, mmax and nside (for some typical
values, see e.g., section 5.1 of https://arxiv.org/pdf/1010.2084).
"""
if not cb.is_seq(alms):
raise TypeError("alms must be a sequence")
check_max_nside(nside)
alms = smoothalm(
alms, fwhm=fwhm, sigma=sigma, pol=pol, inplace=inplace, verbose=verbose
)
if not cb.is_seq_of_seq(alms):
alms = [alms]
lonely = True
else:
lonely = False
if pixwin:
pw = globals()["pixwin"](nside, True)
alms_new = []
for ialm, alm in enumerate(alms):
pixelwindow = pw[1] if ialm >= 1 and pol else pw[0]
alms_new.append(almxfl(alm, pixelwindow, inplace=inplace))
else:
alms_new = alms
if lmax is None:
lmax = -1
if mmax is None:
mmax = -1
if pol:
output = sphtlib._alm2map(
alms_new[0] if lonely else tuple(alms_new), nside, lmax=lmax, mmax=mmax
)
if lonely:
output = [output]
else:
output = [
sphtlib._alm2map(alm, nside, lmax=lmax, mmax=mmax) for alm in alms_new
]
if lonely:
return output[0]
else:
return np.array(output)
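# Usage sketch (illustrative): synthesize alm for a flat spectrum and
# transform them to an nside=32 map. lmax is an arbitrary assumption.
def _example_alm2map():
    lmax = 64
    alm = synalm(np.ones(lmax + 1), lmax=lmax, new=True, verbose=False)
    return alm2map(alm, nside=32, lmax=lmax, verbose=False)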
def synalm(cls, lmax=None, mmax=None, new=False, verbose=True):
"""Generate a set of alm given cl.
The cl are given as a float array. Corresponding alm are generated.
If lmax is None, it is assumed lmax=cl.size-1
If mmax is None, it is assumed mmax=lmax.
Parameters
----------
cls : float, array or tuple of arrays
Either one cl (1D array) or a tuple of either 4 cl
or of n*(n+1)/2 cl.
Some of the cl may be None, implying no
cross-correlation. See *new* parameter.
lmax : int, scalar, optional
The lmax (if None or <0, the largest size-1 of cls)
mmax : int, scalar, optional
The mmax (if None or <0, =lmax)
new : bool, optional
If True, use the new ordering of cl's, ie by diagonal
(e.g. TT, EE, BB, TE, EB, TB or TT, EE, BB, TE if 4 cl as input).
If False, use the old ordering, ie by row
(e.g. TT, TE, TB, EE, EB, BB or TT, TE, EE, BB if 4 cl as input).
Returns
-------
alms : array or list of arrays
the generated alm if one spectrum is given, or a list of n alms
(with n(n+1)/2 the number of input cl, or n=3 if there are 4 input cl).
Notes
-----
The order of the spectra will change in a future release. The new= parameter
helps to make the transition smoother. You can start using the new order
by setting new=True.
In the next version of healpy, the default will be new=True.
This change is done for consistency between the different tools
(alm2cl, synfast, anafast).
In the new order, the spectra are ordered by diagonal of the correlation
matrix. Eg, if fields are T, E, B, the spectra are TT, EE, BB, TE, EB, TB
with new=True, and TT, TE, TB, EE, EB, BB if new=False.
"""
if (not new) and verbose:
warnings.warn(
"The order of the input cl's will change in a future "
"release.\n"
"Use new=True keyword to start using the new order.\n"
"See documentation of healpy.synalm.",
category=FutureChangeWarning,
)
if not cb.is_seq(cls):
raise TypeError("cls must be an array or a sequence of arrays")
if not cb.is_seq_of_seq(cls):
# Only one spectrum
if lmax is None or lmax < 0:
lmax = cls.size - 1
if mmax is None or mmax < 0:
mmax = lmax
cls_list = [np.asarray(cls, dtype=np.float64)]
szalm = Alm.getsize(lmax, mmax)
alm = np.zeros(szalm, "D")
alm.real = np.random.standard_normal(szalm)
alm.imag = np.random.standard_normal(szalm)
alms_list = [alm]
sphtlib._synalm(cls_list, alms_list, lmax, mmax)
return alm
# From here, we interpret cls as a list of spectra
cls_list = list(cls)
maxsize = max([len(c) for c in cls])
if lmax is None or lmax < 0:
lmax = maxsize - 1
if mmax is None or mmax < 0:
mmax = lmax
Nspec = sphtlib._getn(len(cls_list))
if Nspec <= 0:
if len(cls_list) == 4:
if new: ## new input order: TT EE BB TE -> TT EE BB TE 0 0
cls_list = [cls[0], cls[1], cls[2], cls[3], None, None]
else: ## old input order: TT TE EE BB -> TT TE 0 EE 0 BB
cls_list = [cls[0], cls[1], None, cls[2], None, cls[3]]
Nspec = 3
else:
raise TypeError(
"The sequence of arrays must have either 4 elements "
"or n(n+1)/2 elements (some may be None)"
)
szalm = Alm.getsize(lmax, mmax)
alms_list = []
for i in six.moves.xrange(Nspec):
alm = np.zeros(szalm, "D")
alm.real = np.random.standard_normal(szalm)
alm.imag = np.random.standard_normal(szalm)
alms_list.append(alm)
if new: # new input order: input given by diagonal, should be given by row
cls_list = new_to_old_spectra_order(cls_list)
# ensure cls are float64
cls_list = [
(np.asarray(cl, dtype=np.float64) if cl is not None else None)
for cl in cls_list
]
sphtlib._synalm(cls_list, alms_list, lmax, mmax)
return np.array(alms_list)
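# Usage sketch (illustrative): generate correlated T/E/B alm from four flat
# input spectra given in the new (diagonal) ordering TT, EE, BB, TE.
def _example_synalm():
    lmax = 16
    tt = ee = bb = te = np.ones(lmax + 1)
    return synalm((tt, ee, bb, te), lmax=lmax, new=True, verbose=False)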
def synfast(
cls,
nside,
lmax=None,
mmax=None,
alm=False,
pol=True,
pixwin=False,
fwhm=0.0,
sigma=None,
new=False,
verbose=True,
):
"""Create a map(s) from cl(s).
Parameters
----------
cls : array or tuple of array
A cl or a list of cl (either 4 or 6, see :func:`synalm`)
nside : int, scalar
The nside of the output map(s)
lmax : int, scalar, optional
Maximum l for alm. Default: min of 3*nside-1 or length of the cls - 1
mmax : int, scalar, optional
Maximum m for alm.
alm : bool, scalar, optional
If True, return also alm(s). Default: False.
pol : bool, optional
If True, assumes input cls are TEB and correlation. Output will be TQU maps.
(input must be 1, 4 or 6 cl's)
If False, fields are assumed to be described by spin 0 spherical harmonics.
(input can be any number of cl's)
If there is only one input cl, it has no effect. Default: True.
pixwin : bool, scalar, optional
If True, convolve the alm by the pixel window function. Default: False.
fwhm : float, scalar, optional
The fwhm of the Gaussian used to smooth the map (applied on alm)
[in radians]
sigma : float, scalar, optional
The sigma of the Gaussian used to smooth the map (applied on alm)
[in radians]
Returns
-------
maps : array or tuple of arrays
The output map (possibly list of maps if polarized input).
or, if alm is True, a tuple of (map,alm)
(alm possibly a list of alm if polarized input)
Notes
-----
The order of the spectra will change in a future release. The new= parameter
helps to make the transition smoother. You can start using the new order
by setting new=True.
In the next version of healpy, the default will be new=True.
This change is done for consistency between the different tools
(alm2cl, synfast, anafast).
In the new order, the spectra are ordered by diagonal of the correlation
matrix. Eg, if fields are T, E, B, the spectra are TT, EE, BB, TE, EB, TB
with new=True, and TT, TE, TB, EE, EB, BB if new=False.
"""
if not pixelfunc.isnsideok(nside):
raise ValueError("Wrong nside value (must be a power of two).")
check_max_nside(nside)
cls_lmax = cb.len_array_or_arrays(cls) - 1
if lmax is None or lmax < 0:
lmax = min(cls_lmax, 3 * nside - 1)
alms = synalm(cls, lmax=lmax, mmax=mmax, new=new, verbose=verbose)
maps = alm2map(
alms,
nside,
lmax=lmax,
mmax=mmax,
pixwin=pixwin,
pol=pol,
fwhm=fwhm,
sigma=sigma,
inplace=True,
verbose=verbose,
)
if alm:
return np.array(maps), np.array(alms)
else:
return np.array(maps)
class Alm(object):
"""This class provides some static methods for alm index computation.
Methods
-------
getlm
getidx
getsize
getlmax
"""
def __init__(self):
pass
@staticmethod
def getlm(lmax, i=None):
"""Get the l and m from index and lmax.
Parameters
----------
lmax : int
The maximum l defining the alm layout
i : int or None
The index for which to compute the l and m.
If None, the function returns l and m for i=0..Alm.getsize(lmax)-1
"""
szalm = Alm.getsize(lmax, lmax)
if i is None:
i = np.arange(szalm)
assert (
    np.max(i) < szalm
), "Invalid index, it should be less than the max alm array length of {}".format(
    szalm
)
with np.errstate(all="raise"):
m = (
np.ceil(
((2 * lmax + 1) - np.sqrt((2 * lmax + 1) ** 2 - 8 * (i - lmax))) / 2
)
).astype(int)
l = i - m * (2 * lmax + 1 - m) // 2
return (l, m)
@staticmethod
def getidx(lmax, l, m):
"""Returns index corresponding to (l,m) in an array describing alm up to lmax.
In HEALPix C++ and healpy, :math:`a_{lm}` coefficients are stored ordered by
:math:`m`. I.e. if :math:`\ell_{max}` is 16, the first 17 elements are
:math:`m=0, \ell=0-16`, then the following 16 elements are :math:`m=1, \ell=1-16`,
then :math:`m=2, \ell=2-16` and so on until the last element, the 153rd, is
:math:`m=16, \ell=16`.
Parameters
----------
lmax : int
The maximum l, defines the alm layout
l : int
The l for which to get the index
m : int
The m for which to get the index
Returns
-------
idx : int
The index corresponding to (l,m)
"""
return m * (2 * lmax + 1 - m) // 2 + l
@staticmethod
def getsize(lmax, mmax=None):
"""Returns the size of the array needed to store alm up to *lmax* and *mmax*
Parameters
----------
lmax : int
The maximum l, defines the alm layout
mmax : int, optional
The maximum m, defines the alm layout. Default: lmax.
Returns
-------
size : int
The size of the array needed to store alm up to lmax, mmax.
"""
if mmax is None or mmax < 0 or mmax > lmax:
mmax = lmax
return mmax * (2 * lmax + 1 - mmax) // 2 + lmax + 1
@staticmethod
def getlmax(s, mmax=None):
"""Returns the lmax corresponding to a given array size.
Parameters
----------
s : int
Size of the array
mmax : None or int, optional
The maximum m, defines the alm layout. Default: lmax.
Returns
-------
lmax : int
The maximum l of the array, or -1 if it is not a valid size.
"""
if mmax is not None and mmax >= 0:
x = (2 * s + mmax ** 2 - mmax - 2) / (2 * mmax + 2)
else:
x = (-3 + np.sqrt(1 + 8 * s)) / 2
if x != np.floor(x):
return -1
else:
return int(x)
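# Illustrative checks (not part of the original class): the m-major layout
# documented in Alm.getidx, spelled out for lmax = 16.
def _example_alm_layout():
    lmax = 16
    assert Alm.getidx(lmax, l=0, m=0) == 0  # first m=0 element
    assert Alm.getidx(lmax, l=16, m=0) == 16  # last m=0 element
    assert Alm.getidx(lmax, l=1, m=1) == 17  # first m=1 element
    assert Alm.getsize(lmax) == 153  # total number of coefficients
    assert Alm.getlmax(153) == 16  # round-trip back to lmax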
def alm2cl(alms1, alms2=None, lmax=None, mmax=None, lmax_out=None, nspec=None):
    """Computes (cross-)spectra from alm(s). If alms2 is given, cross-spectra between
alms1 and alms2 are computed. If alms1 (and alms2 if provided) contains n alm,
then n(n+1)/2 auto and cross-spectra are returned.
Parameters
----------
alms1 : complex, array or sequence of arrays
The alm from which to compute the power spectrum. If n>=2 arrays are given,
computes both auto- and cross-spectra.
alms2 : complex, array or sequence of 3 arrays, optional
If provided, computes cross-spectra between alms1 and alms2.
Default: alms2=alms1, so auto-spectra are computed.
lmax : None or int, optional
The maximum l of the input alm. Default: computed from size of alm
and mmax_in
mmax : None or int, optional
The maximum m of the input alm. Default: assume mmax_in = lmax_in
lmax_out : None or int, optional
The maximum l of the returned spectra. By default: the lmax of the given
alm(s).
nspec : None or int, optional
The number of spectra to return. None means all, otherwise returns cl[:nspec]
Returns
-------
cl : array or tuple of n(n+1)/2 arrays
the spectrum <*alm* x *alm2*> if *alm* (and *alm2*) is one alm, or
the auto- and cross-spectra <*alm*[i] x *alm2*[j]> if alm (and alm2)
contains more than one spectra.
If more than one spectrum is returned, they are ordered by diagonal.
For example, if *alm* is almT, almE, almB, then the returned spectra are:
TT, EE, BB, TE, EB, TB.
"""
cls = _sphtools.alm2cl(alms1, alms2=alms2, lmax=lmax, mmax=mmax, lmax_out=lmax_out)
if nspec is None:
return np.array(cls)
else:
return np.array(cls[:nspec])
def almxfl(alm, fl, mmax=None, inplace=False):
"""Multiply alm by a function of l. The function is assumed
to be zero where not defined.
Parameters
----------
alm : array
The alm to multiply
fl : array
The function (at l=0..fl.size-1) by which alm must be multiplied.
mmax : None or int, optional
The maximum m defining the alm layout. Default: lmax.
inplace : bool, optional
If True, modify the given alm, otherwise make a copy before multiplying.
Returns
-------
alm : array
The modified alm, either a new array or a reference to input alm,
if inplace is True.
"""
# FIXME: Should handle multidimensional input
almout = _sphtools.almxfl(alm, fl, mmax=mmax, inplace=inplace)
return almout
def smoothalm(
alms,
fwhm=0.0,
sigma=None,
beam_window=None,
pol=True,
mmax=None,
verbose=True,
inplace=True,
):
"""Smooth alm with a Gaussian symmetric beam function.
Parameters
----------
alms : array or sequence of 3 arrays
Either an array representing one alm, or a sequence of arrays.
See *pol* parameter.
fwhm : float, optional
The full width half max parameter of the Gaussian. Default:0.0
[in radians]
sigma : float, optional
The sigma of the Gaussian. Override fwhm.
[in radians]
beam_window: array, optional
Custom beam window function. Override fwhm and sigma.
pol : bool, optional
If True, assumes input alms are TEB. Output will be TQU maps.
(input must be 1 or 3 alms)
If False, apply spin 0 harmonic transform to each alm.
(input can be any number of alms)
If there is only one input alm, it has no effect. Default: True.
mmax : None or int, optional
The maximum m for alm. Default: mmax=lmax
inplace : bool, optional
If True, the alm's are modified inplace if they are contiguous arrays
of type complex128. Otherwise, a copy of alm is made. Default: True.
verbose : bool, optional
If True prints diagnostic information. Default: True
Call hp.disable_warnings() to disable warnings for all functions.
Returns
-------
alms : array or sequence of 3 arrays
The smoothed alm. If alm[i] is a contiguous array of type complex128,
and *inplace* is True the smoothing is applied inplace.
Otherwise, a copy is made.
"""
if (sigma is None) & (beam_window is None):
sigma = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))
if verbose:
if beam_window is None:
warnings.warn(
"Sigma is {0:f} arcmin ({1:f} rad) ".format(
sigma * 60 * 180 / np.pi, sigma
)
)
warnings.warn(
"-> fwhm is {0:f} arcmin".format(
sigma * 60 * 180 / np.pi * (2.0 * np.sqrt(2.0 * np.log(2.0)))
)
)
else:
warnings.warn("Using provided beam window function")
# Check alms
if not cb.is_seq(alms):
raise ValueError("alm must be a sequence")
if (sigma == 0) & (beam_window is None):
# nothing to be done
return alms
lonely = False
if not cb.is_seq_of_seq(alms):
alms = [alms]
lonely = True
# we have 3 alms -> apply smoothing to each map.
# polarization has different B_l from temperature
# exp{-[ell(ell+1) - s**2] * sigma**2/2}
# with s the spin of spherical harmonics
# s = 2 for pol, s=0 for temperature
retalm = []
for ialm, alm in enumerate(alms):
lmax = Alm.getlmax(len(alm), mmax)
if lmax < 0:
raise TypeError(
"Wrong alm size for the given "
"mmax (len(alms[%d]) = %d)." % (ialm, len(alm))
)
ell = np.arange(lmax + 1.0)
s = 2 if ialm >= 1 and pol else 0
if beam_window is None:
fact = np.exp(-0.5 * (ell * (ell + 1) - s ** 2) * sigma ** 2)
else:
fact = np.copy(beam_window)
res = almxfl(alm, fact, mmax=mmax, inplace=inplace)
retalm.append(res)
# Test what to return (inplace/not inplace...)
# Case 1: 1d input, return 1d output
if lonely:
return retalm[0]
# case 2: 2d input, check if in-place smoothing for all alm's
for i in six.moves.xrange(len(alms)):
samearray = alms[i] is retalm[i]
if not samearray:
# Case 2a:
# at least one of the alm could not be smoothed in place:
# return the list of alm
return np.array(retalm)
# Case 2b:
# all smoothing have been performed in place:
# return the input alms. If the input was a tuple, so will the output be.
return alms
@accept_ma
def smoothing(
map_in,
fwhm=0.0,
sigma=None,
beam_window=None,
pol=True,
iter=3,
lmax=None,
mmax=None,
use_weights=False,
use_pixel_weights=False,
datapath=None,
verbose=True,
):
"""Smooth a map with a Gaussian symmetric beam.
No removal of monopole or dipole is performed.
Parameters
----------
map_in : array or sequence of 3 arrays
Either an array representing one map, or a sequence of
3 arrays representing 3 maps, accepts masked arrays
fwhm : float, optional
The full width half max parameter of the Gaussian [in
radians]. Default:0.0
sigma : float, optional
The sigma of the Gaussian [in radians]. Override fwhm.
beam_window: array, optional
Custom beam window function. Override fwhm and sigma.
pol : bool, optional
If True, assumes input maps are TQU. Output will be TQU maps.
(input must be 1 or 3 maps)
If False, each map is assumed to be a spin 0 map and is
treated independently (input can be any number of maps).
If there is only one input map, it has no effect. Default: True.
iter : int, scalar, optional
Number of iterations (default: 3)
lmax : int, scalar, optional
Maximum l of the power spectrum. Default: 3*nside-1
mmax : int, scalar, optional
Maximum m of the alm. Default: lmax
use_weights: bool, scalar, optional
If True, use the ring weighting. Default: False.
use_pixel_weights: bool, optional
If True, use pixel by pixel weighting, healpy will automatically download the weights, if needed
See the map2alm docs for details about weighting
datapath : None or str, optional
If given, the directory where to find the weights data.
verbose : bool, optional
If True prints diagnostic information. Default: True
Returns
-------
maps : array or list of 3 arrays
The smoothed map(s)
"""
if not cb.is_seq(map_in):
raise TypeError("map_in must be a sequence")
# save the masks of inputs
masks = pixelfunc.mask_bad(map_in)
if cb.is_seq_of_seq(map_in):
nside = pixelfunc.get_nside(map_in)
n_maps = len(map_in)
else:
nside = pixelfunc.get_nside(map_in)
n_maps = 0
check_max_nside(nside)
if pol or n_maps in (0, 1):
# Treat the maps together (1 or 3 maps)
alms = map2alm(
map_in,
lmax=lmax,
mmax=mmax,
iter=iter,
pol=pol,
use_weights=use_weights,
use_pixel_weights=use_pixel_weights,
datapath=datapath,
)
smoothalm(
alms,
fwhm=fwhm,
sigma=sigma,
beam_window=beam_window,
pol=pol,
mmax=mmax,
verbose=verbose,
inplace=True,
)
output_map = alm2map(
alms, nside, lmax=lmax, mmax=mmax, pixwin=False, verbose=verbose, pol=pol,
)
else:
# Treat each map independently (any number)
output_map = []
for m in map_in:
alm = map2alm(
m,
lmax=lmax,
mmax=mmax,
iter=iter,
pol=pol,
use_weights=use_weights,
use_pixel_weights=use_pixel_weights,
datapath=datapath,
)
smoothalm(
alm,
fwhm=fwhm,
sigma=sigma,
beam_window=beam_window,
inplace=True,
verbose=verbose,
)
output_map.append(alm2map(alm, nside, pixwin=False, verbose=verbose))
output_map = np.array(output_map)
output_map[masks] = UNSEEN
return output_map
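# Usage sketch (illustrative): smooth a random nside=32 map with a 1 degree
# FWHM Gaussian beam. All numbers are arbitrary assumptions.
def _example_smoothing():
    nside = 32
    m = np.random.standard_normal(12 * nside ** 2)
    return smoothing(m, fwhm=np.radians(1.0), verbose=False)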
def pixwin(nside, pol=False, lmax=None):
"""Return the pixel window function for the given nside.
Parameters
----------
nside : int
The nside for which to return the pixel window function
pol : bool, optional
If True, return also the polar pixel window. Default: False
lmax : int, optional
Maximum l of the power spectrum (default: 3*nside-1)
Returns
-------
pw or pwT,pwP : array or tuple of 2 arrays
The temperature pixel window function, or a tuple with both
temperature and polarisation pixel window functions.
"""
if lmax is None:
lmax = 3 * nside - 1
datapath = DATAPATH
if not pixelfunc.isnsideok(nside):
raise ValueError("Wrong nside value (must be a power of two).")
fname = os.path.join(datapath, "pixel_window_n%04d.fits" % nside)
if not os.path.isfile(fname):
raise ValueError("No pixel window for this nside " "or data files missing")
# return hfitslib._pixwin(nside,datapath,pol) ## BROKEN -> seg fault...
pw = pf.getdata(fname)
pw_temp, pw_pol = pw.field(0), pw.field(1)
if pol:
return pw_temp[: lmax + 1], pw_pol[: lmax + 1]
else:
return pw_temp[: lmax + 1]
def alm2map_der1(alm, nside, lmax=None, mmax=None):
"""Computes a Healpix map and its first derivatives given the alm.
The alm are given as a complex array. You can specify lmax
and mmax, or they will be computed from array size (assuming
lmax==mmax).
Parameters
----------
alm : array, complex
A complex array of alm. Size must be of the form mmax * (2 * lmax + 1 - mmax) / 2 + lmax + 1
nside : int
The nside of the output map.
lmax : None or int, optional
Explicitly define lmax (needed if mmax!=lmax)
mmax : None or int, optional
Explicitly define mmax (needed if mmax!=lmax)
Returns
-------
m, d_theta, d_phi : tuple of arrays
The maps corresponding to alm, and its derivatives with respect to
theta and phi. d_phi is already divided by sin(theta)
"""
check_max_nside(nside)
if lmax is None:
lmax = -1
if mmax is None:
mmax = -1
return np.array(sphtlib._alm2map_der1(alm, nside, lmax=lmax, mmax=mmax))
def new_to_old_spectra_order(cls_new_order):
"""Reorder the cls from new order (by diagonal) to old order (by row).
For example : TT, EE, BB, TE, EB, TB => TT, TE, TB, EE, EB, BB
"""
Nspec = sphtlib._getn(len(cls_new_order))
if Nspec < 0:
raise ValueError("Input must be a list of n(n+1)/2 arrays")
cls_old_order = []
for i in six.moves.xrange(Nspec):
for j in six.moves.xrange(i, Nspec):
p = j - i
q = i
idx_new = p * (2 * Nspec + 1 - p) // 2 + q
cls_old_order.append(cls_new_order[idx_new])
return cls_old_order
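# Illustrative check (not part of the original module): for three fields the
# reordering maps the diagonal ordering onto the row ordering, as documented.
def _example_spectra_order():
    new = ["TT", "EE", "BB", "TE", "EB", "TB"]
    assert new_to_old_spectra_order(new) == ["TT", "TE", "TB", "EE", "EB", "BB"]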
def load_sample_spectra():
"""Read a sample power spectra for testing and demo purpose.
Based on LambdaCDM. Gives TT, EE, BB, TE.
Returns
-------
ell, f, cls : arrays
ell is the array of ell values (from 0 to lmax)
f is the factor ell*(ell+1)/2pi (in general, plots show f * cl)
cls is a sequence of the power spectra TT, EE, BB and TE
"""
cls = np.loadtxt(os.path.join(DATAPATH, "totcls.dat"), unpack=True)
ell = cls[0]
f = ell * (ell + 1) / 2 / np.pi
cls[1:, 1:] /= f[1:]
return ell, f, cls[1:]
def gauss_beam(fwhm, lmax=512, pol=False):
"""Gaussian beam window function
Computes the spherical transform of an axisymmetric Gaussian beam.
For a sky of underlying power spectrum C(l) observed with a beam of
given FWHM, the measured power spectrum will be
C(l)_meas = C(l) B(l)^2
where B(l) is given by gauss_beam(fwhm, lmax).
The polarization beam is also provided (when pol = True) assuming
a perfectly co-polarized beam
(e.g., Challinor et al 2000, astro-ph/0008228)
Parameters
----------
fwhm : float
full width half max in radians
lmax : integer
ell max
pol : bool
if False, output has size (lmax+1) and is temperature beam
if True output has size (lmax+1, 4) with components:
* temperature beam
* grad/electric polarization beam
* curl/magnetic polarization beam
* temperature * grad beam
Returns
-------
beam : array
Beam window function of shape (lmax+1,) if pol is False,
otherwise of shape (lmax+1, 4) containing the polarized beam components
"""
sigma = fwhm / np.sqrt(8.0 * np.log(2.0))
ell = np.arange(lmax + 1)
sigma2 = sigma ** 2
g = np.exp(-0.5 * ell * (ell + 1) * sigma2)
if not pol: # temperature-only beam
return g
else: # polarization beam
# polarization factors [1, 2 sigma^2, 2 sigma^2, sigma^2]
pol_factor = np.exp([0.0, 2 * sigma2, 2 * sigma2, sigma2])
return g[:, np.newaxis] * pol_factor
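# Usage sketch (illustrative): window function of a 10 arcmin FWHM beam.
def _example_gauss_beam():
    fwhm = np.radians(10.0 / 60.0)  # 10 arcmin in radians
    bl = gauss_beam(fwhm, lmax=512)
    return bl  # bl[0] == 1 and decays with increasing ell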
def bl2beam(bl, theta):
"""Computes a circular beam profile b(theta) in real space from
its transfer (or window) function b(l) in spherical harmonic space.
Parameters
----------
bl : array
Window function b(l) of the beam.
theta : array
Radius at which the beam profile will be computed.
Has to be given in radians.
Returns
-------
beam : array
(Circular) beam profile b(theta).
"""
lmax = len(bl) - 1
nx = len(theta)
x = np.cos(theta)
p0 = np.zeros(nx) + 1
p1 = x
beam = bl[0] * p0 + bl[1] * p1 * 3
for l in np.arange(2, lmax + 1):  # include l = lmax, mirroring beam2bl
p2 = x * p1 * (2 * l - 1) / l - p0 * (l - 1) / l
p0 = p1
p1 = p2
beam += bl[l] * p2 * (2 * l + 1)
beam /= 4 * np.pi
return beam
def beam2bl(beam, theta, lmax):
"""Computes a transfer (or window) function b(l) in spherical
harmonic space from its circular beam profile b(theta) in real
space.
Parameters
----------
beam : array
Circular beam profile b(theta).
theta : array
Radius at which the beam profile is given. Has to be given
in radians with same size as beam.
lmax : integer
Maximum multipole moment at which to compute b(l).
Returns
-------
bl : array
Beam window function b(l).
"""
nx = len(theta)
nb = len(beam)
if nb != nx:
    raise ValueError("beam and theta must have same size!")
x = np.cos(theta)
st = np.sin(theta)
window = np.zeros(lmax + 1)
p0 = np.ones(nx)
p1 = np.copy(x)
window[0] = trapz(beam * p0 * st, theta)
window[1] = trapz(beam * p1 * st, theta)
for l in np.arange(2, lmax + 1):
p2 = x * p1 * (2 * l - 1) / l - p0 * (l - 1) / l
window[l] = trapz(beam * p2 * st, theta)
p0 = p1
p1 = p2
window *= 2 * np.pi
return window
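# Round-trip sketch (illustrative): bl2beam and beam2bl are approximate
# inverses when the profile is sampled densely enough; the grid and beam
# width below are assumptions.
def _example_beam_roundtrip():
    lmax = 128
    theta = np.linspace(0.0, np.radians(5.0), 1000)
    bl_in = gauss_beam(np.radians(0.5), lmax=lmax)
    profile = bl2beam(bl_in, theta)
    bl_out = beam2bl(profile, theta, lmax)
    return bl_in, bl_out  # bl_out should closely track bl_in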
def check_max_nside(nside):
"""Checks whether the nside used in a certain operation does not exceed the
maximum supported nside. The maximum nside is saved in MAX_NSIDE.
Parameters
----------
nside : int
nside of the map that is being checked
"""
if nside > MAX_NSIDE:
raise ValueError(
"nside {nside} of map cannot be larger than "
"MAX_NSIDE {max_nside}".format(nside=nside, max_nside=MAX_NSIDE)
)
return 0
|
cjcopi/healpy
|
healpy/sphtfunc.py
|
Python
|
gpl-2.0
| 41,728
|
[
"Gaussian"
] |
6935eec374ab02d6a6d466128991898f62d3e6ef441581b56adc6587975c9035
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import numpy as np
import tempfile
import os
import sys
import mdtraj as md
from mdtraj.formats import LH5TrajectoryFile
from mdtraj.testing import eq
import pytest
on_win = (sys.platform == 'win32')
on_py3 = (sys.version_info >= (3, 0))
# special pytest global to mark all tests in this module
pytestmark = pytest.mark.skipif(on_win and on_py3, reason='lh5 not supported on windows on python 3')
fd, temp = tempfile.mkstemp(suffix='.lh5')
def teardown_module(module):
    """Remove the temporary file created by tests in this module.
    This gets called automatically by pytest at module teardown."""
os.close(fd)
os.unlink(temp)
def test_write_coordinates():
coordinates = np.random.randn(4, 10, 3)
with LH5TrajectoryFile(temp, 'w') as f:
f.write(coordinates)
with LH5TrajectoryFile(temp) as f:
eq(f.read(), coordinates, decimal=3)
with LH5TrajectoryFile(temp) as f:
f.seek(2)
eq(f.read(), coordinates[2:], decimal=3)
f.seek(0)
eq(f.read(), coordinates[0:], decimal=3)
f.seek(-1, 2)
eq(f.read(), coordinates[3:], decimal=3)
def test_write_coordinates_reshape():
coordinates = np.random.randn(10, 3)
with LH5TrajectoryFile(temp, 'w') as f:
f.write(coordinates)
with LH5TrajectoryFile(temp) as f:
eq(f.read(), coordinates.reshape(1, 10, 3), decimal=3)
def test_write_multiple():
coordinates = np.random.randn(4, 10, 3)
with LH5TrajectoryFile(temp, 'w') as f:
f.write(coordinates)
f.write(coordinates)
with LH5TrajectoryFile(temp) as f:
eq(f.read(), np.vstack((coordinates, coordinates)), decimal=3)
def test_topology(get_fn):
top = md.load(get_fn('native.pdb')).topology
with LH5TrajectoryFile(temp, 'w') as f:
f.topology = top
with LH5TrajectoryFile(temp) as f:
assert f.topology == top
def test_read_slice_0():
coordinates = np.random.randn(4, 10, 3)
with LH5TrajectoryFile(temp, 'w') as f:
f.write(coordinates)
with LH5TrajectoryFile(temp) as f:
eq(f.read(n_frames=2), coordinates[:2], decimal=3)
eq(f.read(n_frames=2), coordinates[2:4], decimal=3)
with LH5TrajectoryFile(temp) as f:
eq(f.read(stride=2), coordinates[::2], decimal=3)
with LH5TrajectoryFile(temp) as f:
eq(f.read(stride=2, atom_indices=np.array([0, 1])), coordinates[::2, [0, 1], :], decimal=3)
def test_vsite_elements(get_fn):
# Test case for issue #263
pdb_filename = get_fn('GG-tip4pew.pdb')
trj = md.load(pdb_filename)
trj.save_lh5(temp)
trj2 = md.load(temp, top=pdb_filename)
def test_do_overwrite_0():
with open(temp, 'w') as f:
f.write('a')
with LH5TrajectoryFile(temp, 'w', force_overwrite=True) as f:
f.write(np.random.randn(10, 5, 3))
def test_do_overwrite_1():
with open(temp, 'w') as f:
f.write('a')
with pytest.raises(IOError):
with LH5TrajectoryFile(temp, 'w', force_overwrite=False) as f:
f.write(np.random.randn(10, 5, 3))
|
leeping/mdtraj
|
tests/test_lh5.py
|
Python
|
lgpl-2.1
| 4,054
|
[
"MDTraj"
] |
acc95c4b6b1a15ae8cf0daa88011720fddb8bedecdd7bb78fc125368668f4a83
|
# -*- coding: utf-8 -*-
# Copyright 2014-2022 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extractors for https://danbooru.donmai.us/ and other Danbooru instances"""
from .common import BaseExtractor, Message
from .. import text
import datetime
class DanbooruExtractor(BaseExtractor):
"""Base class for danbooru extractors"""
basecategory = "Danbooru"
filename_fmt = "{category}_{id}_{filename}.{extension}"
page_limit = 1000
page_start = None
per_page = 200
def __init__(self, match):
BaseExtractor.__init__(self, match)
self.ugoira = self.config("ugoira", False)
self.external = self.config("external", False)
self.extended_metadata = self.config("metadata", False)
username, api_key = self._get_auth_info()
if username:
self.log.debug("Using HTTP Basic Auth for user '%s'", username)
self.session.auth = (username, api_key)
instance = INSTANCES.get(self.category) or {}
iget = instance.get
self.headers = iget("headers")
self.page_limit = iget("page-limit", 1000)
self.page_start = iget("page-start")
self.per_page = iget("per-page", 200)
self.request_interval_min = iget("request-interval-min", 0.0)
self._pools = iget("pools")
def request(self, url, **kwargs):
kwargs["headers"] = self.headers
return BaseExtractor.request(self, url, **kwargs)
def skip(self, num):
pages = num // self.per_page
if pages >= self.page_limit:
pages = self.page_limit - 1
self.page_start = pages + 1
return pages * self.per_page
def items(self):
data = self.metadata()
for post in self.posts():
file = post.get("file")
if file:
url = file["url"]
if not url:
md5 = file["md5"]
url = file["url"] = (
"https://static1.{}/data/{}/{}/{}.{}".format(
self.root[8:], md5[0:2], md5[2:4], md5, file["ext"]
))
post["filename"] = file["md5"]
post["extension"] = file["ext"]
else:
try:
url = post["file_url"]
except KeyError:
if self.external and post["source"]:
post.update(data)
yield Message.Directory, post
yield Message.Queue, post["source"], post
continue
text.nameext_from_url(url, post)
if post["extension"] == "zip":
if self.ugoira:
post["frames"] = self.request(
"{}/posts/{}.json?only=pixiv_ugoira_frame_data".format(
self.root, post["id"])
).json()["pixiv_ugoira_frame_data"]["data"]
post["_http_adjust_extension"] = False
else:
url = post["large_file_url"]
post["extension"] = "webm"
if self.extended_metadata:
template = (
"{}/posts/{}.json"
"?only=artist_commentary,children,notes,parent"
)
resp = self.request(template.format(self.root, post["id"]))
post.update(resp.json())
post.update(data)
yield Message.Directory, post
yield Message.Url, url, post
def metadata(self):
return ()
def posts(self):
return ()
def _pagination(self, endpoint, params, pagenum=False):
url = self.root + endpoint
params["limit"] = self.per_page
params["page"] = self.page_start
while True:
posts = self.request(url, params=params).json()
if "posts" in posts:
posts = posts["posts"]
yield from posts
if len(posts) < self.per_page:
return
if pagenum:
params["page"] += 1
else:
for post in reversed(posts):
if "id" in post:
params["page"] = "b{}".format(post["id"])
break
else:
return
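    # Cursor-style pagination above (descriptive note): when pagenum is
    # False, the next request sets params["page"] = "b<id>" using the id of
    # the last post on the current page, i.e. "posts before this id" in the
    # Danbooru API convention.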
INSTANCES = {
"danbooru": {
"root": None,
"pattern": r"(?:danbooru|hijiribe|sonohara|safebooru)\.donmai\.us",
},
"e621": {
"root": None,
"pattern": r"e(?:621|926)\.net",
"headers": {"User-Agent": "gallery-dl/1.14.0 (by mikf)"},
"pools": "sort",
"page-limit": 750,
"per-page": 320,
"request-interval-min": 1.0,
},
"atfbooru": {
"root": "https://booru.allthefallen.moe",
"pattern": r"booru\.allthefallen\.moe",
"page-limit": 5000,
},
}
BASE_PATTERN = DanbooruExtractor.update(INSTANCES)
class DanbooruTagExtractor(DanbooruExtractor):
"""Extractor for danbooru posts from tag searches"""
subcategory = "tag"
directory_fmt = ("{category}", "{search_tags}")
archive_fmt = "t_{search_tags}_{id}"
pattern = BASE_PATTERN + r"/posts\?(?:[^&#]*&)*tags=([^&#]*)"
test = (
("https://danbooru.donmai.us/posts?tags=bonocho", {
"content": "b196fb9f1668109d7774a0a82efea3ffdda07746",
}),
# test page transitions
("https://danbooru.donmai.us/posts?tags=mushishi", {
"count": ">= 300",
}),
# 'external' option (#1747)
("https://danbooru.donmai.us/posts?tags=pixiv_id%3A1476533", {
"options": (("external", True),),
"pattern": r"http://img16.pixiv.net/img/takaraakihito/1476533.jpg",
}),
("https://e621.net/posts?tags=anry", {
"url": "8021e5ea28d47c474c1ffc9bd44863c4d45700ba",
"content": "501d1e5d922da20ee8ff9806f5ed3ce3a684fd58",
}),
("https://booru.allthefallen.moe/posts?tags=yume_shokunin", {
"count": 12,
}),
("https://hijiribe.donmai.us/posts?tags=bonocho"),
("https://sonohara.donmai.us/posts?tags=bonocho"),
("https://safebooru.donmai.us/posts?tags=bonocho"),
("https://e926.net/posts?tags=anry"),
)
def __init__(self, match):
DanbooruExtractor.__init__(self, match)
tags = match.group(match.lastindex)
self.tags = text.unquote(tags.replace("+", " "))
def metadata(self):
return {"search_tags": self.tags}
def posts(self):
return self._pagination("/posts.json", {"tags": self.tags})
class DanbooruPoolExtractor(DanbooruExtractor):
"""Extractor for posts from danbooru pools"""
subcategory = "pool"
directory_fmt = ("{category}", "pool", "{pool[id]} {pool[name]}")
archive_fmt = "p_{pool[id]}_{id}"
pattern = BASE_PATTERN + r"/pool(?:s|/show)/(\d+)"
test = (
("https://danbooru.donmai.us/pools/7659", {
"content": "b16bab12bea5f7ea9e0a836bf8045f280e113d99",
}),
("https://e621.net/pools/73", {
"url": "1bd09a72715286a79eea3b7f09f51b3493eb579a",
"content": "91abe5d5334425d9787811d7f06d34c77974cd22",
}),
("https://booru.allthefallen.moe/pools/9", {
"url": "902549ffcdb00fe033c3f63e12bc3cb95c5fd8d5",
"count": 6,
}),
("https://danbooru.donmai.us/pool/show/7659"),
("https://e621.net/pool/show/73"),
)
def __init__(self, match):
DanbooruExtractor.__init__(self, match)
self.pool_id = match.group(match.lastindex)
self.post_ids = ()
def metadata(self):
url = "{}/pools/{}.json".format(self.root, self.pool_id)
pool = self.request(url).json()
pool["name"] = pool["name"].replace("_", " ")
self.post_ids = pool.pop("post_ids", ())
return {"pool": pool}
def posts(self):
if self._pools == "sort":
self.log.info("Fetching posts of pool %s", self.pool_id)
id_to_post = {
post["id"]: post
for post in self._pagination(
"/posts.json", {"tags": "pool:" + self.pool_id})
}
posts = []
append = posts.append
for num, pid in enumerate(self.post_ids, 1):
if pid in id_to_post:
post = id_to_post[pid]
post["num"] = num
append(post)
else:
self.log.warning("Post %s is unavailable", pid)
return posts
else:
params = {"tags": "pool:" + self.pool_id}
return self._pagination("/posts.json", params)
class DanbooruPostExtractor(DanbooruExtractor):
"""Extractor for single danbooru posts"""
subcategory = "post"
archive_fmt = "{id}"
pattern = BASE_PATTERN + r"/post(?:s|/show)/(\d+)"
test = (
("https://danbooru.donmai.us/posts/294929", {
"content": "5e255713cbf0a8e0801dc423563c34d896bb9229",
}),
("https://danbooru.donmai.us/posts/3613024", {
"pattern": r"https?://.+\.zip$",
"options": (("ugoira", True),)
}),
("https://e621.net/posts/535", {
"url": "f7f78b44c9b88f8f09caac080adc8d6d9fdaa529",
"content": "66f46e96a893fba8e694c4e049b23c2acc9af462",
}),
("https://booru.allthefallen.moe/posts/22", {
"content": "21dda68e1d7e0a554078e62923f537d8e895cac8",
}),
("https://danbooru.donmai.us/post/show/294929"),
("https://e621.net/post/show/535"),
)
def __init__(self, match):
DanbooruExtractor.__init__(self, match)
self.post_id = match.group(match.lastindex)
def posts(self):
url = "{}/posts/{}.json".format(self.root, self.post_id)
post = self.request(url).json()
return (post["post"] if "post" in post else post,)
class DanbooruPopularExtractor(DanbooruExtractor):
"""Extractor for popular images from danbooru"""
subcategory = "popular"
directory_fmt = ("{category}", "popular", "{scale}", "{date}")
archive_fmt = "P_{scale[0]}_{date}_{id}"
pattern = BASE_PATTERN + r"/explore/posts/popular(?:\?([^#]*))?"
test = (
("https://danbooru.donmai.us/explore/posts/popular"),
(("https://danbooru.donmai.us/explore/posts/popular"
"?date=2013-06-06&scale=week"), {
"range": "1-120",
"count": 120,
}),
("https://e621.net/explore/posts/popular"),
(("https://e621.net/explore/posts/popular"
"?date=2019-06-01&scale=month"), {
"pattern": r"https://static\d.e621.net/data/../../[0-9a-f]+",
"count": ">= 70",
}),
("https://booru.allthefallen.moe/explore/posts/popular"),
)
def __init__(self, match):
DanbooruExtractor.__init__(self, match)
self.params = match.group(match.lastindex)
def metadata(self):
self.params = params = text.parse_query(self.params)
scale = params.get("scale", "day")
date = params.get("date") or datetime.date.today().isoformat()
if scale == "week":
date = datetime.date.fromisoformat(date)
date = (date - datetime.timedelta(days=date.weekday())).isoformat()
elif scale == "month":
date = date[:-3]
return {"date": date, "scale": scale}
def posts(self):
if self.page_start is None:
self.page_start = 1
return self._pagination(
"/explore/posts/popular.json", self.params, True)
class DanbooruFavoriteExtractor(DanbooruExtractor):
"""Extractor for e621 favorites"""
subcategory = "favorite"
directory_fmt = ("{category}", "Favorites", "{user_id}")
archive_fmt = "f_{user_id}_{id}"
pattern = BASE_PATTERN + r"/favorites(?:\?([^#]*))?"
test = (
("https://e621.net/favorites"),
("https://e621.net/favorites?page=2&user_id=53275", {
"pattern": r"https://static\d.e621.net/data/../../[0-9a-f]+",
"count": "> 260",
}),
)
def __init__(self, match):
DanbooruExtractor.__init__(self, match)
self.query = text.parse_query(match.group(match.lastindex))
def metadata(self):
return {"user_id": self.query.get("user_id", "")}
def posts(self):
if self.page_start is None:
self.page_start = 1
return self._pagination("/favorites.json", self.query, True)
|
mikf/gallery-dl
|
gallery_dl/extractor/danbooru.py
|
Python
|
gpl-2.0
| 12,770
|
[
"MOE"
] |
7d12f50ed3f45f6d9300eea3ab27ebc60f4491c7a9e409852b9ea3e10994bbb7
|
"""
This file runs the pipeline for differential expression analysis of
contaminated CHO samples.
"""
import os
import sys
import subprocess
sys.path.append('/home/shangzhong/Codes/Pipeline')
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # disable buffer
from Modules.f00_Message import Message
from Modules.f01_list_trim_fq import list_files,Trimmomatic
from Modules.f02_aligner_command import gsnap,STAR,STAR_Db,bowtie
from Modules.f03_samtools import sam2bam_sort,flagstat
from Modules.f04_htseq import htseq_count
from Modules.f05_IDConvert import geneSymbol2EntrezID
from Modules.p01_FileProcess import get_parameters
from Modules.p01_FileProcess import remove
#=========== define some parameters ===================
#parFile = '/data/shangzhong/DE/FDA/01_DE_Parameters.txt'
parFile = sys.argv[1]
param = get_parameters(parFile)
thread = param['thread']
email = param['email']
startMessage = param['startMessage']
endMessage = param['endMessage']
dataSource = param['dataSource']
ref_fa = param['refSequence']
file_path = param['filePath']
db_path = param['alignerDb']
trim = param['trim']
phred = param['phred']
trimmomatic = param['trimmomatic']
trimmoAdapter = param['trimmoAdapter']
aligner = param['aligner']
annotation = param['annotation']
output_path = param['htseqOutPath']
htseqBatch = param['htseqBatch']
db_name = param['gsnapDbName']
gsnap_annotation = param['gsnapAnnotation']
Dict = param['symbolIDFile']
inputpath = file_path
#=========== (0) enter the directory ================
os.chdir(file_path)
Message(startMessage,email)
#=========== (1) reads files and trim ===============
fastqFiles = list_files(file_path)
print 'list file succeed'
if trim == 'True':
try:
trim_fastqFiles = Trimmomatic(trimmomatic,fastqFiles,phred,trimmoAdapter,batch=6)
print 'trim succeed'
print 'fastqFiles is: ',fastqFiles
remove(fastqFiles)
except:
print 'trim failed'
Message('trim failed',email)
raise
else:
trim_fastqFiles = fastqFiles
#=========== (2) run STAR to do the mapping ========
try:
if aligner == 'gsnap':
map_files = gsnap(trim_fastqFiles,db_path, db_name,gsnap_annotation,thread)
elif aligner == 'STAR':
if not os.path.exists(db_path): os.mkdir(db_path)
if os.listdir(db_path) == []:
STAR_Db(db_path,ref_fa,thread)
map_files = STAR(trim_fastqFiles,db_path,thread,annotation,['--outSAMtype BAM SortedByCoordinate','--quantMode GeneCounts'])
elif aligner == 'bowtie':
map_files = bowtie(trim_fastqFiles,db_path,thread=1,otherParameters=[''])
print 'align succeed'
print 'map_files is: ',map_files
except:
print 'align failed'
Message('align failed',email)
raise
#=========== (3) samtools to sort the file ==========
try:
sorted_bams = sam2bam_sort(map_files,thread,'name')
print 'sorted succeed'
print 'sorted_bam is: ',sorted_bams
except:
print 'sorted failed'
Message('sorted failed',email)
raise
#=========== (4) get mapping stats ==================
try:
flagstat(sorted_bams)
print 'flagstat succeed'
except:
print 'flagstat failed'
Message('flagstat failed',email)
raise
#=========== (5) htseq_count ========================
try:
htseq_count(sorted_bams,annotation,output_path,dataSource,htseqBatch)
print 'htseq count succeed'
except:
print 'htseq count failed'
Message('htseq count failed',email)
raise
#=========== (6) htseq symbol to id =================
if dataSource == 'ncbi':
try:
geneSymbol2EntrezID(Dict,output_path,output_path)
print 'id convert succeed'
except:
print 'id convert failed'
Message('id convert failed',email)
raise
Message(endMessage,email)
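# Pipeline summary (descriptive comment, not part of the original script):
# optional Trimmomatic trimming -> alignment (gsnap/STAR/bowtie) ->
# name-sorted BAMs -> flagstat mapping stats -> htseq-count -> optional
# gene-symbol-to-Entrez-ID conversion when dataSource is 'ncbi'.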
|
shl198/Pipeline
|
DiffExpression/01_DE_pipeline.py
|
Python
|
mit
| 3,771
|
[
"Bowtie",
"HTSeq"
] |
0496ceff5cee7b9646d150ad29aaa64f78448368a5e0eb6412af649e2ceb0494
|
#!/usr/bin/env python3
from future.utils import iteritems
from past.builtins import cmp
from functools import cmp_to_key
import http.server
import base64
import io
import json
import threading
import time
import hashlib
import os
import sys
from future.moves.urllib.parse import parse_qs
from decimal import Decimal
from optparse import OptionParser
from twisted.internet import reactor
from datetime import datetime, timedelta
if sys.version_info < (3, 7):
print("ERROR: this script requires at least python 3.7")
exit(1)
from jmbase.support import EXIT_FAILURE
from jmbase import bintohex
from jmclient import FidelityBondMixin, get_interest_rate
from jmclient.fidelity_bond import FidelityBondProof
import sybil_attack_calculations as sybil
from jmbase import get_log
log = get_log()
try:
import matplotlib
except ImportError:
log.warning("matplotlib not found, charts will not be available. "
"Do `pip install matplotlib` in the joinmarket virtualenv.")
if 'matplotlib' in sys.modules:
# https://stackoverflow.com/questions/2801882/generating-a-png-with-matplotlib-when-display-is-undefined
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from jmclient import jm_single, load_program_config, calc_cj_fee, \
get_irc_mchannels, add_base_options
from jmdaemon import OrderbookWatch, MessageChannelCollection, IRCMessageChannel
#TODO this is only for base58, find a solution for a client without jmbitcoin
import jmbitcoin as btc
from jmdaemon.protocol import *
#Initial state: allow only SW offer types
sw0offers = list(filter(lambda x: x[0:3] == 'sw0', offername_list))
swoffers = list(filter(lambda x: x[0:3] == 'swa' or x[0:3] == 'swr', offername_list))
filtered_offername_list = sw0offers
rotateObform = '<form action="rotateOb" method="post"><input type="submit" value="Rotate orderbooks"/></form>'
refresh_orderbook_form = '<form action="refreshorderbook" method="post"><input type="submit" value="Check for timed-out counterparties" /></form>'
sorted_units = ('BTC', 'mBTC', 'μBTC', 'satoshi')
unit_to_power = {'BTC': 8, 'mBTC': 5, 'μBTC': 2, 'satoshi': 0}
sorted_rel_units = ('%', '‱', 'ppm')
rel_unit_to_factor = {'%': 100, '‱': 1e4, 'ppm': 1e6}
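# Illustrative reading of the tables above: a relative fee of 0.0002 of the
# coinjoin amount corresponds to 0.02 %, 2 ‱ or 200 ppm.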
def calc_depth_data(db, value):
pass
def get_graph_html(fig):
imbuf = io.BytesIO()
fig.savefig(imbuf, format='png')
b64 = base64.b64encode(imbuf.getvalue()).decode('utf-8')
return '<img src="data:image/png;base64,' + b64 + '" />'
# callback functions for displaying order data
def do_nothing(arg, order, btc_unit, rel_unit):
return arg
def ordertype_display(ordertype, order, btc_unit, rel_unit):
ordertypes = {'sw0absoffer': 'Native SW Absolute Fee', 'sw0reloffer': 'Native SW Relative Fee',
'swabsoffer': 'SW Absolute Fee', 'swreloffer': 'SW Relative Fee'}
return ordertypes[ordertype]
def cjfee_display(cjfee, order, btc_unit, rel_unit):
if order['ordertype'] in ['swabsoffer', 'sw0absoffer']:
return satoshi_to_unit(cjfee, order, btc_unit, rel_unit)
elif order['ordertype'] in ['reloffer', 'swreloffer', 'sw0reloffer']:
return str(Decimal(cjfee) * Decimal(rel_unit_to_factor[rel_unit])) + rel_unit
def satoshi_to_unit_power(sat, power):
return ("%." + str(power) + "f") % float(
Decimal(sat) / Decimal(10 ** power))
def satoshi_to_unit(sat, order, btc_unit, rel_unit):
return satoshi_to_unit_power(sat, unit_to_power[btc_unit])
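# Worked example (illustrative): satoshi_to_unit_power(150000000, 8)
# returns '1.50000000' (BTC); with power 5 the same amount renders as
# '1500.00000' (mBTC).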
def order_str(s, order, btc_unit, rel_unit):
return str(s)
def create_offerbook_table_heading(btc_unit, rel_unit):
col = ' <th>{1}</th>\n' # .format(field,label)
tableheading = '<table class="tftable sortable" border="1">\n <tr>' + ''.join(
[
col.format('ordertype', 'Type'),
col.format('counterparty', 'Counterparty'),
col.format('oid', 'Order ID'),
col.format('cjfee', 'Fee'),
col.format('txfee', 'Miner Fee Contribution / ' + btc_unit),
col.format('minsize', 'Minimum Size / ' + btc_unit),
col.format('maxsize', 'Maximum Size / ' + btc_unit),
col.format('bondvalue', 'Bond value / ' + btc_unit + '²')
]) + ' </tr>'
return tableheading
def create_bonds_table_heading(btc_unit):
tableheading = ('<table class="tftable sortable" border="1"><tr>'
+ '<th>Counterparty</th>'
+ '<th>UTXO</th>'
+ '<th>Bond value / ' + btc_unit + '²</th>'
+ '<th>Locktime</th>'
+ '<th>Locked coins / ' + btc_unit + '</th>'
+ '<th>Confirmation time</th>'
+ '<th>Signature expiry height</th>'
+ '<th>Redeem script</th>'
+ '</tr>'
)
return tableheading
def create_choose_units_form(selected_btc, selected_rel):
choose_units_form = (
'<form method="get" action="">' +
'<select name="btcunit" onchange="this.form.submit();">' +
''.join(('<option>' + u + ' </option>' for u in sorted_units)) +
'</select><select name="relunit" onchange="this.form.submit();">' +
''.join(('<option>' + u + ' </option>' for u in sorted_rel_units)) +
'</select></form>')
choose_units_form = choose_units_form.replace(
'<option>' + selected_btc,
'<option selected="selected">' + selected_btc)
choose_units_form = choose_units_form.replace(
'<option>' + selected_rel,
'<option selected="selected">' + selected_rel)
return choose_units_form
def get_fidelity_bond_data(taker):
with taker.dblock:
fbonds = taker.db.execute("SELECT * FROM fidelitybonds;").fetchall()
blocks = jm_single().bc_interface.get_current_block_height()
mediantime = jm_single().bc_interface.get_best_block_median_time()
interest_rate = get_interest_rate()
bond_utxo_set = set()
fidelity_bond_data = []
bond_outpoint_conf_times = []
fidelity_bond_values = []
for fb in fbonds:
try:
parsed_bond = FidelityBondProof.parse_and_verify_proof_msg(fb["counterparty"],
fb["takernick"], fb["proof"])
except ValueError:
continue
bond_utxo_data = FidelityBondMixin.get_validated_timelocked_fidelity_bond_utxo(
parsed_bond.utxo, parsed_bond.utxo_pub, parsed_bond.locktime, parsed_bond.cert_expiry,
blocks)
if bond_utxo_data == None:
continue
#check for duplicated utxos i.e. two or more makers using the same UTXO
# which is obviously not allowed, a fidelity bond must only be usable by one maker nick
utxo_str = parsed_bond.utxo[0] + b":" + str(parsed_bond.utxo[1]).encode("ascii")
if utxo_str in bond_utxo_set:
continue
bond_utxo_set.add(utxo_str)
fidelity_bond_data.append((parsed_bond, bond_utxo_data))
conf_time = jm_single().bc_interface.get_block_time(
jm_single().bc_interface.get_block_hash(
blocks - bond_utxo_data["confirms"] + 1
)
)
bond_outpoint_conf_times.append(conf_time)
bond_value = FidelityBondMixin.calculate_timelocked_fidelity_bond_value(
bond_utxo_data["value"],
conf_time,
parsed_bond.locktime,
mediantime,
interest_rate)
fidelity_bond_values.append(bond_value)
return (fidelity_bond_data, fidelity_bond_values, bond_outpoint_conf_times)
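# Summary of the filtering above (descriptive comment): bonds whose proof
# fails to parse, whose UTXO fails validation, or whose UTXO is already
# claimed by another maker nick are skipped entirely.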
class OrderbookPageRequestHeader(http.server.SimpleHTTPRequestHandler):
def __init__(self, request, client_address, base_server):
self.taker = base_server.taker
self.base_server = base_server
http.server.SimpleHTTPRequestHandler.__init__(
self, request, client_address, base_server,
directory=os.path.dirname(os.path.realpath(__file__)))
def create_orderbook_obj(self):
with self.taker.dblock:
rows = self.taker.db.execute('SELECT * FROM orderbook;').fetchall()
fbonds = self.taker.db.execute("SELECT * FROM fidelitybonds;").fetchall()
if not rows or not fbonds:
return []
fidelitybonds = []
if jm_single().bc_interface != None:
(fidelity_bond_data, fidelity_bond_values, bond_outpoint_conf_times) =\
get_fidelity_bond_data(self.taker)
fidelity_bond_values_dict = dict([(bond_data.maker_nick, bond_value)
for (bond_data, _), bond_value in zip(fidelity_bond_data, fidelity_bond_values)])
for ((parsed_bond, bond_utxo_data), fidelity_bond_value, bond_outpoint_conf_time)\
in zip(fidelity_bond_data, fidelity_bond_values, bond_outpoint_conf_times):
fb = {
"counterparty": parsed_bond.maker_nick,
"utxo": {"txid": bintohex(parsed_bond.utxo[0]),
"vout": parsed_bond.utxo[1]},
"bond_value": fidelity_bond_value,
"locktime": parsed_bond.locktime,
"amount": bond_utxo_data["value"],
"address": bond_utxo_data["address"],
"utxo_confirmations": bond_utxo_data["confirms"],
"utxo_confirmation_timestamp": bond_outpoint_conf_time,
"utxo_pub": bintohex(parsed_bond.utxo_pub),
"cert_expiry": parsed_bond.cert_expiry
}
fidelitybonds.append(fb)
else:
fidelity_bond_values_dict = {}
offers = []
for row in rows:
o = dict(row)
if 'cjfee' in o:
if o['ordertype'] == 'swabsoffer'\
or o['ordertype'] == 'sw0absoffer':
o['cjfee'] = int(o['cjfee'])
else:
o['cjfee'] = str(Decimal(o['cjfee']))
o["fidelity_bond_value"] = fidelity_bond_values_dict.get(o["counterparty"], 0)
offers.append(o)
return {"offers": offers, "fidelitybonds": fidelitybonds}
def create_depth_chart(self, cj_amount, args=None):
if 'matplotlib' not in sys.modules:
return 'matplotlib not installed, charts not available'
if args is None:
args = {}
try:
self.taker.dblock.acquire(True)
rows = self.taker.db.execute('SELECT * FROM orderbook;').fetchall()
finally:
self.taker.dblock.release()
sqlorders = [o for o in rows if o["ordertype"] in filtered_offername_list]
orderfees = sorted([calc_cj_fee(o['ordertype'], o['cjfee'], cj_amount) / 1e8
for o in sqlorders
if o['minsize'] <= cj_amount <= o[
'maxsize']])
if len(orderfees) == 0:
return 'No orders at amount ' + str(cj_amount / 1e8)
fig = plt.figure()
scale = args.get("scale")
if (scale is not None) and (scale[0] == "log"):
orderfees = [float(fee) for fee in orderfees]
if orderfees[0] > 0:
ratio = orderfees[-1] / orderfees[0]
step = ratio ** 0.0333 # 1/30
bins = [orderfees[0] * (step ** i) for i in range(30)]
else:
ratio = orderfees[-1] / 1e-8 # single satoshi placeholder
step = ratio ** 0.0333 # 1/30
bins = [1e-8 * (step ** i) for i in range(30)]
bins[0] = orderfees[0] # replace placeholder
plt.xscale('log')
else:
bins = 30
if len(orderfees) == 1: # these days we have liquidity, but just in case...
plt.hist(orderfees, bins, rwidth=0.8, range=(0, orderfees[0] * 2))
else:
plt.hist(orderfees, bins, rwidth=0.8)
plt.grid()
plt.title('CoinJoin Orderbook Depth Chart for amount=' + str(cj_amount /
1e8) + 'btc')
plt.xlabel('CoinJoin Fee / btc')
plt.ylabel('Frequency')
return get_graph_html(fig)
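        # Note on the log-scale branch above (descriptive): it builds 30
        # geometrically spaced bin edges, each one the previous edge times
        # (max_fee / min_fee) ** (1/30), approximated here as ** 0.0333.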
def create_size_histogram(self, args):
if 'matplotlib' not in sys.modules:
return 'matplotlib not installed, charts not available'
try:
self.taker.dblock.acquire(True)
rows = self.taker.db.execute('SELECT maxsize, ordertype FROM orderbook;').fetchall()
finally:
self.taker.dblock.release()
rows = [o for o in rows if o["ordertype"] in filtered_offername_list]
ordersizes = sorted([r['maxsize'] / 1e8 for r in rows])
fig = plt.figure()
scale = args.get("scale")
if (scale is not None) and (scale[0] == "log"):
ratio = ordersizes[-1] / ordersizes[0]
step = ratio ** 0.0333 # 1/30
bins = [ordersizes[0] * (step ** i) for i in range(30)]
else:
bins = 30
plt.hist(ordersizes, bins, histtype='bar', rwidth=0.8)
if bins != 30:
fig.axes[0].set_xscale('log')
plt.grid()
plt.xlabel('Order sizes / btc')
plt.ylabel('Frequency')
return get_graph_html(fig) + ("<br/><a href='?scale=log'>log scale</a>" if
bins == 30 else "<br/><a href='?'>linear</a>")
def create_fidelity_bond_table(self, btc_unit):
if jm_single().bc_interface == None:
with self.taker.dblock:
fbonds = self.taker.db.execute("SELECT * FROM fidelitybonds;").fetchall()
fidelity_bond_data = []
for fb in fbonds:
try:
proof = FidelityBondProof.parse_and_verify_proof_msg(
fb["counterparty"],
fb["takernick"],
fb["proof"])
except ValueError:
proof = None
fidelity_bond_data.append((proof, None))
fidelity_bond_values = [-1]*len(fidelity_bond_data) #-1 means no data
bond_outpoint_conf_times = [-1]*len(fidelity_bond_data)
total_btc_committed_str = "unknown"
else:
(fidelity_bond_data, fidelity_bond_values, bond_outpoint_conf_times) =\
get_fidelity_bond_data(self.taker)
total_btc_committed_str = satoshi_to_unit(
sum([utxo_data["value"] for _, utxo_data in fidelity_bond_data]),
None, btc_unit, 0)
RETARGET_INTERVAL = 2016
elem = lambda e: "<td>" + e + "</td>"
bondtable = ""
for (bond_data, utxo_data), bond_value, conf_time in zip(
fidelity_bond_data, fidelity_bond_values, bond_outpoint_conf_times):
if bond_value == -1 or conf_time == -1 or utxo_data == None:
bond_value_str = "No data"
conf_time_str = "No data"
utxo_value_str = "No data"
else:
bond_value_str = satoshi_to_unit_power(bond_value, 2*unit_to_power[btc_unit])
conf_time_str = str(datetime.utcfromtimestamp(0) + timedelta(seconds=conf_time))
utxo_value_str = satoshi_to_unit(utxo_data["value"], None, btc_unit, 0)
bondtable += ("<tr>"
+ elem(bond_data.maker_nick)
+ elem(bintohex(bond_data.utxo[0]) + ":" + str(bond_data.utxo[1]))
+ elem(bond_value_str)
+ elem((datetime.utcfromtimestamp(0) + timedelta(seconds=bond_data.locktime)).strftime("%Y-%m-%d"))
+ elem(utxo_value_str)
+ elem(conf_time_str)
+ elem(str(bond_data.cert_expiry*RETARGET_INTERVAL))
+ elem(bintohex(btc.mk_freeze_script(bond_data.utxo_pub,
bond_data.locktime)))
+ "</tr>"
)
heading2 = (str(len(fidelity_bond_data)) + " fidelity bonds found with "
+ total_btc_committed_str + " " + btc_unit
+ " total locked up")
choose_units_form = (
'<form method="get" action="">' +
'<select name="btcunit" onchange="this.form.submit();">' +
''.join(('<option>' + u + ' </option>' for u in sorted_units)) +
'</select></form>')
choose_units_form = choose_units_form.replace(
'<option>' + btc_unit,
'<option selected="selected">' + btc_unit)
decodescript_tip = ("<br/>Tip: try running the RPC <code>decodescript "
+ "<redeemscript></code> as proof that the fidelity bond address matches the "
+ "locktime.<br/>Also run <code>gettxout <utxo_txid> <utxo_vout></code> "
+ "as proof that the fidelity bond UTXO is real.")
return (heading2,
choose_units_form + create_bonds_table_heading(btc_unit) + bondtable + "</table>"
+ decodescript_tip)
def create_sybil_resistance_page(self, btc_unit):
if jm_single().bc_interface == None:
return "", "Calculations unavailable, requires configured bitcoin node."
(fidelity_bond_data, fidelity_bond_values, bond_outpoint_conf_times) =\
get_fidelity_bond_data(self.taker)
choose_units_form = (
'<form method="get" action="">' +
'<select name="btcunit" onchange="this.form.submit();">' +
''.join(('<option>' + u + ' </option>' for u in sorted_units)) +
'</select></form>')
choose_units_form = choose_units_form.replace(
'<option>' + btc_unit,
'<option selected="selected">' + btc_unit)
mainbody = choose_units_form
honest_weight = sum(fidelity_bond_values)
mainbody += ("Assuming the makers in the offerbook right now are not sybil attackers, "
+ "how much would a sybil attacker starting now have to sacrifice to succeed in their"
+ " attack with 95% probability. Honest weight="
+ satoshi_to_unit_power(honest_weight, 2*unit_to_power[btc_unit]) + " " + btc_unit
+ "²<br/>Also assumes that takers are not price-sensitive and that their max "
+ "coinjoin fee is configured high enough that they dont exclude any makers.")
heading2 = "Sybil attacks from external enemies."
mainbody += ('<table class="tftable" border="1"><tr>'
+ '<th>Maker count</th>'
+ '<th>6month locked coins / ' + btc_unit + '</th>'
+ '<th>1y locked coins / ' + btc_unit + '</th>'
+ '<th>2y locked coins / ' + btc_unit + '</th>'
+ '<th>5y locked coins / ' + btc_unit + '</th>'
+ '<th>10y locked coins / ' + btc_unit + '</th>'
+ '<th>Required burned coins / ' + btc_unit + '</th>'
+ '</tr>'
)
timelocks = [0.5, 1.0, 2.0, 5.0, 10.0, None]
interest_rate = get_interest_rate()
for makercount, unit_success_sybil_weight in sybil.successful_attack_95pc_sybil_weight.items():
success_sybil_weight = unit_success_sybil_weight * honest_weight
row = "<tr><td>" + str(makercount) + "</td>"
for timelock in timelocks:
if timelock != None:
coins_per_sybil = sybil.weight_to_locked_coins(success_sybil_weight,
interest_rate, timelock)
else:
coins_per_sybil = sybil.weight_to_burned_coins(success_sybil_weight)
row += ("<td>" + satoshi_to_unit(coins_per_sybil*makercount, None, btc_unit, 0)
+ "</td>")
row += "</tr>"
mainbody += row
mainbody += "</table>"
mainbody += ("<h2>Sybil attacks from enemies within</h2>Assume a sybil attack is ongoing"
+ " right now and that the counterparties with the most valuable fidelity bonds are "
+ " actually controlled by the same entity. Then, what is the probability of a "
+ " successful sybil attack for a given makercount, and what is the fidelity bond "
+ " value being foregone by not putting all bitcoins into just one maker.")
mainbody += ('<table class="tftable" border="1"><tr>'
+ '<th>Maker count</th>'
+ '<th>Success probability</th>'
+ '<th>Foregone value / ' + btc_unit + '²</th>'
+ '</tr>'
)
        #Limited because the calculation is slow; this keeps the server responsive.
MAX_MAKER_COUNT_INTERNAL = 10
weights = sorted(fidelity_bond_values)[::-1]
for makercount in range(1, MAX_MAKER_COUNT_INTERNAL+1):
makercount_str = (str(makercount) + " - " + str(MAX_MAKER_COUNT_INTERNAL)
if makercount == len(fidelity_bond_data) and len(fidelity_bond_data) !=
MAX_MAKER_COUNT_INTERNAL else str(makercount))
success_prob = sybil.calculate_top_makers_sybil_attack_success_probability(weights,
makercount)
total_sybil_weight = sum(weights[:makercount])
sacrificed_values = [sybil.weight_to_burned_coins(w) for w in weights[:makercount]]
foregone_value = (sybil.coins_burned_to_weight(sum(sacrificed_values))
- total_sybil_weight)
mainbody += ("<tr><td>" + makercount_str + "</td><td>" + str(round(success_prob*100.0, 5))
+ "%</td><td>" + satoshi_to_unit_power(foregone_value, 2*unit_to_power[btc_unit])
+ "</td></tr>")
if makercount == len(weights):
break
mainbody += "</table>"
return heading2, mainbody
def create_orderbook_table(self, btc_unit, rel_unit):
result = ''
try:
self.taker.dblock.acquire(True)
rows = self.taker.db.execute('SELECT * FROM orderbook;').fetchall()
finally:
self.taker.dblock.release()
if not rows:
return 0, result
rows = [o for o in rows if o["ordertype"] in filtered_offername_list]
if jm_single().bc_interface == None:
for row in rows:
row["bondvalue"] = "No data"
else:
blocks = jm_single().bc_interface.get_current_block_height()
mediantime = jm_single().bc_interface.get_best_block_median_time()
interest_rate = get_interest_rate()
for row in rows:
with self.taker.dblock:
fbond_data = self.taker.db.execute(
"SELECT * FROM fidelitybonds WHERE counterparty=?;", (row["counterparty"],)
).fetchall()
if len(fbond_data) == 0:
row["bondvalue"] = "0"
continue
else:
try:
parsed_bond = FidelityBondProof.parse_and_verify_proof_msg(
fbond_data[0]["counterparty"],
fbond_data[0]["takernick"],
fbond_data[0]["proof"]
)
except ValueError:
row["bondvalue"] = "0"
continue
utxo_data = FidelityBondMixin.get_validated_timelocked_fidelity_bond_utxo(
parsed_bond.utxo, parsed_bond.utxo_pub, parsed_bond.locktime,
parsed_bond.cert_expiry, blocks)
if utxo_data == None:
row["bondvalue"] = "0"
continue
bond_value = FidelityBondMixin.calculate_timelocked_fidelity_bond_value(
utxo_data["value"],
jm_single().bc_interface.get_block_time(
jm_single().bc_interface.get_block_hash(
blocks - utxo_data["confirms"] + 1
)
),
parsed_bond.locktime,
mediantime,
interest_rate)
row["bondvalue"] = satoshi_to_unit_power(bond_value, 2*unit_to_power[btc_unit])
order_keys_display = (('ordertype', ordertype_display),
('counterparty', do_nothing),
('oid', order_str),
('cjfee', cjfee_display),
('txfee', satoshi_to_unit),
('minsize', satoshi_to_unit),
('maxsize', satoshi_to_unit),
('bondvalue', do_nothing))
# somewhat complex sorting to sort by cjfee but with swabsoffers on top
def orderby_cmp(x, y):
if x['ordertype'] == y['ordertype']:
return cmp(Decimal(x['cjfee']), Decimal(y['cjfee']))
return cmp(offername_list.index(x['ordertype']),
offername_list.index(y['ordertype']))
for o in sorted(rows, key=cmp_to_key(orderby_cmp)):
result += ' <tr>\n'
for key, displayer in order_keys_display:
result += ' <td>' + displayer(o[key], o, btc_unit,
rel_unit) + '</td>\n'
result += ' </tr>\n'
return len(rows), result
def get_counterparty_count(self):
try:
self.taker.dblock.acquire(True)
counterparties = self.taker.db.execute(
'SELECT DISTINCT counterparty FROM orderbook WHERE ordertype=? OR ordertype=?;',
filtered_offername_list).fetchall()
finally:
self.taker.dblock.release()
return str(len(counterparties))
def do_GET(self):
# http.server.SimpleHTTPRequestHandler.do_GET(self)
# print('httpd received ' + self.path + ' request')
self.path, query = self.path.split('?', 1) if '?' in self.path else (
self.path, '')
args = parse_qs(query)
pages = ['/', '/fidelitybonds', '/ordersize', '/depth', '/sybilresistance',
'/orderbook.json']
static_files = {'/vendor/sorttable.js', '/vendor/bootstrap.min.css', '/vendor/jquery-3.5.1.slim.min.js'}
if self.path in static_files or self.path not in pages:
return super().do_GET()
fd = open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'orderbook.html'), 'r')
orderbook_fmt = fd.read()
fd.close()
alert_msg = ''
if jm_single().joinmarket_alert[0]:
alert_msg = '<br />JoinMarket Alert Message:<br />' + \
jm_single().joinmarket_alert[0]
if self.path == '/':
btc_unit = args['btcunit'][
0] if 'btcunit' in args else sorted_units[0]
rel_unit = args['relunit'][
0] if 'relunit' in args else sorted_rel_units[0]
if btc_unit not in sorted_units:
btc_unit = sorted_units[0]
if rel_unit not in sorted_rel_units:
rel_unit = sorted_rel_units[0]
ordercount, ordertable = self.create_orderbook_table(
btc_unit, rel_unit)
choose_units_form = create_choose_units_form(btc_unit, rel_unit)
table_heading = create_offerbook_table_heading(btc_unit, rel_unit)
replacements = {
'PAGETITLE': 'JoinMarket Browser Interface',
'MAINHEADING': 'JoinMarket Orderbook',
'SECONDHEADING':
(str(ordercount) + ' orders found by ' +
self.get_counterparty_count() + ' counterparties' + alert_msg),
'MAINBODY': (
rotateObform + refresh_orderbook_form + choose_units_form +
table_heading + ordertable + '</table>\n')
}
elif self.path == '/fidelitybonds':
btc_unit = args['btcunit'][0] if 'btcunit' in args else sorted_units[0]
if btc_unit not in sorted_units:
btc_unit = sorted_units[0]
heading2, mainbody = self.create_fidelity_bond_table(btc_unit)
replacements = {
'PAGETITLE': 'JoinMarket Browser Interface',
'MAINHEADING': 'Fidelity Bonds',
'SECONDHEADING': heading2,
'MAINBODY': mainbody
}
elif self.path == '/ordersize':
replacements = {
'PAGETITLE': 'JoinMarket Browser Interface',
'MAINHEADING': 'Order Sizes',
'SECONDHEADING': 'Order Size Histogram' + alert_msg,
'MAINBODY': self.create_size_histogram(args)
}
elif self.path.startswith('/depth'):
# if self.path[6] == '?':
# quantity =
cj_amounts = [10 ** cja for cja in range(4, 12, 1)]
mainbody = [self.create_depth_chart(cja, args) \
for cja in cj_amounts] + \
["<br/><a href='?'>linear</a>" if args.get("scale") \
else "<br/><a href='?scale=log'>log scale</a>"]
replacements = {
'PAGETITLE': 'JoinMarket Browser Interface',
'MAINHEADING': 'Depth Chart',
'SECONDHEADING': 'Orderbook Depth' + alert_msg,
'MAINBODY': '<br />'.join(mainbody)
}
elif self.path == '/sybilresistance':
btc_unit = args['btcunit'][0] if 'btcunit' in args else sorted_units[0]
if btc_unit not in sorted_units:
btc_unit = sorted_units[0]
heading2, mainbody = self.create_sybil_resistance_page(btc_unit)
replacements = {
'PAGETITLE': 'JoinMarket Browser Interface',
'MAINHEADING': 'Resistance to Sybil Attacks from Fidelity Bonds',
'SECONDHEADING': heading2,
'MAINBODY': mainbody
}
elif self.path == '/orderbook.json':
replacements = {}
orderbook_fmt = json.dumps(self.create_orderbook_obj())
orderbook_page = orderbook_fmt
for key, rep in iteritems(replacements):
orderbook_page = orderbook_page.replace(key, rep)
self.send_response(200)
if self.path.endswith('.json'):
self.send_header('Content-Type', 'application/json')
else:
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(orderbook_page))
self.end_headers()
self.wfile.write(orderbook_page.encode('utf-8'))
def do_POST(self):
global filtered_offername_list
pages = ['/refreshorderbook', '/rotateOb']
if self.path not in pages:
return
if self.path == '/refreshorderbook':
self.taker.msgchan.request_orderbook()
time.sleep(5)
self.path = '/'
self.do_GET()
elif self.path == '/rotateOb':
if filtered_offername_list == sw0offers:
log.debug('Showing nested segwit orderbook')
filtered_offername_list = swoffers
elif filtered_offername_list == swoffers:
log.debug('Showing native segwit orderbook')
filtered_offername_list = sw0offers
self.path = '/'
self.do_GET()
class HTTPDThread(threading.Thread):
def __init__(self, taker, hostport):
threading.Thread.__init__(self, name='HTTPDThread')
self.daemon = True
self.taker = taker
self.hostport = hostport
def run(self):
# hostport = ('localhost', 62601)
try:
httpd = http.server.HTTPServer(self.hostport,
OrderbookPageRequestHeader)
except Exception as e:
print("Failed to start HTTP server: " + str(e))
os._exit(EXIT_FAILURE)
httpd.taker = self.taker
print('\nstarted http server, visit http://{0}:{1}/\n'.format(
*self.hostport))
httpd.serve_forever()
class ObBasic(OrderbookWatch):
"""Dummy orderbook watch class
with hooks for triggering orderbook request"""
def __init__(self, msgchan, hostport):
self.hostport = hostport
self.set_msgchan(msgchan)
def on_welcome(self):
"""TODO: It will probably be a bit
simpler, and more consistent, to use
a twisted http server here instead
of a thread."""
HTTPDThread(self, self.hostport).start()
self.request_orderbook()
def request_orderbook(self):
self.msgchan.request_orderbook()
class ObIRCMessageChannel(IRCMessageChannel):
"""A customisation of the message channel
to allow receipt of privmsgs without the
verification hooks in client-daemon communication."""
def on_privmsg(self, nick, message):
if len(message) < 2:
return
if message[0] != COMMAND_PREFIX:
log.debug('message not a cmd')
return
cmd_string = message[1:].split(' ')[0]
if cmd_string not in offername_list:
log.debug('non-offer ignored')
return
#Ignore sigs (TODO better to include check)
sig = message[1:].split(' ')[-2:]
#reconstruct original message without cmd pref
rawmessage = ' '.join(message[1:].split(' ')[:-2])
for command in rawmessage.split(COMMAND_PREFIX):
_chunks = command.split(" ")
try:
self.check_for_orders(nick, _chunks)
self.check_for_fidelity_bond(nick, _chunks)
except:
pass
def get_dummy_nick():
"""In Joinmarket-CS nick creation is negotiated
between client and server/daemon so as to allow
client to sign for messages; here we only ever publish
an orderbook request, so no such need, but for better
privacy, a conformant nick is created based on a random
pseudo-pubkey."""
nick_pkh_raw = hashlib.sha256(os.urandom(10)).digest()[:NICK_HASH_LENGTH]
nick_pkh = btc.base58.encode(nick_pkh_raw)
#right pad to maximum possible; b58 is not fixed length.
    #Use 'O', one of the 4 characters excluded from base58.
nick_pkh += 'O' * (NICK_MAX_ENCODED - len(nick_pkh))
#The constructed length will be 1 + 1 + NICK_MAX_ENCODED
nick = JOINMARKET_NICK_HEADER + str(JM_VERSION) + nick_pkh
jm_single().nickname = nick
return nick
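# Illustrative nick shape (actual lengths depend on jmdaemon.protocol
# constants): JOINMARKET_NICK_HEADER + str(JM_VERSION) followed by the
# base58 of a truncated random hash, right-padded with 'O'.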
def main():
parser = OptionParser(
usage='usage: %prog [options]',
description='Runs a webservice which shows the orderbook.')
add_base_options(parser)
parser.add_option('-H',
'--host',
action='store',
type='string',
dest='host',
default='localhost',
help='hostname or IP to bind to, default=localhost')
parser.add_option('-p',
'--port',
action='store',
type='int',
dest='port',
help='port to listen on, default=62601',
default=62601)
(options, args) = parser.parse_args()
load_program_config(config_path=options.datadir)
hostport = (options.host, options.port)
mcs = [ObIRCMessageChannel(c) for c in get_irc_mchannels()]
mcc = MessageChannelCollection(mcs)
mcc.set_nick(get_dummy_nick())
taker = ObBasic(mcc, hostport)
log.info("Starting ob-watcher")
mcc.run()
if __name__ == "__main__":
main()
reactor.run()
print('done')
|
undeath/joinmarket-clientserver
|
scripts/obwatch/ob-watcher.py
|
Python
|
gpl-3.0
| 35,777
|
[
"VisIt"
] |
7b8aedc5ddf0cb12fc936f5ec157537f5b8997395de7a7dfa7b086a339aa6170
|
'''Read and write DNA sequences.'''
import csv
import os
from Bio import SeqIO
from Bio.Alphabet.IUPAC import ambiguous_dna
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation, ExactPosition
from Bio.SeqFeature import CompoundLocation
import coral
import coral.constants.genbank
class PrimerAnnotationError(ValueError):
pass
class FeatureNameError(ValueError):
pass
def read_dna(path):
'''Read DNA from file. Uses BioPython and coerces to coral format.
:param path: Full path to input file.
:type path: str
:returns: DNA sequence.
:rtype: coral.DNA
'''
filename, ext = os.path.splitext(os.path.split(path)[-1])
genbank_exts = ['.gb', '.ape']
fasta_exts = ['.fasta', '.fa', '.fsa', '.seq']
abi_exts = ['.abi', '.ab1']
if any([ext == extension for extension in genbank_exts]):
file_format = 'genbank'
elif any([ext == extension for extension in fasta_exts]):
file_format = 'fasta'
elif any([ext == extension for extension in abi_exts]):
file_format = 'abi'
else:
raise ValueError('File format not recognized.')
seq = SeqIO.read(path, file_format)
dna = coral.DNA(str(seq.seq))
if seq.name == '.':
dna.name = filename
else:
dna.name = seq.name
# Features
for feature in seq.features:
try:
dna.features.append(_seqfeature_to_coral(feature))
except FeatureNameError:
pass
dna.features = sorted(dna.features, key=lambda feature: feature.start)
# Used to use data_file_division, but it's inconsistent (not always the
# molecule type)
dna.circular = False
with open(path) as f:
first_line = f.read().split()
for word in first_line:
if word == 'circular':
dna.circular = True
return dna
def read_sequencing(directory):
'''Read .seq and .abi/.ab1 results files from a dir.
:param directory: Path to directory containing sequencing files.
:type directory: str
:returns: A list of DNA sequences.
:rtype: coral.DNA list
'''
dirfiles = os.listdir(directory)
seq_exts = ['.seq', '.abi', '.ab1']
# Exclude files that aren't sequencing results
seq_paths = [x for x in dirfiles if os.path.splitext(x)[1] in seq_exts]
paths = [os.path.join(directory, x) for x in seq_paths]
sequences = [read_dna(x) for x in paths]
return sequences
def write_dna(dna, path):
'''Write DNA to a file (genbank or fasta).
:param dna: DNA sequence to write to file
:type dna: coral.DNA
:param path: file path to write. Has to be genbank or fasta file.
:type path: str
'''
# Check if path filetype is valid, remember for later
ext = os.path.splitext(path)[1]
if ext == '.gb' or ext == '.ape':
filetype = 'genbank'
elif ext == '.fa' or ext == '.fasta':
filetype = 'fasta'
else:
raise ValueError('Only genbank or fasta files are supported.')
# Convert features to Biopython form
# Information lost on conversion:
# specificity of feature type
# strandedness
# topology
features = []
for feature in dna.features:
features.append(_coral_to_seqfeature(feature))
# Biopython doesn't like 'None' here
# FIXME: this is a legacy feature - remove?
bio_id = dna.id if hasattr(dna, 'id') else ''
# Maximum length of name is 16
seq = SeqRecord(Seq(str(dna), alphabet=ambiguous_dna), id=bio_id,
name=dna.name[0:16].replace(' ', '_'), features=features,
description=dna.name)
if dna.circular:
seq.annotations['data_file_division'] = 'circular'
else:
seq.annotations['data_file_division'] = 'linear'
if filetype == 'genbank':
SeqIO.write(seq, path, 'genbank')
elif filetype == 'fasta':
SeqIO.write(seq, path, 'fasta')
def write_primers(primer_list, path, names=None, notes=None):
    '''Write a list of primers out to a csv file. The first three columns are
    compatible with the current IDT order form (name, sequence, notes). Names
    and notes are optional; by default no notes are written.
:param primer_list: A list of primers.
:type primer_list: coral.Primer list
:param path: A path to the csv you want to write.
:type path: str
:param names: A list of strings to name each oligo. Must be the same length
as the primer_list.
:type names: str list
:param notes: A list of strings to provide a note for each oligo. Must be
the same length as the primer_list.
:type notes: str list
'''
# Check for notes and names having the right length, apply them to primers
if names is not None:
if len(names) != len(primer_list):
names_msg = 'Mismatch in number of notes and primers.'
raise PrimerAnnotationError(names_msg)
for i, name in enumerate(names):
primer_list[i].name = name
if notes is not None:
if len(notes) != len(primer_list):
notes_msg = 'Mismatch in number of notes and primers.'
raise PrimerAnnotationError(notes_msg)
for i, note in enumerate(notes):
primer_list[i].note = note
# Write to csv
with open(path, 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(['name', 'sequence', 'notes'])
for primer in primer_list:
string_rep = str(primer.overhang).lower() + str(primer.anneal)
writer.writerow([primer.name, string_rep, primer.note])
def _process_feature_type(feature_type, bio_to_coral=True):
'''Translate genbank feature types into usable ones (currently identical).
The feature table is derived from the official genbank spec (gbrel.txt)
available at http://www.insdc.org/documents/feature-table
:param feature_type: feature to convert
:type feature_type: str
:param bio_to_coral: from coral to Biopython (True) or the other direction
(False)
:param bio_to_coral: bool
:returns: coral version of genbank feature_type, or vice-versa.
:rtype: str
'''
err_msg = 'Unrecognized feature type: {}'.format(feature_type)
if bio_to_coral:
try:
name = coral.constants.genbank.TO_CORAL[feature_type]
except KeyError:
raise ValueError(err_msg)
else:
try:
name = coral.constants.genbank.TO_BIO[feature_type]
except KeyError:
raise ValueError(err_msg)
return name
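# Usage note (descriptive): the genbank<->coral tables are currently the
# identity mapping, so e.g. _process_feature_type('CDS') returns 'CDS',
# provided 'CDS' appears in coral.constants.genbank.TO_CORAL.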
def _seqfeature_to_coral(feature):
'''Convert a Biopython SeqFeature to a coral.Feature.
:param feature: Biopython SeqFeature
:type feature: Bio.SeqFeature
'''
# Some genomic sequences don't have a label attribute
# TODO: handle genomic cases differently than others. Some features lack
# a label but should still be incorporated somehow.
qualifiers = feature.qualifiers
if 'label' in qualifiers:
feature_name = qualifiers['label'][0]
elif 'locus_tag' in qualifiers:
feature_name = qualifiers['locus_tag'][0]
else:
raise FeatureNameError('Unrecognized feature name')
# Features with gaps are special, require looking at subfeatures
# Assumption: subfeatures are never more than one level deep
if feature.location_operator == 'join':
# Feature has gaps. Have to figure out start/stop from subfeatures,
# calculate gap indices. A nested feature model may be required
# eventually.
# Reorder the sub_feature list by start location
# Assumption: none of the subfeatures overlap so the last entry in
# the reordered list also has the final stop point of the feature.
# FIXME: Getting a deprecation warning about using sub_features
# instead of feature.location being a CompoundFeatureLocation
reordered = sorted(feature.location.parts,
key=lambda location: location.start)
starts = [int(location.start) for location in reordered]
stops = [int(location.end) for location in reordered]
feature_start = starts.pop(0)
feature_stop = stops.pop(-1)
starts = [start - feature_start for start in starts]
stops = [stop - feature_start for stop in stops]
feature_gaps = list(zip(stops, starts))
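        # Worked example (illustrative): joined parts [10, 20) and [30, 40)
        # give feature_start=10, feature_stop=40 and, after shifting to
        # feature-relative coordinates, feature_gaps=[(10, 20)].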
else:
# Feature doesn't have gaps. Ignore subfeatures.
feature_start = int(feature.location.start)
feature_stop = int(feature.location.end)
feature_gaps = []
feature_type = _process_feature_type(feature.type)
if feature.location.strand == -1:
feature_strand = 1
else:
feature_strand = 0
if 'gene' in qualifiers:
gene = qualifiers['gene']
else:
gene = []
if 'locus_tag' in qualifiers:
locus_tag = qualifiers['locus_tag']
else:
locus_tag = []
coral_feature = coral.Feature(feature_name, feature_start,
feature_stop, feature_type,
gene=gene, locus_tag=locus_tag,
qualifiers=qualifiers,
strand=feature_strand,
gaps=feature_gaps)
return coral_feature
def _coral_to_seqfeature(feature):
'''Convert a coral.Feature to a Biopython SeqFeature.
:param feature: coral Feature.
:type feature: coral.Feature
'''
bio_strand = 1 if feature.strand == 1 else -1
ftype = _process_feature_type(feature.feature_type, bio_to_coral=False)
sublocations = []
if feature.gaps:
# There are gaps. Have to define location_operator and add subfeatures
location_operator = 'join'
# Feature location means nothing for 'join' sequences?
# TODO: verify
location = FeatureLocation(ExactPosition(0), ExactPosition(1),
strand=bio_strand)
# Reconstruct start/stop indices for each subfeature
stops, starts = zip(*feature.gaps)
starts = [feature.start] + [start + feature.start for start in starts]
stops = [stop + feature.start for stop in stops] + [feature.stop]
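        # Illustrative inverse of the gap example in _seqfeature_to_coral:
        # start=10, stop=40, gaps=[(10, 20)] reconstructs the absolute
        # sublocations [10, 20) and [30, 40).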
# Build subfeatures
for start, stop in zip(starts, stops):
sublocation = FeatureLocation(ExactPosition(start),
ExactPosition(stop),
strand=bio_strand)
sublocations.append(sublocation)
location = CompoundLocation(sublocations, operator='join')
else:
# No gaps, feature is simple
location_operator = ''
location = FeatureLocation(ExactPosition(feature.start),
ExactPosition(feature.stop),
strand=bio_strand)
qualifiers = feature.qualifiers
qualifiers['label'] = [feature.name]
seqfeature = SeqFeature(location, type=ftype,
qualifiers=qualifiers,
location_operator=location_operator)
return seqfeature
|
klavinslab/coral
|
coral/seqio/_dna.py
|
Python
|
mit
| 11,219
|
[
"Biopython"
] |
3e23553f84da76aa18783b1540079bed4b5d39c644afe88bf6237d3b8740f50a
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Classes for representing multi-dimensional data with metadata.
"""
from collections import OrderedDict
from collections.abc import Container, Iterable, Iterator, MutableMapping
import copy
from copy import deepcopy
from functools import partial, reduce
import operator
import warnings
from xml.dom.minidom import Document
import zlib
import dask.array as da
import numpy as np
import numpy.ma as ma
import iris._constraints
from iris._data_manager import DataManager
import iris._lazy_data as _lazy
import iris._merge
import iris.analysis
from iris.analysis.cartography import wrap_lons
import iris.analysis.maths
import iris.aux_factory
from iris.common import CFVariableMixin, CubeMetadata, metadata_manager_factory
from iris.common.metadata import metadata_filter
import iris.coord_systems
import iris.coords
import iris.exceptions
import iris.util
__all__ = ["Cube", "CubeList"]
# The XML namespace to use for CubeML documents
XML_NAMESPACE_URI = "urn:x-iris:cubeml-0.2"
class _CubeFilter:
"""
A constraint, paired with a list of cubes matching that constraint.
"""
def __init__(self, constraint, cubes=None):
self.constraint = constraint
if cubes is None:
cubes = CubeList()
self.cubes = cubes
def __len__(self):
return len(self.cubes)
def add(self, cube):
"""
Adds the appropriate (sub)cube to the list of cubes where it
matches the constraint.
"""
sub_cube = self.constraint.extract(cube)
if sub_cube is not None:
self.cubes.append(sub_cube)
def merged(self, unique=False):
"""
Returns a new :class:`_CubeFilter` by merging the list of
cubes.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
"""
return _CubeFilter(self.constraint, self.cubes.merge(unique))
class _CubeFilterCollection:
"""
A list of _CubeFilter instances.
"""
@staticmethod
def from_cubes(cubes, constraints=None):
"""
Creates a new collection from an iterable of cubes, and some
optional constraints.
"""
constraints = iris._constraints.list_of_constraints(constraints)
pairs = [_CubeFilter(constraint) for constraint in constraints]
collection = _CubeFilterCollection(pairs)
for cube in cubes:
collection.add_cube(cube)
return collection
def __init__(self, pairs):
self.pairs = pairs
def add_cube(self, cube):
"""
Adds the given :class:`~iris.cube.Cube` to all of the relevant
constraint pairs.
"""
for pair in self.pairs:
pair.add(cube)
def cubes(self):
"""
Returns all the cubes in this collection concatenated into a
single :class:`CubeList`.
"""
result = CubeList()
for pair in self.pairs:
result.extend(pair.cubes)
return result
def merged(self, unique=False):
"""
Returns a new :class:`_CubeFilterCollection` by merging all the cube
lists of this collection.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
"""
return _CubeFilterCollection(
[pair.merged(unique) for pair in self.pairs]
)
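# Typical flow (descriptive): _CubeFilterCollection.from_cubes(cubes,
# constraints) buckets every cube into each matching constraint's
# _CubeFilter, and merged() then merges each bucket independently.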
class CubeList(list):
"""
All the functionality of a standard :class:`list` with added "Cube"
context.
"""
def __new__(cls, list_of_cubes=None):
"""Given a :class:`list` of cubes, return a CubeList instance."""
cube_list = list.__new__(cls, list_of_cubes)
# Check that all items in the incoming list are cubes. Note that this
# checking does not guarantee that a CubeList instance *always* has
# just cubes in its list as the append & __getitem__ methods have not
# been overridden.
if not all([isinstance(cube, Cube) for cube in cube_list]):
raise ValueError(
"All items in list_of_cubes must be Cube " "instances."
)
return cube_list
def __str__(self):
"""Runs short :meth:`Cube.summary` on every cube."""
result = [
"%s: %s" % (i, cube.summary(shorten=True))
for i, cube in enumerate(self)
]
if result:
result = "\n".join(result)
else:
result = "< No cubes >"
return result
def __repr__(self):
"""Runs repr on every cube."""
return "[%s]" % ",\n".join([repr(cube) for cube in self])
def _repr_html_(self):
from iris.experimental.representation import CubeListRepresentation
representer = CubeListRepresentation(self)
return representer.repr_html()
# TODO #370 Which operators need overloads?
def __add__(self, other):
return CubeList(list.__add__(self, other))
def __getitem__(self, keys):
"""x.__getitem__(y) <==> x[y]"""
result = super().__getitem__(keys)
if isinstance(result, list):
result = CubeList(result)
return result
def __getslice__(self, start, stop):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
result = super().__getslice__(start, stop)
result = CubeList(result)
return result
def xml(self, checksum=False, order=True, byteorder=True):
"""Return a string of the XML that this list of cubes represents."""
doc = Document()
cubes_xml_element = doc.createElement("cubes")
cubes_xml_element.setAttribute("xmlns", XML_NAMESPACE_URI)
for cube_obj in self:
cubes_xml_element.appendChild(
cube_obj._xml_element(
doc, checksum=checksum, order=order, byteorder=byteorder
)
)
doc.appendChild(cubes_xml_element)
# return our newly created XML string
doc = Cube._sort_xml_attrs(doc)
return doc.toprettyxml(indent=" ")
def extract(self, constraints):
"""
Filter each of the cubes which can be filtered by the given
constraints.
This method iterates over each constraint given, and subsets each of
the cubes in this CubeList where possible. Thus, a CubeList of length
**n** when filtered with **m** constraints can generate a maximum of
**m * n** cubes.
Args:
* constraints (:class:`~iris.Constraint` or iterable of constraints):
A single constraint or an iterable.
"""
return self._extract_and_merge(self, constraints, strict=False)
def extract_cube(self, constraint):
"""
Extract a single cube from a CubeList, and return it.
Raise an error if the extract produces no cubes, or more than one.
Args:
* constraint (:class:`~iris.Constraint`):
The constraint to extract with.
.. see also::
:meth:`~iris.cube.CubeList.extract`
"""
# Just validate this, so we can accept strings etc, but not multiples.
constraint = iris._constraints.as_constraint(constraint)
return self._extract_and_merge(
self, constraint, strict=True, return_single_cube=True
)
def extract_cubes(self, constraints):
"""
Extract specific cubes from a CubeList, one for each given constraint.
Each constraint must produce exactly one cube, otherwise an error is
raised.
Args:
* constraints (iterable of, or single, :class:`~iris.Constraint`):
The constraints to extract with.
.. see also::
:meth:`~iris.cube.CubeList.extract`
"""
return self._extract_and_merge(
self, constraints, strict=True, return_single_cube=False
)
@staticmethod
def _extract_and_merge(
cubes, constraints, strict=False, return_single_cube=False
):
constraints = iris._constraints.list_of_constraints(constraints)
# group the resultant cubes by constraints in a dictionary
constraint_groups = dict(
[(constraint, CubeList()) for constraint in constraints]
)
for cube in cubes:
for constraint, cube_list in constraint_groups.items():
sub_cube = constraint.extract(cube)
if sub_cube is not None:
cube_list.append(sub_cube)
result = CubeList()
for constraint in constraints:
constraint_cubes = constraint_groups[constraint]
if strict and len(constraint_cubes) != 1:
msg = "Got %s cubes for constraint %r, " "expecting 1." % (
len(constraint_cubes),
constraint,
)
raise iris.exceptions.ConstraintMismatchError(msg)
result.extend(constraint_cubes)
if return_single_cube:
if len(result) != 1:
# Practically this should never occur, as we now *only* request
# single cube result for 'extract_cube'.
msg = "Got {!s} cubes for constraints {!r}, expecting 1."
raise iris.exceptions.ConstraintMismatchError(
msg.format(len(result), constraints)
)
result = result[0]
return result
def extract_overlapping(self, coord_names):
"""
Returns a :class:`CubeList` of cubes extracted over regions
where the coordinates overlap, for the coordinates
in coord_names.
Args:
* coord_names:
A string or list of strings of the names of the coordinates
over which to perform the extraction.
"""
if isinstance(coord_names, str):
coord_names = [coord_names]
def make_overlap_fn(coord_name):
def overlap_fn(cell):
return all(
cell in cube.coord(coord_name).cells() for cube in self
)
return overlap_fn
coord_values = {
coord_name: make_overlap_fn(coord_name)
for coord_name in coord_names
}
return self.extract(iris.Constraint(coord_values=coord_values))
def merge_cube(self):
"""
Return the merged contents of the :class:`CubeList` as a single
:class:`Cube`.
If it is not possible to merge the `CubeList` into a single
`Cube`, a :class:`~iris.exceptions.MergeError` will be raised
describing the reason for the failure.
For example:
>>> cube_1 = iris.cube.Cube([1, 2])
>>> cube_1.add_aux_coord(iris.coords.AuxCoord(0, long_name='x'))
>>> cube_2 = iris.cube.Cube([3, 4])
>>> cube_2.add_aux_coord(iris.coords.AuxCoord(1, long_name='x'))
>>> cube_2.add_dim_coord(
... iris.coords.DimCoord([0, 1], long_name='z'), 0)
>>> single_cube = iris.cube.CubeList([cube_1, cube_2]).merge_cube()
Traceback (most recent call last):
...
iris.exceptions.MergeError: failed to merge into a single cube.
Coordinates in cube.dim_coords differ: z.
Coordinate-to-dimension mapping differs for cube.dim_coords.
"""
if not self:
raise ValueError("can't merge an empty CubeList")
# Register each of our cubes with a single ProtoCube.
proto_cube = iris._merge.ProtoCube(self[0])
for cube in self[1:]:
proto_cube.register(cube, error_on_mismatch=True)
# Extract the merged cube from the ProtoCube.
(merged_cube,) = proto_cube.merge()
return merged_cube
def merge(self, unique=True):
"""
Returns the :class:`CubeList` resulting from merging this
:class:`CubeList`.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
This combines cubes with different values of an auxiliary scalar
coordinate, by constructing a new dimension.
.. testsetup::
import iris
c1 = iris.cube.Cube([0,1,2], long_name='some_parameter')
xco = iris.coords.DimCoord([11, 12, 13], long_name='x_vals')
c1.add_dim_coord(xco, 0)
c1.add_aux_coord(iris.coords.AuxCoord([100], long_name='y_vals'))
c2 = c1.copy()
c2.coord('y_vals').points = [200]
For example::
>>> print(c1)
some_parameter / (unknown) (x_vals: 3)
Dimension coordinates:
x_vals x
Scalar coordinates:
y_vals: 100
>>> print(c2)
some_parameter / (unknown) (x_vals: 3)
Dimension coordinates:
x_vals x
Scalar coordinates:
y_vals: 200
>>> cube_list = iris.cube.CubeList([c1, c2])
>>> new_cube = cube_list.merge()[0]
>>> print(new_cube)
some_parameter / (unknown) (y_vals: 2; x_vals: 3)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(new_cube.coord('y_vals').points)
[100 200]
>>>
Contrast this with :meth:`iris.cube.CubeList.concatenate`, which joins
cubes along an existing dimension.
.. note::
Cubes may contain additional dimensional elements such as auxiliary
coordinates, cell measures or ancillary variables.
        A group of similar cubes can only merge to a single result if all such
        elements are identical in every input cube: they are then present,
        unchanged, in the merged output cube.
.. note::
If time coordinates in the list of cubes have differing epochs then
the cubes will not be able to be merged. If this occurs, use
:func:`iris.util.unify_time_units` to normalise the epochs of the
time coordinates so that the cubes can be merged.
"""
# Register each of our cubes with its appropriate ProtoCube.
proto_cubes_by_name = {}
for cube in self:
name = cube.standard_name
proto_cubes = proto_cubes_by_name.setdefault(name, [])
proto_cube = None
for target_proto_cube in proto_cubes:
if target_proto_cube.register(cube):
proto_cube = target_proto_cube
break
if proto_cube is None:
proto_cube = iris._merge.ProtoCube(cube)
proto_cubes.append(proto_cube)
        # Emulate Python 2 behaviour: sort None before any string name.
def _none_sort(item):
return (item is not None, item)
# Extract all the merged cubes from the ProtoCubes.
merged_cubes = CubeList()
for name in sorted(proto_cubes_by_name, key=_none_sort):
for proto_cube in proto_cubes_by_name[name]:
merged_cubes.extend(proto_cube.merge(unique=unique))
return merged_cubes
def concatenate_cube(
self,
check_aux_coords=True,
check_cell_measures=True,
check_ancils=True,
):
"""
Return the concatenated contents of the :class:`CubeList` as a single
:class:`Cube`.
If it is not possible to concatenate the `CubeList` into a single
`Cube`, a :class:`~iris.exceptions.ConcatenateError` will be raised
describing the reason for the failure.
Kwargs:
* check_aux_coords
Checks the auxiliary coordinates of the cubes match. This check
is not applied to auxiliary coordinates that span the dimension
the concatenation is occurring along. Defaults to True.
* check_cell_measures
Checks the cell measures of the cubes match. This check
is not applied to cell measures that span the dimension
the concatenation is occurring along. Defaults to True.
* check_ancils
Checks the ancillary variables of the cubes match. This check
is not applied to ancillary variables that span the dimension
the concatenation is occurring along. Defaults to True.
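        For example, a minimal sketch (assumes the list holds chunks of a
        single phenomenon split along an existing dimension, e.g. time)::
            # Join the chunks back into one cube, or raise ConcatenateError.
            cube = cubes.concatenate_cube()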
.. note::
Concatenation cannot occur along an anonymous dimension.
"""
from iris._concatenate import concatenate
if not self:
raise ValueError("can't concatenate an empty CubeList")
names = [cube.metadata.name() for cube in self]
unique_names = list(OrderedDict.fromkeys(names))
if len(unique_names) == 1:
res = concatenate(
self,
error_on_mismatch=True,
check_aux_coords=check_aux_coords,
check_cell_measures=check_cell_measures,
check_ancils=check_ancils,
)
n_res_cubes = len(res)
if n_res_cubes == 1:
return res[0]
else:
msgs = []
msgs.append("An unexpected problem prevented concatenation.")
msgs.append(
"Expected only a single cube, "
"found {}.".format(n_res_cubes)
)
raise iris.exceptions.ConcatenateError(msgs)
else:
msgs = []
msgs.append(
"Cube names differ: {} != {}".format(
unique_names[0], unique_names[1]
)
)
raise iris.exceptions.ConcatenateError(msgs)
def concatenate(
self,
check_aux_coords=True,
check_cell_measures=True,
check_ancils=True,
):
"""
Concatenate the cubes over their common dimensions.
Kwargs:
* check_aux_coords
Checks the auxiliary coordinates of the cubes match. This check
is not applied to auxiliary coordinates that span the dimension
the concatenation is occurring along. Defaults to True.
* check_cell_measures
Checks the cell measures of the cubes match. This check
is not applied to cell measures that span the dimension
the concatenation is occurring along. Defaults to True.
* check_ancils
Checks the ancillary variables of the cubes match. This check
is not applied to ancillary variables that span the dimension
the concatenation is occurring along. Defaults to True.
Returns:
A new :class:`iris.cube.CubeList` of concatenated
:class:`iris.cube.Cube` instances.
This combines cubes with a common dimension coordinate, but occupying
different regions of the coordinate value. The cubes are joined across
that dimension.
.. testsetup::
import iris
import numpy as np
xco = iris.coords.DimCoord([11, 12, 13, 14], long_name='x_vals')
yco1 = iris.coords.DimCoord([4, 5], long_name='y_vals')
yco2 = iris.coords.DimCoord([7, 9, 10], long_name='y_vals')
c1 = iris.cube.Cube(np.zeros((2,4)), long_name='some_parameter')
c1.add_dim_coord(xco, 1)
c1.add_dim_coord(yco1, 0)
c2 = iris.cube.Cube(np.zeros((3,4)), long_name='some_parameter')
c2.add_dim_coord(xco, 1)
c2.add_dim_coord(yco2, 0)
For example::
>>> print(c1)
some_parameter / (unknown) (y_vals: 2; x_vals: 4)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(c1.coord('y_vals').points)
[4 5]
>>> print(c2)
some_parameter / (unknown) (y_vals: 3; x_vals: 4)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(c2.coord('y_vals').points)
[ 7 9 10]
>>> cube_list = iris.cube.CubeList([c1, c2])
>>> new_cube = cube_list.concatenate()[0]
>>> print(new_cube)
some_parameter / (unknown) (y_vals: 5; x_vals: 4)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(new_cube.coord('y_vals').points)
[ 4 5 7 9 10]
>>>
Contrast this with :meth:`iris.cube.CubeList.merge`, which makes a new
dimension from values of an auxiliary scalar coordinate.
.. note::
Cubes may contain 'extra' dimensional elements such as auxiliary
coordinates, cell measures or ancillary variables.
        For a group of similar cubes to concatenate together into one output, all
        such elements which do not map to the concatenation axis must be identical
        in every input cube: these then appear unchanged in the output.
        Similarly, those elements which *do* map to the concatenation axis must
        have matching properties, but may have different data values: these then
        appear, concatenated, in the output cube.
If any cubes in a group have dimensional elements which do not match
correctly, the group will not concatenate to a single output cube.
.. note::
If time coordinates in the list of cubes have differing epochs then
the cubes will not be able to be concatenated. If this occurs, use
:func:`iris.util.unify_time_units` to normalise the epochs of the
time coordinates so that the cubes can be concatenated.
.. note::
Concatenation cannot occur along an anonymous dimension.
"""
from iris._concatenate import concatenate
return concatenate(
self,
check_aux_coords=check_aux_coords,
check_cell_measures=check_cell_measures,
check_ancils=check_ancils,
)
def realise_data(self):
"""
Fetch 'real' data for all cubes, in a shared calculation.
This computes any lazy data, equivalent to accessing each `cube.data`.
However, lazy calculations and data fetches can be shared between the
computations, improving performance.
For example::
# Form stats.
a_std = cube_a.collapsed(['x', 'y'], iris.analysis.STD_DEV)
b_std = cube_b.collapsed(['x', 'y'], iris.analysis.STD_DEV)
ab_mean_diff = (cube_b - cube_a).collapsed(['x', 'y'],
iris.analysis.MEAN)
std_err = (a_std * a_std + b_std * b_std) ** 0.5
# Compute these stats together (avoiding multiple data passes).
CubeList([a_std, b_std, ab_mean_diff, std_err]).realise_data()
.. Note::
Cubes with non-lazy data are not affected.
"""
_lazy.co_realise_cubes(*self)
def copy(self):
"""
        Return a deep copy of this CubeList.
"""
if type(self) == CubeList:
return deepcopy(self)
def _is_single_item(testee):
"""
Return whether this is a single item, rather than an iterable.
We count string types as 'single', also.
"""
return isinstance(testee, str) or not isinstance(testee, Iterable)
class Cube(CFVariableMixin):
"""
A single Iris cube of data and metadata.
Typically obtained from :func:`iris.load`, :func:`iris.load_cube`,
:func:`iris.load_cubes`, or from the manipulation of existing cubes.
For example:
>>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
>>> print(cube)
air_temperature / (K) (latitude: 73; longitude: 96)
Dimension coordinates:
latitude x -
longitude - x
Scalar coordinates:
forecast_period \
6477 hours, bound=(-28083.0, 6477.0) hours
forecast_reference_time 1998-03-01 03:00:00
pressure 1000.0 hPa
time \
1998-12-01 00:00:00, bound=(1994-12-01 00:00:00, 1998-12-01 00:00:00)
Cell methods:
mean within years time
mean over years time
Attributes:
STASH m01s16i203
source 'Data from Met Office Unified Model'
See the :doc:`user guide</userguide/index>` for more information.
"""
#: Indicates to client code that the object supports
#: "orthogonal indexing", which means that slices that are 1d arrays
#: or lists slice along each dimension independently. This behavior
    #: is similar to Fortran or Matlab, but different from numpy.
__orthogonal_indexing__ = True
@classmethod
def _sort_xml_attrs(cls, doc):
"""
Takes an xml document and returns a copy with all element
attributes sorted in alphabetical order.
This is a private utility method required by iris to maintain
legacy xml behaviour beyond python 3.7.
Args:
* doc:
The :class:`xml.dom.minidom.Document`.
Returns:
The :class:`xml.dom.minidom.Document` with sorted element
attributes.
"""
from xml.dom.minidom import Document
def _walk_nodes(node):
"""Note: _walk_nodes is called recursively on child elements."""
# we don't want to copy the children here, so take a shallow copy
new_node = node.cloneNode(deep=False)
            # Python < 3.8 orders attributes alphabetically; Python >= 3.8
            # preserves insertion order. For consistent behaviour across
            # both, we always sort alphabetically.
# Remove all the attribute nodes, then add back in alphabetical order.
attrs = [
new_node.getAttributeNode(attr_name).cloneNode(deep=True)
for attr_name in sorted(node.attributes.keys())
]
for attr in attrs:
new_node.removeAttributeNode(attr)
for attr in attrs:
new_node.setAttributeNode(attr)
if node.childNodes:
children = [_walk_nodes(x) for x in node.childNodes]
for c in children:
new_node.appendChild(c)
return new_node
nodes = _walk_nodes(doc.documentElement)
new_doc = Document()
new_doc.appendChild(nodes)
return new_doc
def __init__(
self,
data,
standard_name=None,
long_name=None,
var_name=None,
units=None,
attributes=None,
cell_methods=None,
dim_coords_and_dims=None,
aux_coords_and_dims=None,
aux_factories=None,
cell_measures_and_dims=None,
ancillary_variables_and_dims=None,
):
"""
Creates a cube with data and optional metadata.
Not typically used - normally cubes are obtained by loading data
(e.g. :func:`iris.load`) or from manipulating existing cubes.
Args:
* data
This object defines the shape of the cube and the phenomenon
value in each cell.
``data`` can be a dask array, a NumPy array, a NumPy array
subclass (such as :class:`numpy.ma.MaskedArray`), or
array_like (as described in :func:`numpy.asarray`).
See :attr:`Cube.data<iris.cube.Cube.data>`.
Kwargs:
* standard_name
The standard name for the Cube's data.
* long_name
An unconstrained description of the cube.
* var_name
The NetCDF variable name for the cube.
* units
The unit of the cube, e.g. ``"m s-1"`` or ``"kelvin"``.
* attributes
            A dictionary of cube attributes.
* cell_methods
A tuple of CellMethod objects, generally set by Iris, e.g.
``(CellMethod("mean", coords='latitude'), )``.
* dim_coords_and_dims
            A list of coordinates with scalar dimension mappings, e.g.
``[(lat_coord, 0), (lon_coord, 1)]``.
* aux_coords_and_dims
A list of coordinates with dimension mappings,
            e.g. ``[(lat_coord, 0), (lon_coord, (0, 1))]``.
See also :meth:`Cube.add_dim_coord()<iris.cube.Cube.add_dim_coord>`
and :meth:`Cube.add_aux_coord()<iris.cube.Cube.add_aux_coord>`.
* aux_factories
A list of auxiliary coordinate factories. See
:mod:`iris.aux_factory`.
* cell_measures_and_dims
A list of CellMeasures with dimension mappings.
* ancillary_variables_and_dims
A list of AncillaryVariables with dimension mappings.
For example::
>>> from iris.coords import DimCoord
>>> from iris.cube import Cube
>>> latitude = DimCoord(np.linspace(-90, 90, 4),
... standard_name='latitude',
... units='degrees')
>>> longitude = DimCoord(np.linspace(45, 360, 8),
... standard_name='longitude',
... units='degrees')
>>> cube = Cube(np.zeros((4, 8), np.float32),
... dim_coords_and_dims=[(latitude, 0),
... (longitude, 1)])
"""
# Temporary error while we transition the API.
if isinstance(data, str):
raise TypeError("Invalid data type: {!r}.".format(data))
# Configure the metadata manager.
self._metadata_manager = metadata_manager_factory(CubeMetadata)
# Initialise the cube data manager.
self._data_manager = DataManager(data)
#: The "standard name" for the Cube's phenomenon.
self.standard_name = standard_name
#: An instance of :class:`cf_units.Unit` describing the Cube's data.
self.units = units
#: The "long name" for the Cube's phenomenon.
self.long_name = long_name
#: The NetCDF variable name for the Cube.
self.var_name = var_name
self.cell_methods = cell_methods
#: A dictionary, with a few restricted keys, for arbitrary
#: Cube metadata.
self.attributes = attributes
# Coords
self._dim_coords_and_dims = []
self._aux_coords_and_dims = []
self._aux_factories = []
# Cell Measures
self._cell_measures_and_dims = []
# Ancillary Variables
self._ancillary_variables_and_dims = []
identities = set()
if dim_coords_and_dims:
dims = set()
for coord, dim in dim_coords_and_dims:
identity = coord.standard_name, coord.long_name
if identity not in identities and dim not in dims:
self._add_unique_dim_coord(coord, dim)
else:
self.add_dim_coord(coord, dim)
identities.add(identity)
dims.add(dim)
if aux_coords_and_dims:
for coord, dims in aux_coords_and_dims:
identity = coord.standard_name, coord.long_name
if identity not in identities:
self._add_unique_aux_coord(coord, dims)
else:
self.add_aux_coord(coord, dims)
identities.add(identity)
if aux_factories:
for factory in aux_factories:
self.add_aux_factory(factory)
if cell_measures_and_dims:
for cell_measure, dims in cell_measures_and_dims:
self.add_cell_measure(cell_measure, dims)
if ancillary_variables_and_dims:
for ancillary_variable, dims in ancillary_variables_and_dims:
self.add_ancillary_variable(ancillary_variable, dims)
@property
def _names(self):
"""
A tuple containing the value of each name participating in the identity
of a :class:`iris.cube.Cube`. This includes the standard name,
long name, NetCDF variable name, and the STASH from the attributes
dictionary.
"""
return self._metadata_manager._names
def is_compatible(self, other, ignore=None):
"""
Return whether the cube is compatible with another.
Compatibility is determined by comparing :meth:`iris.cube.Cube.name()`,
:attr:`iris.cube.Cube.units`, :attr:`iris.cube.Cube.cell_methods` and
:attr:`iris.cube.Cube.attributes` that are present in both objects.
Args:
* other:
An instance of :class:`iris.cube.Cube` or
:class:`iris.cube.CubeMetadata`.
* ignore:
A single attribute key or iterable of attribute keys to ignore when
comparing the cubes. Default is None. To ignore all attributes set
this to other.attributes.
Returns:
Boolean.
.. seealso::
:meth:`iris.util.describe_diff()`
.. note::
This function does not indicate whether the two cubes can be
merged, instead it checks only the four items quoted above for
equality. Determining whether two cubes will merge requires
additional logic that is beyond the scope of this method.
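        For example, a minimal sketch (``cube_a`` and ``cube_b`` are assumed
        to describe the same phenomenon; 'history' is an illustrative
        attribute to ignore)::
            # Compare name, units, cell_methods and shared attributes only.
            compatible = cube_a.is_compatible(cube_b, ignore='history')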
"""
compatible = (
self.name() == other.name()
and self.units == other.units
and self.cell_methods == other.cell_methods
)
if compatible:
common_keys = set(self.attributes).intersection(other.attributes)
if ignore is not None:
if isinstance(ignore, str):
ignore = (ignore,)
common_keys = common_keys.difference(ignore)
for key in common_keys:
if np.any(self.attributes[key] != other.attributes[key]):
compatible = False
break
return compatible
def convert_units(self, unit):
"""
Change the cube's units, converting the values in the data array.
For example, if a cube's :attr:`~iris.cube.Cube.units` are
kelvin then::
cube.convert_units('celsius')
will change the cube's :attr:`~iris.cube.Cube.units` attribute to
celsius and subtract 273.15 from each value in
:attr:`~iris.cube.Cube.data`.
This operation preserves lazy data.
"""
# If the cube has units convert the data.
if self.units.is_unknown():
raise iris.exceptions.UnitConversionError(
"Cannot convert from unknown units. "
'The "cube.units" attribute may be set directly.'
)
if self.has_lazy_data():
# Make fixed copies of old + new units for a delayed conversion.
old_unit = self.units
new_unit = unit
pointwise_convert = partial(old_unit.convert, other=new_unit)
new_data = _lazy.lazy_elementwise(
self.lazy_data(), pointwise_convert
)
else:
new_data = self.units.convert(self.data, unit)
self.data = new_data
self.units = unit
def add_cell_method(self, cell_method):
"""Add a :class:`~iris.coords.CellMethod` to the Cube."""
self.cell_methods += (cell_method,)
def add_aux_coord(self, coord, data_dims=None):
"""
Adds a CF auxiliary coordinate to the cube.
Args:
* coord
The :class:`iris.coords.DimCoord` or :class:`iris.coords.AuxCoord`
instance to add to the cube.
Kwargs:
* data_dims
Integer or iterable of integers giving the data dimensions spanned
by the coordinate.
Raises a ValueError if a coordinate with identical metadata already
exists on the cube.
See also :meth:`Cube.remove_coord()<iris.cube.Cube.remove_coord>`.
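        For example, a minimal sketch (the coordinate values are
        illustrative)::
            height = iris.coords.AuxCoord(1.5, standard_name='height',
                                          units='m')
            # A scalar coordinate spans no data dimensions, so data_dims
            # may be omitted.
            cube.add_aux_coord(height)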
"""
if self.coords(coord): # TODO: just fail on duplicate object
raise ValueError("Duplicate coordinates are not permitted.")
self._add_unique_aux_coord(coord, data_dims)
def _check_multi_dim_metadata(self, metadata, data_dims):
# Convert to a tuple of integers
if data_dims is None:
data_dims = tuple()
elif isinstance(data_dims, Container):
data_dims = tuple(int(d) for d in data_dims)
else:
data_dims = (int(data_dims),)
if data_dims:
if len(data_dims) != metadata.ndim:
msg = (
"Invalid data dimensions: {} given, {} expected for "
"{!r}.".format(
len(data_dims), metadata.ndim, metadata.name()
)
)
raise ValueError(msg)
# Check compatibility with the shape of the data
for i, dim in enumerate(data_dims):
if metadata.shape[i] != self.shape[dim]:
msg = (
"Unequal lengths. Cube dimension {} => {};"
" metadata {!r} dimension {} => {}."
)
raise ValueError(
msg.format(
dim,
self.shape[dim],
metadata.name(),
i,
metadata.shape[i],
)
)
elif metadata.shape != (1,):
msg = "Missing data dimensions for multi-valued {} {!r}"
msg = msg.format(metadata.__class__.__name__, metadata.name())
raise ValueError(msg)
return data_dims
def _add_unique_aux_coord(self, coord, data_dims):
data_dims = self._check_multi_dim_metadata(coord, data_dims)
if hasattr(coord, "mesh"):
mesh = self.mesh
if mesh:
msg = (
"{item} of Meshcoord {coord!r} is "
"{thisval!r}, which does not match existing "
"cube {item} of {ownval!r}."
)
if coord.mesh != mesh:
raise ValueError(
msg.format(
item="mesh",
coord=coord,
thisval=coord.mesh,
ownval=mesh,
)
)
location = self.location
if coord.location != location:
raise ValueError(
msg.format(
item="location",
coord=coord,
thisval=coord.location,
ownval=location,
)
)
mesh_dims = (self.mesh_dim(),)
if data_dims != mesh_dims:
raise ValueError(
msg.format(
item="mesh dimension",
coord=coord,
thisval=data_dims,
ownval=mesh_dims,
)
)
self._aux_coords_and_dims.append((coord, data_dims))
def add_aux_factory(self, aux_factory):
"""
Adds an auxiliary coordinate factory to the cube.
Args:
* aux_factory
The :class:`iris.aux_factory.AuxCoordFactory` instance to add.
"""
if not isinstance(aux_factory, iris.aux_factory.AuxCoordFactory):
raise TypeError(
"Factory must be a subclass of "
"iris.aux_factory.AuxCoordFactory."
)
cube_coords = self.coords()
for dependency in aux_factory.dependencies:
ref_coord = aux_factory.dependencies[dependency]
if ref_coord is not None and ref_coord not in cube_coords:
msg = "{} coordinate for factory is not present on cube {}"
raise ValueError(msg.format(ref_coord.name(), self.name()))
self._aux_factories.append(aux_factory)
def add_cell_measure(self, cell_measure, data_dims=None):
"""
Adds a CF cell measure to the cube.
Args:
* cell_measure
The :class:`iris.coords.CellMeasure`
instance to add to the cube.
Kwargs:
* data_dims
Integer or iterable of integers giving the data dimensions spanned
            by the cell measure.
Raises a ValueError if a cell_measure with identical metadata already
exists on the cube.
See also
:meth:`Cube.remove_cell_measure()<iris.cube.Cube.remove_cell_measure>`.
"""
if self.cell_measures(cell_measure):
raise ValueError("Duplicate cell_measures are not permitted.")
data_dims = self._check_multi_dim_metadata(cell_measure, data_dims)
self._cell_measures_and_dims.append((cell_measure, data_dims))
self._cell_measures_and_dims.sort(
key=lambda cm_dims: (cm_dims[0].metadata, cm_dims[1])
)
def add_ancillary_variable(self, ancillary_variable, data_dims=None):
"""
Adds a CF ancillary variable to the cube.
Args:
* ancillary_variable
The :class:`iris.coords.AncillaryVariable` instance to be added to
the cube
Kwargs:
* data_dims
Integer or iterable of integers giving the data dimensions spanned
by the ancillary variable.
Raises a ValueError if an ancillary variable with identical metadata
already exists on the cube.
"""
if self.ancillary_variables(ancillary_variable):
raise ValueError("Duplicate ancillary variables not permitted")
data_dims = self._check_multi_dim_metadata(
ancillary_variable, data_dims
)
self._ancillary_variables_and_dims.append(
(ancillary_variable, data_dims)
)
self._ancillary_variables_and_dims.sort(
key=lambda av_dims: (av_dims[0].metadata, av_dims[1])
)
def add_dim_coord(self, dim_coord, data_dim):
"""
Add a CF coordinate to the cube.
Args:
* dim_coord
The :class:`iris.coords.DimCoord` instance to add to the cube.
* data_dim
Integer giving the data dimension spanned by the coordinate.
Raises a ValueError if a coordinate with identical metadata already
exists on the cube or if a coord already exists for the
given dimension.
See also :meth:`Cube.remove_coord()<iris.cube.Cube.remove_coord>`.
"""
if self.coords(dim_coord):
raise ValueError(
"The coordinate already exists on the cube. "
"Duplicate coordinates are not permitted."
)
# Check dimension is available
if self.coords(dimensions=data_dim, dim_coords=True):
raise ValueError(
"A dim_coord is already associated with "
"dimension %d." % data_dim
)
self._add_unique_dim_coord(dim_coord, data_dim)
def _add_unique_dim_coord(self, dim_coord, data_dim):
if isinstance(dim_coord, iris.coords.AuxCoord):
raise ValueError("The dim_coord may not be an AuxCoord instance.")
# Convert data_dim to a single integer
if isinstance(data_dim, Container):
if len(data_dim) != 1:
raise ValueError(
"The supplied data dimension must be a" " single number."
)
data_dim = int(list(data_dim)[0])
else:
data_dim = int(data_dim)
# Check data_dim value is valid
if data_dim < 0 or data_dim >= self.ndim:
raise ValueError(
"The cube does not have the specified dimension "
"(%d)" % data_dim
)
# Check compatibility with the shape of the data
if dim_coord.shape[0] != self.shape[data_dim]:
msg = "Unequal lengths. Cube dimension {} => {}; coord {!r} => {}."
raise ValueError(
msg.format(
data_dim,
self.shape[data_dim],
dim_coord.name(),
len(dim_coord.points),
)
)
self._dim_coords_and_dims.append((dim_coord, int(data_dim)))
def remove_aux_factory(self, aux_factory):
"""Removes the given auxiliary coordinate factory from the cube."""
self._aux_factories.remove(aux_factory)
def _remove_coord(self, coord):
self._dim_coords_and_dims = [
(coord_, dim)
for coord_, dim in self._dim_coords_and_dims
if coord_ is not coord
]
self._aux_coords_and_dims = [
(coord_, dims)
for coord_, dims in self._aux_coords_and_dims
if coord_ is not coord
]
for aux_factory in self.aux_factories:
if coord.metadata == aux_factory.metadata:
self.remove_aux_factory(aux_factory)
def remove_coord(self, coord):
"""
Removes a coordinate from the cube.
Args:
* coord (string or coord)
The (name of the) coordinate to remove from the cube.
See also :meth:`Cube.add_dim_coord()<iris.cube.Cube.add_dim_coord>`
and :meth:`Cube.add_aux_coord()<iris.cube.Cube.add_aux_coord>`.
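        For example, a minimal sketch (the coordinate name is illustrative)::
            cube.remove_coord('forecast_period')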
"""
coord = self.coord(coord)
self._remove_coord(coord)
for factory in self.aux_factories:
factory.update(coord)
def remove_cell_measure(self, cell_measure):
"""
Removes a cell measure from the cube.
Args:
* cell_measure (string or cell_measure)
            The (name of the) cell measure to remove from the cube. Specified
            as either
            (a) a :attr:`standard_name`, :attr:`long_name`, or
            :attr:`var_name`. Defaults to value of `default`
            (which itself defaults to `unknown`) as defined in
            :class:`iris.common.CFVariableMixin`, or
            (b) a cell_measure instance with metadata equal to that of
            the desired cell_measure.
.. note::
If the argument given does not represent a valid cell_measure on
the cube, an :class:`iris.exceptions.CellMeasureNotFoundError`
is raised.
.. seealso::
:meth:`Cube.add_cell_measure()<iris.cube.Cube.add_cell_measure>`
"""
cell_measure = self.cell_measure(cell_measure)
self._cell_measures_and_dims = [
(cell_measure_, dim)
for cell_measure_, dim in self._cell_measures_and_dims
if cell_measure_ is not cell_measure
]
def remove_ancillary_variable(self, ancillary_variable):
"""
Removes an ancillary variable from the cube.
Args:
* ancillary_variable (string or AncillaryVariable)
The (name of the) AncillaryVariable to remove from the cube.
"""
ancillary_variable = self.ancillary_variable(ancillary_variable)
self._ancillary_variables_and_dims = [
(ancillary_variable_, dim)
for ancillary_variable_, dim in self._ancillary_variables_and_dims
if ancillary_variable_ is not ancillary_variable
]
def replace_coord(self, new_coord):
"""
Replace the coordinate whose metadata matches the given coordinate.
"""
old_coord = self.coord(new_coord)
dims = self.coord_dims(old_coord)
was_dimensioned = old_coord in self.dim_coords
self._remove_coord(old_coord)
if was_dimensioned and isinstance(new_coord, iris.coords.DimCoord):
self.add_dim_coord(new_coord, dims[0])
else:
self.add_aux_coord(new_coord, dims)
for factory in self.aux_factories:
factory.update(old_coord, new_coord)
def coord_dims(self, coord):
"""
Returns a tuple of the data dimensions relevant to the given
coordinate.
When searching for the given coordinate in the cube the comparison is
made using coordinate metadata equality. Hence the given coordinate
instance need not exist on the cube, and may contain different
coordinate values.
Args:
* coord (string or coord)
The (name of the) coord to look for.
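        For example, a minimal sketch (assumes the cube has a 'time'
        coordinate mapped to its first data dimension)::
            dims = cube.coord_dims('time')  # returns e.g. (0,)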
"""
name_provided = False
if isinstance(coord, str):
            # Forced to look up the coordinate if we only have the name.
coord = self.coord(coord)
name_provided = True
coord_id = id(coord)
# Dimension of dimension coordinate by object id
dims_by_id = {id(c): (d,) for c, d in self._dim_coords_and_dims}
# Check for id match - faster than equality check
match = dims_by_id.get(coord_id)
if match is None:
# Dimension/s of auxiliary coordinate by object id
aux_dims_by_id = {id(c): d for c, d in self._aux_coords_and_dims}
# Check for id match - faster than equality
match = aux_dims_by_id.get(coord_id)
if match is None:
dims_by_id.update(aux_dims_by_id)
if match is None and not name_provided:
            # We may have an equivalent coordinate, but not the actual
            # cube coordinate instance - so we are forced to perform a
            # coordinate lookup to attempt to retrieve it.
coord = self.coord(coord)
# Check for id match - faster than equality
match = dims_by_id.get(id(coord))
# Search derived aux coordinates
if match is None:
target_metadata = coord.metadata
def matcher(factory):
return factory.metadata == target_metadata
factories = filter(matcher, self._aux_factories)
matches = [
factory.derived_dims(self.coord_dims) for factory in factories
]
if matches:
match = matches[0]
if match is None:
raise iris.exceptions.CoordinateNotFoundError(coord.name())
return match
def cell_measure_dims(self, cell_measure):
"""
Returns a tuple of the data dimensions relevant to the given
CellMeasure.
* cell_measure (string or CellMeasure)
The (name of the) cell measure to look for.
"""
cell_measure = self.cell_measure(cell_measure)
# Search for existing cell measure (object) on the cube, faster lookup
# than equality - makes no functional difference.
matches = [
dims
for cm_, dims in self._cell_measures_and_dims
if cm_ is cell_measure
]
if not matches:
raise iris.exceptions.CellMeasureNotFoundError(cell_measure.name())
return matches[0]
def ancillary_variable_dims(self, ancillary_variable):
"""
Returns a tuple of the data dimensions relevant to the given
AncillaryVariable.
* ancillary_variable (string or AncillaryVariable)
The (name of the) AncillaryVariable to look for.
"""
ancillary_variable = self.ancillary_variable(ancillary_variable)
# Search for existing ancillary variable (object) on the cube, faster
# lookup than equality - makes no functional difference.
matches = [
dims
for av, dims in self._ancillary_variables_and_dims
if av is ancillary_variable
]
if not matches:
raise iris.exceptions.AncillaryVariableNotFoundError(
ancillary_variable.name()
)
return matches[0]
def aux_factory(
self, name=None, standard_name=None, long_name=None, var_name=None
):
"""
Returns the single coordinate factory that matches the criteria,
or raises an error if not found.
Kwargs:
* name
If not None, matches against factory.name().
* standard_name
The CF standard name of the desired coordinate factory.
If None, does not check for standard name.
* long_name
An unconstrained description of the coordinate factory.
If None, does not check for long_name.
* var_name
The NetCDF variable name of the desired coordinate factory.
If None, does not check for var_name.
.. note::
If the arguments given do not result in precisely 1 coordinate
factory being matched, an
:class:`iris.exceptions.CoordinateNotFoundError` is raised.
"""
factories = self.aux_factories
if name is not None:
factories = [
factory for factory in factories if factory.name() == name
]
if standard_name is not None:
factories = [
factory
for factory in factories
if factory.standard_name == standard_name
]
if long_name is not None:
factories = [
factory
for factory in factories
if factory.long_name == long_name
]
if var_name is not None:
factories = [
factory
for factory in factories
if factory.var_name == var_name
]
if len(factories) > 1:
factory_names = (factory.name() for factory in factories)
msg = (
"Expected to find exactly one coordinate factory, but "
"found {}. They were: {}.".format(
len(factories), ", ".join(factory_names)
)
)
raise iris.exceptions.CoordinateNotFoundError(msg)
elif len(factories) == 0:
msg = (
"Expected to find exactly one coordinate factory, but "
"found none."
)
raise iris.exceptions.CoordinateNotFoundError(msg)
return factories[0]
def coords(
self,
name_or_coord=None,
standard_name=None,
long_name=None,
var_name=None,
attributes=None,
axis=None,
contains_dimension=None,
dimensions=None,
coord_system=None,
dim_coords=None,
mesh_coords=None,
):
"""
Return a list of coordinates from the :class:`Cube` that match the
provided criteria.
.. seealso::
:meth:`Cube.coord` for matching exactly one coordinate.
Kwargs:
* name_or_coord:
Either,
* a :attr:`~iris.common.mixin.CFVariableMixin.standard_name`,
:attr:`~iris.common.mixin.CFVariableMixin.long_name`, or
:attr:`~iris.common.mixin.CFVariableMixin.var_name` which is
compared against the :meth:`~iris.common.mixin.CFVariableMixin.name`.
* a coordinate or metadata instance equal to that of the desired
coordinate e.g., :class:`~iris.coords.DimCoord` or
:class:`~iris.common.metadata.CoordMetadata`.
* standard_name:
The CF standard name of the desired coordinate. If ``None``, does not
check for ``standard name``.
* long_name:
An unconstrained description of the coordinate. If ``None``, does not
check for ``long_name``.
* var_name:
The NetCDF variable name of the desired coordinate. If ``None``, does
not check for ``var_name``.
* attributes:
A dictionary of attributes desired on the coordinates. If ``None``,
does not check for ``attributes``.
* axis:
The desired coordinate axis, see :func:`iris.util.guess_coord_axis`.
If ``None``, does not check for ``axis``. Accepts the values ``X``,
``Y``, ``Z`` and ``T`` (case-insensitive).
* contains_dimension:
The desired coordinate contains the data dimension. If ``None``, does
not check for the dimension.
* dimensions:
The exact data dimensions of the desired coordinate. Coordinates
with no data dimension can be found with an empty ``tuple`` or
``list`` i.e., ``()`` or ``[]``. If ``None``, does not check for
dimensions.
* coord_system:
Whether the desired coordinates have a coordinate system equal to
the given coordinate system. If ``None``, no check is done.
* dim_coords:
Set to ``True`` to only return coordinates that are the cube's
dimension coordinates. Set to ``False`` to only return coordinates
that are the cube's auxiliary, mesh and derived coordinates.
If ``None``, returns all coordinates.
* mesh_coords:
Set to ``True`` to return only coordinates which are
:class:`~iris.experimental.ugrid.MeshCoord`\\ s.
Set to ``False`` to return only non-mesh coordinates.
If ``None``, returns all coordinates.
Returns:
A list containing zero or more coordinates matching the provided
criteria.
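        For example, a minimal sketch (the coordinate criteria are
        illustrative)::
            # All of the cube's dimension coordinates.
            dim_coords = cube.coords(dim_coords=True)
            # Any coordinate recognised as a 'Y' axis, e.g. latitude.
            y_coords = cube.coords(axis='Y')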
"""
coords_and_factories = []
if dim_coords in [True, None]:
coords_and_factories += list(self.dim_coords)
if dim_coords in [False, None]:
coords_and_factories += list(self.aux_coords)
coords_and_factories += list(self.aux_factories)
if mesh_coords is not None:
# Select on mesh or non-mesh.
mesh_coords = bool(mesh_coords)
# Use duck typing to avoid importing from iris.experimental.ugrid,
# which could be a circular import.
if mesh_coords:
# *only* MeshCoords
coords_and_factories = [
item
for item in coords_and_factories
if hasattr(item, "mesh")
]
else:
# *not* MeshCoords
coords_and_factories = [
item
for item in coords_and_factories
if not hasattr(item, "mesh")
]
coords_and_factories = metadata_filter(
coords_and_factories,
item=name_or_coord,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
attributes=attributes,
axis=axis,
)
if coord_system is not None:
coords_and_factories = [
coord_
for coord_ in coords_and_factories
if coord_.coord_system == coord_system
]
if contains_dimension is not None:
coords_and_factories = [
coord_
for coord_ in coords_and_factories
if contains_dimension in self.coord_dims(coord_)
]
if dimensions is not None:
if not isinstance(dimensions, Container):
dimensions = [dimensions]
dimensions = tuple(dimensions)
coords_and_factories = [
coord_
for coord_ in coords_and_factories
if self.coord_dims(coord_) == dimensions
]
# If any factories remain after the above filters we have to make the
# coords so they can be returned
def extract_coord(coord_or_factory):
if isinstance(coord_or_factory, iris.aux_factory.AuxCoordFactory):
coord = coord_or_factory.make_coord(self.coord_dims)
elif isinstance(coord_or_factory, iris.coords.Coord):
coord = coord_or_factory
else:
msg = "Expected Coord or AuxCoordFactory, got " "{!r}.".format(
type(coord_or_factory)
)
raise ValueError(msg)
return coord
coords = [
extract_coord(coord_or_factory)
for coord_or_factory in coords_and_factories
]
return coords
def coord(
self,
name_or_coord=None,
standard_name=None,
long_name=None,
var_name=None,
attributes=None,
axis=None,
contains_dimension=None,
dimensions=None,
coord_system=None,
dim_coords=None,
mesh_coords=None,
):
"""
Return a single coordinate from the :class:`Cube` that matches the
provided criteria.
.. note::
If the arguments given do not result in **precisely one** coordinate,
then a :class:`~iris.exceptions.CoordinateNotFoundError` is raised.
.. seealso::
:meth:`Cube.coords` for matching zero or more coordinates.
Kwargs:
* name_or_coord:
Either,
* a :attr:`~iris.common.mixin.CFVariableMixin.standard_name`,
:attr:`~iris.common.mixin.CFVariableMixin.long_name`, or
:attr:`~iris.common.mixin.CFVariableMixin.var_name` which is
compared against the :meth:`~iris.common.mixin.CFVariableMixin.name`.
* a coordinate or metadata instance equal to that of the desired
coordinate e.g., :class:`~iris.coords.DimCoord` or
:class:`~iris.common.metadata.CoordMetadata`.
* standard_name:
The CF standard name of the desired coordinate. If ``None``, does not
check for ``standard name``.
* long_name:
An unconstrained description of the coordinate. If ``None``, does not
check for ``long_name``.
* var_name:
The NetCDF variable name of the desired coordinate. If ``None``, does
not check for ``var_name``.
* attributes:
A dictionary of attributes desired on the coordinates. If ``None``,
does not check for ``attributes``.
* axis:
The desired coordinate axis, see :func:`iris.util.guess_coord_axis`.
If ``None``, does not check for ``axis``. Accepts the values ``X``,
``Y``, ``Z`` and ``T`` (case-insensitive).
* contains_dimension:
The desired coordinate contains the data dimension. If ``None``, does
not check for the dimension.
* dimensions:
The exact data dimensions of the desired coordinate. Coordinates
with no data dimension can be found with an empty ``tuple`` or
``list`` i.e., ``()`` or ``[]``. If ``None``, does not check for
dimensions.
* coord_system:
Whether the desired coordinates have a coordinate system equal to
the given coordinate system. If ``None``, no check is done.
* dim_coords:
Set to ``True`` to only return coordinates that are the cube's
dimension coordinates. Set to ``False`` to only return coordinates
that are the cube's auxiliary, mesh and derived coordinates.
If ``None``, returns all coordinates.
* mesh_coords:
Set to ``True`` to return only coordinates which are
:class:`~iris.experimental.ugrid.MeshCoord`\\ s.
Set to ``False`` to return only non-mesh coordinates.
If ``None``, returns all coordinates.
Returns:
The coordinate that matches the provided criteria.
"""
coords = self.coords(
name_or_coord=name_or_coord,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
attributes=attributes,
axis=axis,
contains_dimension=contains_dimension,
dimensions=dimensions,
coord_system=coord_system,
            dim_coords=dim_coords,
            mesh_coords=mesh_coords,
)
if len(coords) > 1:
emsg = (
f"Expected to find exactly 1 coordinate, but found {len(coords)}. "
f"They were: {', '.join(coord.name() for coord in coords)}."
)
raise iris.exceptions.CoordinateNotFoundError(emsg)
elif len(coords) == 0:
_name = name_or_coord
if name_or_coord is not None:
if not isinstance(name_or_coord, str):
_name = name_or_coord.name()
bad_name = _name or standard_name or long_name or ""
emsg = (
f"Expected to find exactly 1 {bad_name!r} coordinate, "
"but found none."
)
raise iris.exceptions.CoordinateNotFoundError(emsg)
return coords[0]
def coord_system(self, spec=None):
"""
Find the coordinate system of the given type.
If no target coordinate system is provided then find
any available coordinate system.
Kwargs:
* spec:
            The name or type of a coordinate system subclass.
E.g. ::
cube.coord_system("GeogCS")
cube.coord_system(iris.coord_systems.GeogCS)
If spec is provided as a type it can be a superclass of
any coordinate system found.
If spec is None, then find any available coordinate
systems within the :class:`iris.cube.Cube`.
Returns:
The :class:`iris.coord_systems.CoordSystem` or None.
"""
if isinstance(spec, str) or spec is None:
spec_name = spec
else:
msg = "type %s is not a subclass of CoordSystem" % spec
assert issubclass(spec, iris.coord_systems.CoordSystem), msg
spec_name = spec.__name__
# Gather a temporary list of our unique CoordSystems.
coord_systems = ClassDict(iris.coord_systems.CoordSystem)
for coord in self.coords():
if coord.coord_system:
coord_systems.add(coord.coord_system, replace=True)
result = None
if spec_name is None:
for key in sorted(
coord_systems.keys(), key=lambda class_: class_.__name__
):
result = coord_systems[key]
break
else:
result = coord_systems.get(spec_name)
return result
def _any_meshcoord(self):
"""Return a MeshCoord if there are any, else None."""
mesh_coords = self.coords(mesh_coords=True)
if mesh_coords:
result = mesh_coords[0]
else:
result = None
return result
@property
def mesh(self):
"""
Return the unstructured :class:`~iris.experimental.ugrid.Mesh`
associated with the cube, if the cube has any
:class:`~iris.experimental.ugrid.MeshCoord`\\ s,
or ``None`` if it has none.
Returns:
* mesh (:class:`iris.experimental.ugrid.mesh.Mesh` or None):
The mesh of the cube
:class:`~iris.experimental.ugrid.MeshCoord`\\s,
or ``None``.
"""
result = self._any_meshcoord()
if result is not None:
result = result.mesh
return result
@property
def location(self):
"""
Return the mesh "location" of the cube data, if the cube has any
:class:`~iris.experimental.ugrid.MeshCoord`\\ s,
or ``None`` if it has none.
Returns:
* location (str or None):
The mesh location of the cube
:class:`~iris.experimental.ugrid.MeshCoord`\\s
(i.e. one of 'face' / 'edge' / 'node'),
or ``None``.
"""
result = self._any_meshcoord()
if result is not None:
result = result.location
return result
def mesh_dim(self):
"""
Return the cube dimension of the mesh, if the cube has any
:class:`~iris.experimental.ugrid.MeshCoord`\\ s,
or ``None`` if it has none.
Returns:
* mesh_dim (int, or None):
the cube dimension which the cube
:class:`~iris.experimental.ugrid.MeshCoord`\\s map to,
or ``None``.
"""
result = self._any_meshcoord()
if result is not None:
(result,) = self.coord_dims(result) # result is a 1-tuple
return result
def cell_measures(self, name_or_cell_measure=None):
"""
Return a list of cell measures in this cube fitting the given criteria.
Kwargs:
* name_or_cell_measure
Either
(a) a :attr:`standard_name`, :attr:`long_name`, or
:attr:`var_name`. Defaults to value of `default`
(which itself defaults to `unknown`) as defined in
:class:`iris.common.CFVariableMixin`.
(b) a cell_measure instance with metadata equal to that of
the desired cell_measures.
See also :meth:`Cube.cell_measure()<iris.cube.Cube.cell_measure>`.
"""
name = None
if isinstance(name_or_cell_measure, str):
name = name_or_cell_measure
else:
cell_measure = name_or_cell_measure
cell_measures = []
for cm, _ in self._cell_measures_and_dims:
if name is not None:
if cm.name() == name:
cell_measures.append(cm)
elif cell_measure is not None:
if cm == cell_measure:
cell_measures.append(cm)
else:
cell_measures.append(cm)
return cell_measures
def cell_measure(self, name_or_cell_measure=None):
"""
Return a single cell_measure given the same arguments as
:meth:`Cube.cell_measures`.
.. note::
If the arguments given do not result in precisely 1 cell_measure
being matched, an :class:`iris.exceptions.CellMeasureNotFoundError`
is raised.
.. seealso::
:meth:`Cube.cell_measures()<iris.cube.Cube.cell_measures>`
for full keyword documentation.
"""
cell_measures = self.cell_measures(name_or_cell_measure)
if len(cell_measures) > 1:
msg = (
"Expected to find exactly 1 cell_measure, but found {}. "
"They were: {}."
)
msg = msg.format(
len(cell_measures),
", ".join(cm.name() for cm in cell_measures),
)
raise iris.exceptions.CellMeasureNotFoundError(msg)
elif len(cell_measures) == 0:
if isinstance(name_or_cell_measure, str):
bad_name = name_or_cell_measure
else:
bad_name = (
name_or_cell_measure and name_or_cell_measure.name()
) or ""
msg = (
"Expected to find exactly 1 %s cell_measure, but found "
"none." % bad_name
)
raise iris.exceptions.CellMeasureNotFoundError(msg)
return cell_measures[0]
def ancillary_variables(self, name_or_ancillary_variable=None):
"""
        Return a list of ancillary variables in this cube fitting the given
criteria.
Kwargs:
* name_or_ancillary_variable
Either
(a) a :attr:`standard_name`, :attr:`long_name`, or
:attr:`var_name`. Defaults to value of `default`
(which itself defaults to `unknown`) as defined in
:class:`iris.common.CFVariableMixin`.
            (b) an ancillary_variable instance with metadata equal to that of
the desired ancillary_variables.
See also
:meth:`Cube.ancillary_variable()<iris.cube.Cube.ancillary_variable>`.
"""
name = None
if isinstance(name_or_ancillary_variable, str):
name = name_or_ancillary_variable
else:
ancillary_variable = name_or_ancillary_variable
ancillary_variables = []
for av, _ in self._ancillary_variables_and_dims:
if name is not None:
if av.name() == name:
ancillary_variables.append(av)
elif ancillary_variable is not None:
if av == ancillary_variable:
ancillary_variables.append(av)
else:
ancillary_variables.append(av)
return ancillary_variables
def ancillary_variable(self, name_or_ancillary_variable=None):
"""
Return a single ancillary_variable given the same arguments as
:meth:`Cube.ancillary_variables`.
.. note::
If the arguments given do not result in precisely 1
ancillary_variable being matched, an
:class:`iris.exceptions.AncillaryVariableNotFoundError` is raised.
.. seealso::
:meth:`Cube.ancillary_variables()<iris.cube.Cube.ancillary_variables>`
for full keyword documentation.
"""
ancillary_variables = self.ancillary_variables(
name_or_ancillary_variable
)
if len(ancillary_variables) > 1:
msg = (
"Expected to find exactly 1 ancillary_variable, but found "
"{}. They were: {}."
)
msg = msg.format(
len(ancillary_variables),
", ".join(anc_var.name() for anc_var in ancillary_variables),
)
raise iris.exceptions.AncillaryVariableNotFoundError(msg)
elif len(ancillary_variables) == 0:
if isinstance(name_or_ancillary_variable, str):
bad_name = name_or_ancillary_variable
else:
bad_name = (
name_or_ancillary_variable
and name_or_ancillary_variable.name()
) or ""
msg = (
"Expected to find exactly 1 {!s} ancillary_variable, but "
"found none.".format(bad_name)
)
raise iris.exceptions.AncillaryVariableNotFoundError(msg)
return ancillary_variables[0]
@property
def cell_methods(self):
"""
Tuple of :class:`iris.coords.CellMethod` representing the processing
done on the phenomenon.
"""
return self._metadata_manager.cell_methods
@cell_methods.setter
def cell_methods(self, cell_methods):
self._metadata_manager.cell_methods = (
tuple(cell_methods) if cell_methods else tuple()
)
def core_data(self):
"""
Retrieve the data array of this :class:`~iris.cube.Cube` in its
current state, which will either be real or lazy.
If this :class:`~iris.cube.Cube` has lazy data, accessing its data
array via this method **will not** realise the data array. This means
you can perform operations using this method that work equivalently
on real or lazy data, and will maintain lazy data if present.
"""
return self._data_manager.core_data()
@property
def shape(self):
"""The shape of the data of this cube."""
return self._data_manager.shape
@property
def dtype(self):
"""
The data type of the values in the data array of this
:class:`~iris.cube.Cube`.
"""
return self._data_manager.dtype
@property
def ndim(self):
"""The number of dimensions in the data of this cube."""
return self._data_manager.ndim
def lazy_data(self):
"""
Return a "lazy array" representing the Cube data. A lazy array
describes an array whose data values have not been loaded into memory
from disk.
        Calling this method will never cause the Cube data to be loaded.
Similarly, calling methods on, or indexing, the returned Array
will not cause the Cube data to be loaded.
If the Cube data have already been loaded (for example by calling
:meth:`~iris.cube.Cube.data`), the returned Array will be a view of the
loaded cube data represented as a lazy array object. Note that this
does _not_ make the Cube data lazy again; the Cube data remains loaded
in memory.
Returns:
A lazy array, representing the Cube data.
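        For example, a minimal sketch (assumes ``cube`` was loaded from a
        file, so its data starts out lazy)::
            lazy = cube.lazy_data()  # no data is read from disk here
            # Operations on the lazy array stay deferred.
            doubled = lazy * 2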
"""
return self._data_manager.lazy_data()
@property
def data(self):
"""
The :class:`numpy.ndarray` representing the multi-dimensional data of
the cube.
.. note::
Cubes obtained from NetCDF, PP, and FieldsFile files will only
populate this attribute on its first use.
To obtain the shape of the data without causing it to be loaded,
use the Cube.shape attribute.
Example::
>>> fname = iris.sample_data_path('air_temp.pp')
>>> cube = iris.load_cube(fname, 'air_temperature')
>>> # cube.data does not yet have a value.
...
>>> print(cube.shape)
(73, 96)
>>> # cube.data still does not have a value.
...
>>> cube = cube[:10, :20]
>>> # cube.data still does not have a value.
...
>>> data = cube.data
>>> # Only now is the data loaded.
...
>>> print(data.shape)
(10, 20)
"""
return self._data_manager.data
@data.setter
def data(self, data):
self._data_manager.data = data
def has_lazy_data(self):
"""
Details whether this :class:`~iris.cube.Cube` has lazy data.
Returns:
Boolean.
"""
return self._data_manager.has_lazy_data()
@property
def dim_coords(self):
"""
Return a tuple of all the dimension coordinates, ordered by dimension.
.. note::
The length of the returned tuple is not necessarily the same as
:attr:`Cube.ndim` as there may be dimensions on the cube without
dimension coordinates. It is therefore unreliable to use the
resulting tuple to identify the dimension coordinates for a given
dimension - instead use the :meth:`Cube.coord` method with the
``dimensions`` and ``dim_coords`` keyword arguments.
"""
return tuple(
(
coord
for coord, dim in sorted(
self._dim_coords_and_dims,
key=lambda co_di: (co_di[1], co_di[0].name()),
)
)
)
@property
def aux_coords(self):
"""
Return a tuple of all the auxiliary coordinates, ordered by
dimension(s).
"""
return tuple(
(
coord
for coord, dims in sorted(
self._aux_coords_and_dims,
key=lambda co_di: (co_di[1], co_di[0].name()),
)
)
)
@property
def derived_coords(self):
"""
Return a tuple of all the coordinates generated by the coordinate
factories.
"""
return tuple(
factory.make_coord(self.coord_dims)
for factory in sorted(
self.aux_factories, key=lambda factory: factory.name()
)
)
@property
def aux_factories(self):
"""Return a tuple of all the coordinate factories."""
return tuple(self._aux_factories)
def summary(self, shorten=False, name_padding=35):
"""
String summary of the Cube with name+units, a list of dim coord names
versus length and, optionally, a summary of all other components.
Kwargs:
* shorten (bool):
If set, produce a one-line summary of minimal width, showing only
the cube name, units and dimensions.
When not set (default), produces a full multi-line summary string.
* name_padding (int):
Control the *minimum* width of the cube name + units,
i.e. the indent of the dimension map section.
"""
from iris._representation.cube_printout import CubePrinter
printer = CubePrinter(self)
summary = printer.to_string(oneline=shorten, name_padding=name_padding)
return summary
def __str__(self):
return self.summary()
def __repr__(self):
return "<iris 'Cube' of %s>" % self.summary(
shorten=True, name_padding=1
)
def _repr_html_(self):
from iris.experimental.representation import CubeRepresentation
representer = CubeRepresentation(self)
return representer.repr_html()
# Indicate that the iter option is not available. Python will raise
# TypeError with a useful message if a Cube is iterated over.
__iter__ = None
def __getitem__(self, keys):
"""
Cube indexing (through use of square bracket notation) has been
implemented at the data level. That is, the indices provided to this
method should be aligned to the data of the cube, and thus the indices
requested must be applicable directly to the cube.data attribute. All
metadata will be subsequently indexed appropriately.
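        For example, a minimal sketch (assumes a 2-D cube)::
            first_row = cube[0]         # drops the leading dimension
            window = cube[:10, 20:40]   # sub-region; metadata is re-indexed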
"""
# turn the keys into a full slice spec (all dims)
full_slice = iris.util._build_full_slice_given_keys(keys, self.ndim)
def new_coord_dims(coord_):
return [
dimension_mapping[d]
for d in self.coord_dims(coord_)
if dimension_mapping[d] is not None
]
def new_cell_measure_dims(cm_):
return [
dimension_mapping[d]
for d in self.cell_measure_dims(cm_)
if dimension_mapping[d] is not None
]
def new_ancillary_variable_dims(av_):
return [
dimension_mapping[d]
for d in self.ancillary_variable_dims(av_)
if dimension_mapping[d] is not None
]
# Fetch the data as a generic array-like object.
cube_data = self._data_manager.core_data()
# Index with the keys, using orthogonal slicing.
dimension_mapping, data = iris.util._slice_data_with_keys(
cube_data, keys
)
# We don't want a view of the data, so take a copy of it.
data = deepcopy(data)
# XXX: Slicing a single item from a masked array that is masked,
# results in numpy (v1.11.1) *always* returning a MaskedConstant
# with a dtype of float64, regardless of the original masked
# array dtype!
if (
isinstance(data, ma.core.MaskedConstant)
and data.dtype != cube_data.dtype
):
data = ma.array(data.data, mask=data.mask, dtype=cube_data.dtype)
# Make the new cube slice
cube = Cube(data)
cube.metadata = deepcopy(self.metadata)
# Record a mapping from old coordinate IDs to new coordinates,
# for subsequent use in creating updated aux_factories.
coord_mapping = {}
# Slice the coords
for coord in self.aux_coords:
coord_keys = tuple(
[full_slice[dim] for dim in self.coord_dims(coord)]
)
try:
new_coord = coord[coord_keys]
except ValueError:
# TODO make this except more specific to catch monotonic error
# Attempt to slice it by converting to AuxCoord first
new_coord = iris.coords.AuxCoord.from_coord(coord)[coord_keys]
cube.add_aux_coord(new_coord, new_coord_dims(coord))
coord_mapping[id(coord)] = new_coord
for coord in self.dim_coords:
coord_keys = tuple(
[full_slice[dim] for dim in self.coord_dims(coord)]
)
new_dims = new_coord_dims(coord)
# Try/Catch to handle slicing that makes the points/bounds
# non-monotonic
try:
new_coord = coord[coord_keys]
if not new_dims:
# If the associated dimension has been sliced so the coord
# is a scalar move the coord to the aux_coords container
cube.add_aux_coord(new_coord, new_dims)
else:
cube.add_dim_coord(new_coord, new_dims)
except ValueError:
# TODO make this except more specific to catch monotonic error
# Attempt to slice it by converting to AuxCoord first
new_coord = iris.coords.AuxCoord.from_coord(coord)[coord_keys]
cube.add_aux_coord(new_coord, new_dims)
coord_mapping[id(coord)] = new_coord
for factory in self.aux_factories:
cube.add_aux_factory(factory.updated(coord_mapping))
# slice the cell measures and add them to the cube
for cellmeasure in self.cell_measures():
dims = self.cell_measure_dims(cellmeasure)
cm_keys = tuple([full_slice[dim] for dim in dims])
new_cm = cellmeasure[cm_keys]
cube.add_cell_measure(new_cm, new_cell_measure_dims(cellmeasure))
# slice the ancillary variables and add them to the cube
for ancvar in self.ancillary_variables():
dims = self.ancillary_variable_dims(ancvar)
av_keys = tuple([full_slice[dim] for dim in dims])
new_av = ancvar[av_keys]
cube.add_ancillary_variable(
new_av, new_ancillary_variable_dims(ancvar)
)
return cube
def subset(self, coord):
"""
Get a subset of the cube by providing the desired resultant
        coordinate. If the coordinate provided applies to the whole cube, the
        whole cube is returned. As such, the operation is not strict.
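        For example, a minimal sketch (the coordinate is illustrative, and
        must match one already on the cube)::
            height_10m = iris.coords.DimCoord([10.0], standard_name='height',
                                              units='m')
            sub = cube.subset(height_10m)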
"""
if not isinstance(coord, iris.coords.Coord):
raise ValueError("coord_to_extract must be a valid Coord.")
# Get the coord to extract from the cube
coord_to_extract = self.coord(coord)
# If scalar, return the whole cube. Not possible to subset 1 point.
if (
coord_to_extract in self.aux_coords
and len(coord_to_extract.points) == 1
):
# Default to returning None
result = None
indices = coord_to_extract.intersect(coord, return_indices=True)
# If there is an intersect between the two scalar coordinates;
# return the whole cube. Else, return None.
if len(indices):
result = self
else:
if len(self.coord_dims(coord_to_extract)) > 1:
msg = "Currently, only 1D coords can be used to subset a cube"
raise iris.exceptions.CoordinateMultiDimError(msg)
# Identify the dimension of the cube which this coordinate
# references
coord_to_extract_dim = self.coord_dims(coord_to_extract)[0]
# Identify the indices which intersect the requested coord and
# coord_to_extract
coord_indices = coord_to_extract.intersect(
coord, return_indices=True
)
if coord_indices.size == 0:
# No matches found.
return
# Build up a slice which spans the whole of the cube
full_slice = [slice(None, None)] * len(self.shape)
# Update the full slice to only extract specific indices which
# were identified above
full_slice[coord_to_extract_dim] = coord_indices
full_slice = tuple(full_slice)
result = self[full_slice]
return result
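# --- Editorial usage sketch (names are illustrative assumptions). ---
# `subset` keeps only the part of `cube` whose values intersect the given
# coordinate, returning None when there is no overlap.
#   >>> lat_band = cube.coord('latitude')[10:20]
#   >>> narrow = cube.subset(lat_band)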
def extract(self, constraint):
"""
Filter the cube by the given constraint using
:meth:`iris.Constraint.extract` method.
"""
# Cast the constraint into a proper constraint if it is not so already
constraint = iris._constraints.as_constraint(constraint)
return constraint.extract(self)
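# --- Editorial usage sketch (assumes `cube` has the coords named). ---
# Anything accepted by iris._constraints.as_constraint works here: a
# Constraint instance or a name string.
#   >>> level10 = cube.extract(iris.Constraint(model_level_number=10))
#   >>> named = cube.extract('air_temperature')   # name-only constraint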
def intersection(self, *args, **kwargs):
"""
Return the intersection of the cube with specified coordinate
ranges.
Coordinate ranges can be specified as:
(a) positional arguments: instances of :class:`iris.coords.CoordExtent`,
or equivalent tuples of 3-5 items:
* coord
Either a :class:`iris.coords.Coord`, or coordinate name
(as defined in :meth:`iris.cube.Cube.coords()`)
* minimum
The minimum value of the range to select.
* maximum
The maximum value of the range to select.
* min_inclusive
If True, coordinate values equal to `minimum` will be included
in the selection. Default is True.
* max_inclusive
If True, coordinate values equal to `maximum` will be included
in the selection. Default is True.
(b) keyword arguments, where the keyword name specifies the name
of the coordinate, and the value defines the corresponding range of
coordinate values as a tuple. The tuple must contain two, three, or
four items, corresponding to `(minimum, maximum, min_inclusive,
max_inclusive)` as defined above.
Kwargs:
* ignore_bounds:
Intersect based on points only. Default False.
* threshold:
Minimum proportion of a bounded cell that must overlap with the
specified range. Default 0.
.. note::
For ranges defined over "circular" coordinates (i.e. those
where the `units` attribute has a modulus defined) the cube
will be "rolled" to fit where necessary. When requesting a
range that covers the entire modulus, a split cell will
preferentially be placed at the ``minimum`` end.
.. warning::
Currently this routine only works with "circular"
coordinates (as defined in the previous note).
For example::
>>> import iris
>>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
>>> print(cube.coord('longitude').points[::10])
[ 0. 37.49999237 74.99998474 112.49996948 \
149.99996948
187.49995422 224.99993896 262.49993896 299.99993896 \
337.49990845]
>>> subset = cube.intersection(longitude=(30, 50))
>>> print(subset.coord('longitude').points)
[ 33.74999237 37.49999237 41.24998856 44.99998856 48.74998856]
>>> subset = cube.intersection(longitude=(-10, 10))
>>> print(subset.coord('longitude').points)
[-7.50012207 -3.75012207 0. 3.75 7.5 ]
Returns:
A new :class:`~iris.cube.Cube` giving the subset of the cube
which intersects with the requested coordinate intervals.
"""
result = self
ignore_bounds = kwargs.pop("ignore_bounds", False)
threshold = kwargs.pop("threshold", 0)
for arg in args:
result = result._intersect(
*arg, ignore_bounds=ignore_bounds, threshold=threshold
)
for name, value in kwargs.items():
result = result._intersect(
name, *value, ignore_bounds=ignore_bounds, threshold=threshold
)
return result
def _intersect(
self,
name_or_coord,
minimum,
maximum,
min_inclusive=True,
max_inclusive=True,
ignore_bounds=False,
threshold=0,
):
coord = self.coord(name_or_coord)
if coord.ndim != 1:
raise iris.exceptions.CoordinateMultiDimError(coord)
if coord.nbounds not in (0, 2):
raise ValueError("expected 0 or 2 bound values per cell")
if minimum > maximum:
raise ValueError("minimum greater than maximum")
modulus = coord.units.modulus
if modulus is None:
raise ValueError(
"coordinate units with no modulus are not yet supported"
)
subsets, points, bounds = self._intersect_modulus(
coord,
minimum,
maximum,
min_inclusive,
max_inclusive,
ignore_bounds,
threshold,
)
# By this point we have either one or two subsets along the relevant
# dimension. If it's just one subset (which might be a slice or an
# unordered collection of indices) we can simply index the cube
# and we're done. If it's two subsets we need to stitch the two
# pieces together.
# `subsets` provides a way of slicing the coordinates to ensure that
# they remain contiguous; doing so can mean transforming the data
# (i.e. stitching the two separate pieces together).
def make_chunk(key):
chunk = self[key_tuple_prefix + (key,)]
chunk_coord = chunk.coord(coord)
chunk_coord.points = points[(key,)]
if chunk_coord.has_bounds():
chunk_coord.bounds = bounds[(key,)]
return chunk
(dim,) = self.coord_dims(coord)
key_tuple_prefix = (slice(None),) * dim
chunks = [make_chunk(key) for key in subsets]
if len(chunks) == 1:
result = chunks[0]
else:
chunk_data = [chunk.core_data() for chunk in chunks]
if self.has_lazy_data():
func = da.concatenate
else:
module = ma if ma.isMaskedArray(self.data) else np
func = module.concatenate
data = func(chunk_data, dim)
result = iris.cube.Cube(data)
result.metadata = deepcopy(self.metadata)
# Record a mapping from old coordinate IDs to new coordinates,
# for subsequent use in creating updated aux_factories.
coord_mapping = {}
def create_coords(src_coords, add_coord):
# Add copies of the source coordinates, selecting
# the appropriate subsets out of coordinates which
# share the intersection dimension.
preserve_circular = (
min_inclusive
and max_inclusive
and abs(maximum - minimum) == modulus
)
for src_coord in src_coords:
dims = self.coord_dims(src_coord)
if dim in dims:
dim_within_coord = dims.index(dim)
points = np.concatenate(
[
chunk.coord(src_coord).points
for chunk in chunks
],
dim_within_coord,
)
if src_coord.has_bounds():
bounds = np.concatenate(
[
chunk.coord(src_coord).bounds
for chunk in chunks
],
dim_within_coord,
)
else:
bounds = None
result_coord = src_coord.copy(
points=points, bounds=bounds
)
circular = getattr(result_coord, "circular", False)
if circular and not preserve_circular:
result_coord.circular = False
else:
result_coord = src_coord.copy()
add_coord(result_coord, dims)
coord_mapping[id(src_coord)] = result_coord
create_coords(self.dim_coords, result.add_dim_coord)
create_coords(self.aux_coords, result.add_aux_coord)
for factory in self.aux_factories:
result.add_aux_factory(factory.updated(coord_mapping))
return result
def _intersect_derive_subset(self, coord, points, bounds, inside_indices):
# Return the subsets, i.e. the means to allow the slicing of
# coordinates to ensure that they remain contiguous.
modulus = coord.units.modulus
delta = coord.points[inside_indices] - points[inside_indices]
step = np.rint(np.diff(delta) / modulus)
non_zero_step_indices = np.nonzero(step)[0]
def dim_coord_subset():
"""
Derive the subset for dimension coordinates.
Ensure that we do not wrap if blocks are at the very edge. That
is, if the very edge is wrapped and corresponds to base + period,
stop this unnecessary wraparound.
"""
# A contiguous block at the start and another at the end.
# (NB. We can't have more than two blocks because we've already
# restricted the coordinate's range to its modulus).
end_of_first_chunk = non_zero_step_indices[0]
index_of_second_chunk = inside_indices[end_of_first_chunk + 1]
final_index = points.size - 1
# Condition1: The two blocks don't themselves wrap
# (inside_indices is contiguous).
# Condition2: Are we chunked at either extreme edge.
edge_wrap = (
index_of_second_chunk == inside_indices[end_of_first_chunk] + 1
) and index_of_second_chunk in (final_index, 1)
subsets = None
if edge_wrap:
# Increasing coord
if coord.points[-1] > coord.points[0]:
index_end = -1
index_start = 0
# Decreasing coord
else:
index_end = 0
index_start = -1
# Unwrap points and bounds (if present and equal base + period)
if bounds is not None:
edge_equal_base_period = np.isclose(
coord.bounds[index_end, index_end],
coord.bounds[index_start, index_start] + modulus,
)
if edge_equal_base_period:
bounds[index_end, :] = coord.bounds[index_end, :]
else:
edge_equal_base_period = np.isclose(
coord.points[index_end],
coord.points[index_start] + modulus,
)
if edge_equal_base_period:
points[index_end] = coord.points[index_end]
subsets = [
slice(inside_indices[0], inside_indices[-1] + 1)
]
# Either no edge wrap or edge wrap != base + period
# i.e. derive subset without alteration
if subsets is None:
subsets = [
slice(index_of_second_chunk, None),
slice(None, inside_indices[end_of_first_chunk] + 1),
]
return subsets
if isinstance(coord, iris.coords.DimCoord):
if non_zero_step_indices.size:
subsets = dim_coord_subset()
else:
# A single, contiguous block.
subsets = [slice(inside_indices[0], inside_indices[-1] + 1)]
else:
# An AuxCoord could have its values in an arbitrary
# order, and hence a range of values can select an
# arbitrary subset. Also, we want to preserve the order
# from the original AuxCoord. So we just use the indices
# directly.
subsets = [inside_indices]
return subsets
def _intersect_modulus(
self,
coord,
minimum,
maximum,
min_inclusive,
max_inclusive,
ignore_bounds,
threshold,
):
modulus = coord.units.modulus
if maximum > minimum + modulus:
raise ValueError(
"requested range greater than coordinate's unit's modulus"
)
if coord.has_bounds():
values = coord.bounds
else:
ignore_bounds = True
values = coord.points
if values.max() > values.min() + modulus:
raise ValueError(
"coordinate's range greater than coordinate's unit's modulus"
)
min_comp = np.less_equal if min_inclusive else np.less
max_comp = np.less_equal if max_inclusive else np.less
if ignore_bounds:
points = wrap_lons(coord.points, minimum, modulus)
bounds = coord.bounds
if bounds is not None:
# To avoid splitting any cells (by wrapping only one of its
# bounds), apply exactly the same wrapping as the points.
# Note that the offsets should be exact multiples of the
# modulus, but may initially be slightly off and need rounding.
wrap_offset = points - coord.points
wrap_offset = np.round(wrap_offset / modulus) * modulus
bounds = coord.bounds + wrap_offset[:, np.newaxis]
# Check points only
(inside_indices,) = np.where(
np.logical_and(
min_comp(minimum, points), max_comp(points, maximum)
)
)
else:
# Set up slices to account for ascending/descending bounds
if coord.bounds[0, 0] < coord.bounds[0, 1]:
ilower = (slice(None), 0)
iupper = (slice(None), 1)
else:
ilower = (slice(None), 1)
iupper = (slice(None), 0)
# Initially wrap such that upper bounds are in [min, min + modulus]
# As with the ignore_bounds case, need to round to modulus due to
# floating point precision
upper = wrap_lons(coord.bounds[iupper], minimum, modulus)
wrap_offset = upper - coord.bounds[iupper]
wrap_offset = np.round(wrap_offset / modulus) * modulus
lower = coord.bounds[ilower] + wrap_offset
# Scale threshold for each bound
thresholds = (upper - lower) * threshold
# For a range that covers the whole modulus, there may be a
# cell that is "split" and could appear at either side of
# the range. Choose lower, unless there is not enough overlap.
if minimum + modulus == maximum and threshold == 0:
# Special case: overlapping in a single point
# (ie `minimum` itself) is always unintuitive
is_split = np.isclose(upper, minimum)
else:
is_split = upper - minimum < thresholds
wrap_offset += is_split * modulus
# Apply wrapping
points = coord.points + wrap_offset
bounds = coord.bounds + wrap_offset[:, np.newaxis]
# Interval [min, max] intersects [a, b] iff min <= b and a <= max
# (or < for non-inclusive min/max respectively).
# In this case, its length is L = min(max, b) - max(min, a)
upper = bounds[iupper]
lower = bounds[ilower]
overlap = np.where(
np.logical_and(
min_comp(minimum, upper), max_comp(lower, maximum)
),
np.minimum(maximum, upper) - np.maximum(minimum, lower),
np.nan,
)
(inside_indices,) = np.where(overlap >= thresholds)
# Determine the subsets
subsets = self._intersect_derive_subset(
coord, points, bounds, inside_indices
)
return subsets, points, bounds
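# --- Editorial numeric sketch of the wrapping used above. ---
# wrap_lons maps values into [base, base + period), so a 350-degree point
# enters a (-180, 180) intersection window as -10.
#   >>> from iris.analysis.cartography import wrap_lons
#   >>> wrap_lons(np.array([350.0, 10.0]), -180, 360)
#   array([-10.,  10.])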
def _as_list_of_coords(self, names_or_coords):
"""
Convert a name, coord, or list of names/coords to a list of coords.
"""
# If not iterable, convert to list of a single item
if _is_single_item(names_or_coords):
names_or_coords = [names_or_coords]
coords = []
for name_or_coord in names_or_coords:
if isinstance(name_or_coord, str) or isinstance(
name_or_coord, iris.coords.Coord
):
coords.append(self.coord(name_or_coord))
else:
# Don't know how to handle this type
msg = (
"Don't know how to handle coordinate of type %s. "
"Ensure all coordinates are of type str "
"or iris.coords.Coord."
) % (type(name_or_coord),)
raise TypeError(msg)
return coords
def slices_over(self, ref_to_slice):
"""
Return an iterator of all subcubes along a given coordinate or
dimension index, or multiple of these.
Args:
* ref_to_slice (string, coord, dimension index or a list of these):
Determines which dimensions will be iterated along (i.e. the
dimensions that are not returned in the subcubes).
A mix of input types can also be provided.
Returns:
An iterator of subcubes.
For example, to get all subcubes along the time dimension::
for sub_cube in cube.slices_over('time'):
print(sub_cube)
.. seealso:: :meth:`iris.cube.Cube.slices`.
.. note::
The order of dimension references to slice along does not affect
the order of returned items in the iterator; instead the ordering
is based on the fastest-changing dimension.
"""
# Required to handle a mix between types.
if _is_single_item(ref_to_slice):
ref_to_slice = [ref_to_slice]
slice_dims = set()
for ref in ref_to_slice:
try:
(coord,) = self._as_list_of_coords(ref)
except TypeError:
dim = int(ref)
if dim < 0 or dim >= self.ndim:
msg = (
"Requested an iterator over a dimension ({}) "
"which does not exist.".format(dim)
)
raise ValueError(msg)
# Convert coord index to a single-element list to prevent a
# TypeError when `slice_dims.update` is called with it.
dims = [dim]
else:
dims = self.coord_dims(coord)
slice_dims.update(dims)
all_dims = set(range(self.ndim))
opposite_dims = list(all_dims - slice_dims)
return self.slices(opposite_dims, ordered=False)
def slices(self, ref_to_slice, ordered=True):
"""
Return an iterator of all subcubes given the coordinates or dimension
indices desired to be present in each subcube.
Args:
* ref_to_slice (string, coord, dimension index or a list of these):
Determines which dimensions will be returned in the subcubes (i.e.
the dimensions that are not iterated over).
A mix of input types can also be provided. They must all be
orthogonal (i.e. point to different dimensions).
Kwargs:
* ordered: if True, the order which the coords to slice or data_dims
are given will be the order in which they represent the data in
the resulting cube slices. If False, the order will follow that of
the source cube. Default is True.
Returns:
An iterator of subcubes.
For example, to get all 2d longitude/latitude subcubes from a
multi-dimensional cube::
for sub_cube in cube.slices(['longitude', 'latitude']):
print(sub_cube)
.. seealso:: :meth:`iris.cube.Cube.slices_over`.
"""
if not isinstance(ordered, bool):
raise TypeError("'ordered' argument to slices must be boolean.")
# Required to handle a mix between types
if _is_single_item(ref_to_slice):
ref_to_slice = [ref_to_slice]
dim_to_slice = []
for ref in ref_to_slice:
try:
# attempt to handle as coordinate
coord = self._as_list_of_coords(ref)[0]
dims = self.coord_dims(coord)
if not dims:
msg = (
"Requested an iterator over a coordinate ({}) "
"which does not describe a dimension."
)
msg = msg.format(coord.name())
raise ValueError(msg)
dim_to_slice.extend(dims)
except TypeError:
try:
# attempt to handle as dimension index
dim = int(ref)
except ValueError:
raise ValueError(
"{} Incompatible type {} for "
"slicing".format(ref, type(ref))
)
if dim < 0 or dim >= self.ndim:
msg = (
"Requested an iterator over a dimension ({}) "
"which does not exist.".format(dim)
)
raise ValueError(msg)
dim_to_slice.append(dim)
if len(set(dim_to_slice)) != len(dim_to_slice):
msg = "The requested coordinates are not orthogonal."
raise ValueError(msg)
# Create a list of the shape of our data
dims_index = list(self.shape)
# Set the dimensions which have been requested to length 1
for d in dim_to_slice:
dims_index[d] = 1
return _SliceIterator(self, dims_index, dim_to_slice, ordered)
def transpose(self, new_order=None):
"""
Re-order the data dimensions of the cube in-place.
new_order - list of ints, optional
By default, reverse the dimensions, otherwise permute the
axes according to the values given.
.. note:: If defined, new_order must span all of the data dimensions.
Example usage::
# put the second dimension first, followed by the third dimension,
# and finally put the first dimension third::
>>> cube.transpose([1, 2, 0])
"""
if new_order is None:
new_order = np.arange(self.ndim)[::-1]
# `new_order` must be an iterable for checking with `self.ndim`.
# Dask transpose only supports lists, so ensure `new_order` is
# always a list.
new_order = list(new_order)
if len(new_order) != self.ndim:
raise ValueError("Incorrect number of dimensions.")
# Transpose the data payload.
dm = self._data_manager
data = dm.core_data().transpose(new_order)
self._data_manager = DataManager(data)
dim_mapping = {src: dest for dest, src in enumerate(new_order)}
# Remap all cube dimensional metadata (dim and aux coords and cell
# measures).
def remap_cube_metadata(metadata_and_dims):
metadata, dims = metadata_and_dims
if isinstance(dims, Iterable):
dims = tuple(dim_mapping[dim] for dim in dims)
else:
dims = dim_mapping[dims]
return metadata, dims
self._dim_coords_and_dims = list(
map(remap_cube_metadata, self._dim_coords_and_dims)
)
self._aux_coords_and_dims = list(
map(remap_cube_metadata, self._aux_coords_and_dims)
)
self._cell_measures_and_dims = list(
map(remap_cube_metadata, self._cell_measures_and_dims)
)
self._ancillary_variables_and_dims = list(
map(remap_cube_metadata, self._ancillary_variables_and_dims)
)
def xml(self, checksum=False, order=True, byteorder=True):
"""
Returns a fully valid CubeML string representation of the Cube.
"""
doc = Document()
cube_xml_element = self._xml_element(
doc, checksum=checksum, order=order, byteorder=byteorder
)
cube_xml_element.setAttribute("xmlns", XML_NAMESPACE_URI)
doc.appendChild(cube_xml_element)
# Print our newly created XML
doc = self._sort_xml_attrs(doc)
return doc.toprettyxml(indent=" ")
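# --- Editorial usage sketch. ---
# CubeML is mainly used by Iris's own tests; requesting a checksum will
# realise any lazy data (see the note in _xml_element below).
#   >>> print(cube.xml(checksum=True))   # includes crc32 data checksums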
def _xml_element(self, doc, checksum=False, order=True, byteorder=True):
cube_xml_element = doc.createElement("cube")
if self.standard_name:
cube_xml_element.setAttribute("standard_name", self.standard_name)
if self.long_name:
cube_xml_element.setAttribute("long_name", self.long_name)
if self.var_name:
cube_xml_element.setAttribute("var_name", self.var_name)
cube_xml_element.setAttribute("units", str(self.units))
cube_xml_element.setAttribute("dtype", self.dtype.name)
if self.attributes:
attributes_element = doc.createElement("attributes")
for name in sorted(self.attributes.keys()):
attribute_element = doc.createElement("attribute")
attribute_element.setAttribute("name", name)
value = self.attributes[name]
# Strict check because we don't want namedtuples.
if type(value) in (list, tuple):
delimiter = "[]" if isinstance(value, list) else "()"
value = ", ".join(
("'%s'" if isinstance(item, str) else "%s") % (item,)
for item in value
)
value = delimiter[0] + value + delimiter[1]
else:
value = str(value)
attribute_element.setAttribute("value", value)
attributes_element.appendChild(attribute_element)
cube_xml_element.appendChild(attributes_element)
def dimmeta_xml_element(element, typename, dimscall):
# Make an inner xml element for a cube DimensionalMetadata element, with a
# 'datadims' property showing how it maps to the parent cube dims.
xml_element = doc.createElement(typename)
dims = list(dimscall(element))
if dims:
xml_element.setAttribute("datadims", repr(dims))
xml_element.appendChild(element.xml_element(doc))
return xml_element
coords_xml_element = doc.createElement("coords")
for coord in sorted(self.coords(), key=lambda coord: coord.name()):
# make a "cube coordinate" element which holds the dimensions (if
# appropriate) which itself will have a sub-element of the
# coordinate instance itself.
coords_xml_element.appendChild(
dimmeta_xml_element(coord, "coord", self.coord_dims)
)
cube_xml_element.appendChild(coords_xml_element)
# cell methods (no sorting!)
cell_methods_xml_element = doc.createElement("cellMethods")
for cm in self.cell_methods:
cell_method_xml_element = cm.xml_element(doc)
cell_methods_xml_element.appendChild(cell_method_xml_element)
cube_xml_element.appendChild(cell_methods_xml_element)
# cell measures
cell_measures = sorted(self.cell_measures(), key=lambda cm: cm.name())
if cell_measures:
# This one is an optional subelement.
cms_xml_element = doc.createElement("cellMeasures")
for cm in cell_measures:
cms_xml_element.appendChild(
dimmeta_xml_element(
cm, "cell-measure", self.cell_measure_dims
)
)
cube_xml_element.appendChild(cms_xml_element)
# ancillary variables
ancils = sorted(self.ancillary_variables(), key=lambda anc: anc.name())
if ancils:
# This one is an optional subelement.
ancs_xml_element = doc.createElement("ancillaryVariables")
for anc in ancils:
ancs_xml_element.appendChild(
dimmeta_xml_element(
anc, "ancillary-var", self.ancillary_variable_dims
)
)
cube_xml_element.appendChild(ancs_xml_element)
# data
data_xml_element = doc.createElement("data")
data_xml_element.setAttribute("shape", str(self.shape))
# NB. Getting a checksum triggers any deferred loading,
# in which case it also has the side-effect of forcing the
# byte order to be native.
if checksum:
data = self.data
# Ensure consistent memory layout for checksums.
def normalise(data):
data = np.ascontiguousarray(data)
if data.dtype.newbyteorder("<") != data.dtype:
data = data.byteswap(False)
data.dtype = data.dtype.newbyteorder("<")
return data
if ma.isMaskedArray(data):
# Fill in masked values to avoid the checksum being
# sensitive to unused numbers. Use a fixed value so
# a change in fill_value doesn't affect the
# checksum.
crc = "0x%08x" % (
zlib.crc32(normalise(data.filled(0))) & 0xFFFFFFFF,
)
data_xml_element.setAttribute("checksum", crc)
if ma.is_masked(data):
crc = "0x%08x" % (
zlib.crc32(normalise(data.mask)) & 0xFFFFFFFF,
)
else:
crc = "no-masked-elements"
data_xml_element.setAttribute("mask_checksum", crc)
else:
crc = "0x%08x" % (zlib.crc32(normalise(data)) & 0xFFFFFFFF,)
data_xml_element.setAttribute("checksum", crc)
elif self.has_lazy_data():
data_xml_element.setAttribute("state", "deferred")
else:
data_xml_element.setAttribute("state", "loaded")
# Add the dtype, and also the array and mask orders if the
# data is loaded.
if not self.has_lazy_data():
data = self.data
dtype = data.dtype
def _order(array):
order = ""
if array.flags["C_CONTIGUOUS"]:
order = "C"
elif array.flags["F_CONTIGUOUS"]:
order = "F"
return order
if order:
data_xml_element.setAttribute("order", _order(data))
# NB. dtype.byteorder can return '=', which is bad for
# cross-platform consistency - so we use dtype.str
# instead.
if byteorder:
array_byteorder = {">": "big", "<": "little"}.get(dtype.str[0])
if array_byteorder is not None:
data_xml_element.setAttribute("byteorder", array_byteorder)
if order and ma.isMaskedArray(data):
data_xml_element.setAttribute("mask_order", _order(data.mask))
else:
dtype = self.lazy_data().dtype
data_xml_element.setAttribute("dtype", dtype.name)
cube_xml_element.appendChild(data_xml_element)
return cube_xml_element
def copy(self, data=None):
"""
Returns a deep copy of this cube.
Kwargs:
* data:
Replace the data of the cube copy with provided data payload.
Returns:
A copy instance of the :class:`Cube`.
"""
memo = {}
cube = self._deepcopy(memo, data=data)
return cube
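# --- Editorial usage sketch (assumes numpy imported as np). ---
# Copy the cube's structure but replace the payload; the replacement
# array must match the cube's shape.
#   >>> blank = cube.copy(data=np.zeros(cube.shape, dtype=cube.dtype))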
def __copy__(self):
"""Shallow copying is disallowed for Cubes."""
raise copy.Error(
"Cube shallow-copy not allowed. Use deepcopy() or " "Cube.copy()"
)
def __deepcopy__(self, memo):
return self._deepcopy(memo)
def _deepcopy(self, memo, data=None):
dm = self._data_manager.copy(data=data)
new_dim_coords_and_dims = deepcopy(self._dim_coords_and_dims, memo)
new_aux_coords_and_dims = deepcopy(self._aux_coords_and_dims, memo)
new_cell_measures_and_dims = deepcopy(
self._cell_measures_and_dims, memo
)
new_ancillary_variables_and_dims = deepcopy(
self._ancillary_variables_and_dims, memo
)
# Record a mapping from old coordinate IDs to new coordinates,
# for subsequent use in creating updated aux_factories.
coord_mapping = {}
for old_pair, new_pair in zip(
self._dim_coords_and_dims, new_dim_coords_and_dims
):
coord_mapping[id(old_pair[0])] = new_pair[0]
for old_pair, new_pair in zip(
self._aux_coords_and_dims, new_aux_coords_and_dims
):
coord_mapping[id(old_pair[0])] = new_pair[0]
new_cube = Cube(
dm.core_data(),
dim_coords_and_dims=new_dim_coords_and_dims,
aux_coords_and_dims=new_aux_coords_and_dims,
cell_measures_and_dims=new_cell_measures_and_dims,
ancillary_variables_and_dims=new_ancillary_variables_and_dims,
)
new_cube.metadata = deepcopy(self.metadata, memo)
for factory in self.aux_factories:
new_cube.add_aux_factory(factory.updated(coord_mapping))
return new_cube
# START OPERATOR OVERLOADS
def __eq__(self, other):
result = NotImplemented
if isinstance(other, Cube):
result = self.metadata == other.metadata
# having checked the metadata, now check the coordinates
if result:
coord_compares = (
iris.analysis._dimensional_metadata_comparison(self, other)
)
# if there are any coordinates which are not equal
result = not (
coord_compares["not_equal"]
or coord_compares["non_equal_data_dimension"]
)
if result:
cm_compares = iris.analysis._dimensional_metadata_comparison(
self, other, object_get=Cube.cell_measures
)
# if there are any cell measures which are not equal
result = not (
cm_compares["not_equal"]
or cm_compares["non_equal_data_dimension"]
)
if result:
av_compares = iris.analysis._dimensional_metadata_comparison(
self, other, object_get=Cube.ancillary_variables
)
# if there are any ancillary variables which are not equal
result = not (
av_compares["not_equal"]
or av_compares["non_equal_data_dimension"]
)
# Having checked everything else, check approximate data equality.
if result:
# TODO: why do we use allclose() here, but strict equality in
# _DimensionalMetadata (via util.array_equal())?
result = da.allclose(
self.core_data(), other.core_data()
).compute()
return result
# Must supply __ne__, Python does not defer to __eq__ for negative equality
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
# Must supply __hash__ as Python 3 does not enable it if __eq__ is defined.
# NOTE: Violates "objects which compare equal must have the same hash".
# We ought to remove this, as equality of two cubes can *change*, so they
# really should not be hashable.
# However, current code needs it, e.g. so we can put them in sets.
# Fixing it will require changing those uses. See #962 and #1772.
def __hash__(self):
return hash(id(self))
__add__ = iris.analysis.maths.add
def __iadd__(self, other):
return iris.analysis.maths.add(self, other, in_place=True)
__radd__ = __add__
__sub__ = iris.analysis.maths.subtract
def __isub__(self, other):
return iris.analysis.maths.subtract(self, other, in_place=True)
def __rsub__(self, other):
return (-self) + other
__mul__ = iris.analysis.maths.multiply
def __imul__(self, other):
return iris.analysis.maths.multiply(self, other, in_place=True)
__rmul__ = __mul__
__div__ = iris.analysis.maths.divide
def __idiv__(self, other):
return iris.analysis.maths.divide(self, other, in_place=True)
def __rdiv__(self, other):
data = 1 / self.core_data()
reciprocal = self.copy(data=data)
return iris.analysis.maths.multiply(reciprocal, other)
__truediv__ = __div__
__itruediv__ = __idiv__
__rtruediv__ = __rdiv__
__pow__ = iris.analysis.maths.exponentiate
def __neg__(self):
return self.copy(data=-self.core_data())
# END OPERATOR OVERLOADS
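# --- Editorial usage sketch of the overloads above. ---
# The operators delegate to iris.analysis.maths, so cubes combine with
# cubes, arrays and scalars, with unit handling applied.
#   >>> anomaly = cube - cube.collapsed('time', iris.analysis.MEAN)
#   >>> doubled = 2 * cube   # via __rmul__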
def collapsed(self, coords, aggregator, **kwargs):
"""
Collapse one or more dimensions over the cube given the coordinate/s
and an aggregation.
Examples of aggregations that may be used include
:data:`~iris.analysis.COUNT` and :data:`~iris.analysis.MAX`.
Weighted aggregations (:class:`iris.analysis.WeightedAggregator`) may
also be supplied. These include :data:`~iris.analysis.MEAN` and
:data:`~iris.analysis.SUM`.
Weighted aggregations support an optional *weights* keyword argument.
If set, this should be supplied as an array of weights whose shape
matches the cube. Values for latitude-longitude area weights may be
calculated using :func:`iris.analysis.cartography.area_weights`.
Some Iris aggregators support "lazy" evaluation, meaning that
cubes resulting from this method may represent data arrays which are
not computed until the data is requested (e.g. via ``cube.data`` or
``iris.save``). If lazy evaluation exists for the given aggregator
it will be used wherever possible when this cube's data is itself
a deferred array.
Args:
* coords (string, coord or a list of strings/coords):
Coordinate names/coordinates over which the cube should be
collapsed.
* aggregator (:class:`iris.analysis.Aggregator`):
Aggregator to be applied for collapse operation.
Kwargs:
* kwargs:
Aggregation function keyword arguments.
Returns:
Collapsed cube.
For example:
>>> import iris
>>> import iris.analysis
>>> path = iris.sample_data_path('ostia_monthly.nc')
>>> cube = iris.load_cube(path)
>>> new_cube = cube.collapsed('longitude', iris.analysis.MEAN)
>>> print(new_cube)
surface_temperature / (K) (time: 54; latitude: 18)
Dimension coordinates:
time x -
latitude - x
Auxiliary coordinates:
forecast_reference_time x -
Scalar coordinates:
forecast_period 0 hours
longitude \
180.0 degrees, bound=(0.0, 360.0) degrees
Cell methods:
mean month, year
mean longitude
Attributes:
Conventions 'CF-1.5'
STASH m01s00i024
.. note::
Some aggregations are not commutative and hence the order of
processing is important i.e.::
tmp = cube.collapsed('realization', iris.analysis.VARIANCE)
result = tmp.collapsed('height', iris.analysis.VARIANCE)
is not necessarily the same result as::
tmp = cube.collapsed('height', iris.analysis.VARIANCE)
result2 = tmp.collapsed('realization', iris.analysis.VARIANCE)
Conversely operations which operate on more than one coordinate
at the same time are commutative as they are combined internally
into a single operation. Hence the order of the coordinates
supplied in the list does not matter::
cube.collapsed(['longitude', 'latitude'],
iris.analysis.VARIANCE)
is the same (apart from the logically equivalent cell methods that
may be created etc.) as::
cube.collapsed(['latitude', 'longitude'],
iris.analysis.VARIANCE)
"""
# Convert any coordinate names to coordinates
coords = self._as_list_of_coords(coords)
if isinstance(
aggregator, iris.analysis.WeightedAggregator
) and not aggregator.uses_weighting(**kwargs):
msg = "Collapsing spatial coordinate {!r} without weighting"
lat_match = [
coord for coord in coords if "latitude" in coord.name()
]
if lat_match:
for coord in lat_match:
warnings.warn(msg.format(coord.name()))
# Determine the dimensions we need to collapse (and those we don't)
if aggregator.cell_method == "peak":
dims_to_collapse = [
list(self.coord_dims(coord)) for coord in coords
]
# Remove duplicate dimensions.
new_dims = OrderedDict.fromkeys(
d for dim in dims_to_collapse for d in dim
)
# Reverse the dimensions so the order can be maintained when
# reshaping the data.
dims_to_collapse = list(new_dims)[::-1]
else:
dims_to_collapse = set()
for coord in coords:
dims_to_collapse.update(self.coord_dims(coord))
if not dims_to_collapse:
msg = (
"Cannot collapse a dimension which does not describe any "
"data."
)
raise iris.exceptions.CoordinateCollapseError(msg)
untouched_dims = set(range(self.ndim)) - set(dims_to_collapse)
collapsed_cube = iris.util._strip_metadata_from_dims(
self, dims_to_collapse
)
# Remove the collapsed dimension(s) from the metadata
indices = [slice(None, None)] * self.ndim
for dim in dims_to_collapse:
indices[dim] = 0
collapsed_cube = collapsed_cube[tuple(indices)]
# Collapse any coords that span the dimension(s) being collapsed
for coord in self.dim_coords + self.aux_coords:
coord_dims = self.coord_dims(coord)
if set(dims_to_collapse).intersection(coord_dims):
local_dims = [
coord_dims.index(dim)
for dim in dims_to_collapse
if dim in coord_dims
]
collapsed_cube.replace_coord(coord.collapsed(local_dims))
untouched_dims = sorted(untouched_dims)
# Record the axis(s) argument passed to 'aggregation', so the same is
# passed to the 'update_metadata' function.
collapse_axis = -1
data_result = None
# Perform the actual aggregation.
if aggregator.cell_method == "peak":
# The PEAK aggregator must collapse each coordinate separately.
untouched_shape = [self.shape[d] for d in untouched_dims]
collapsed_shape = [self.shape[d] for d in dims_to_collapse]
new_shape = untouched_shape + collapsed_shape
array_dims = untouched_dims + dims_to_collapse
unrolled_data = np.transpose(self.data, array_dims).reshape(
new_shape
)
for dim in dims_to_collapse:
unrolled_data = aggregator.aggregate(
unrolled_data, axis=-1, **kwargs
)
data_result = unrolled_data
# Perform the aggregation in lazy form if possible.
elif aggregator.lazy_func is not None and self.has_lazy_data():
# Use a lazy operation separately defined by the aggregator, based
# on the cube lazy array.
# NOTE: do not reform the data in this case, as 'lazy_aggregate'
# accepts multiple axes (unlike 'aggregate').
collapse_axes = list(dims_to_collapse)
if len(collapse_axes) == 1:
# Replace a "list of 1 axes" with just a number : This single-axis form is *required* by functions
# like da.average (and np.average), if a 1d weights array is specified.
collapse_axes = collapse_axes[0]
try:
data_result = aggregator.lazy_aggregate(
self.lazy_data(), axis=collapse_axes, **kwargs
)
except TypeError:
# TypeError - when unexpected keywords passed through (such as
# weights to mean)
pass
# If we weren't able to complete a lazy aggregation, compute it
# directly now.
if data_result is None:
# Perform the (non-lazy) aggregation over the cube data
# First reshape the data so that the dimensions being aggregated
# over are grouped 'at the end' (i.e. axis=-1).
dims_to_collapse = sorted(dims_to_collapse)
end_size = reduce(
operator.mul, (self.shape[dim] for dim in dims_to_collapse)
)
untouched_shape = [self.shape[dim] for dim in untouched_dims]
new_shape = untouched_shape + [end_size]
dims = untouched_dims + dims_to_collapse
unrolled_data = np.transpose(self.data, dims).reshape(new_shape)
# Perform the same operation on the weights if applicable
weights = kwargs.get("weights")
if weights is not None and weights.ndim > 1:
# Note: *don't* adjust 1-d weights arrays; these have a special meaning for statistics functions.
weights = weights.view()
kwargs["weights"] = np.transpose(weights, dims).reshape(
new_shape
)
data_result = aggregator.aggregate(
unrolled_data, axis=-1, **kwargs
)
aggregator.update_metadata(
collapsed_cube, coords, axis=collapse_axis, **kwargs
)
result = aggregator.post_process(
collapsed_cube, data_result, coords, **kwargs
)
return result
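# --- Editorial usage sketch (assumes bounded latitude/longitude coords). ---
# A weighted horizontal collapse, which avoids the "without weighting"
# warning issued above for spatial coordinates.
#   >>> weights = iris.analysis.cartography.area_weights(cube)
#   >>> mean = cube.collapsed(['latitude', 'longitude'],
#   ...                       iris.analysis.MEAN, weights=weights)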
def aggregated_by(self, coords, aggregator, **kwargs):
"""
Perform aggregation over the cube given one or more "group
coordinates".
A "group coordinate" is a coordinate where repeating values represent a
single group, such as a month coordinate on a daily time slice.
Repeated values will form a group even if they are not consecutive.
The group coordinates must all be over the same cube dimension. Each
common value group identified over all the group-by coordinates is
collapsed using the provided aggregator.
Args:
* coords (list of coord names or :class:`iris.coords.Coord` instances):
One or more coordinates over which group aggregation is to be
performed.
* aggregator (:class:`iris.analysis.Aggregator`):
Aggregator to be applied to each group.
Kwargs:
* kwargs:
Aggregator and aggregation function keyword arguments.
Returns:
:class:`iris.cube.Cube`.
For example:
>>> import iris
>>> import iris.analysis
>>> import iris.coord_categorisation as cat
>>> fname = iris.sample_data_path('ostia_monthly.nc')
>>> cube = iris.load_cube(fname, 'surface_temperature')
>>> cat.add_year(cube, 'time', name='year')
>>> new_cube = cube.aggregated_by('year', iris.analysis.MEAN)
>>> print(new_cube)
surface_temperature / (K) \
(time: 5; latitude: 18; longitude: 432)
Dimension coordinates:
time \
x - -
latitude \
- x -
longitude \
- - x
Auxiliary coordinates:
forecast_reference_time \
x - -
year \
x - -
Scalar coordinates:
forecast_period 0 hours
Cell methods:
mean month, year
mean year
Attributes:
Conventions 'CF-1.5'
STASH m01s00i024
"""
groupby_coords = []
dimension_to_groupby = None
# We can't handle weights
if isinstance(
aggregator, iris.analysis.WeightedAggregator
) and aggregator.uses_weighting(**kwargs):
raise ValueError(
"Invalid Aggregation, aggregated_by() cannot use" " weights."
)
coords = self._as_list_of_coords(coords)
for coord in sorted(coords, key=lambda coord: coord.metadata):
if coord.ndim > 1:
msg = (
"Cannot aggregate_by coord %s as it is "
"multidimensional." % coord.name()
)
raise iris.exceptions.CoordinateMultiDimError(msg)
dimension = self.coord_dims(coord)
if not dimension:
msg = (
'Cannot group-by the coordinate "%s", as its '
"dimension does not describe any data." % coord.name()
)
raise iris.exceptions.CoordinateCollapseError(msg)
if dimension_to_groupby is None:
dimension_to_groupby = dimension[0]
if dimension_to_groupby != dimension[0]:
msg = "Cannot group-by coordinates over different dimensions."
raise iris.exceptions.CoordinateCollapseError(msg)
groupby_coords.append(coord)
# Determine the other coordinates that share the same group-by
# coordinate dimension.
shared_coords = list(
filter(
lambda coord_: coord_ not in groupby_coords,
self.coords(contains_dimension=dimension_to_groupby),
)
)
# Determine which of each shared coord's dimensions will be aggregated.
shared_coords_and_dims = [
(coord_, index)
for coord_ in shared_coords
for (index, dim) in enumerate(self.coord_dims(coord_))
if dim == dimension_to_groupby
]
# Create the aggregation group-by instance.
groupby = iris.analysis._Groupby(
groupby_coords, shared_coords_and_dims
)
# Create the resulting aggregate-by cube and remove the original
# coordinates that are going to be grouped by.
aggregateby_cube = iris.util._strip_metadata_from_dims(
self, [dimension_to_groupby]
)
key = [slice(None, None)] * self.ndim
# Generate unique index tuple key to maintain monotonicity.
key[dimension_to_groupby] = tuple(range(len(groupby)))
key = tuple(key)
aggregateby_cube = aggregateby_cube[key]
for coord in groupby_coords + shared_coords:
aggregateby_cube.remove_coord(coord)
# Determine the group-by cube data shape.
data_shape = list(self.shape + aggregator.aggregate_shape(**kwargs))
data_shape[dimension_to_groupby] = len(groupby)
# Aggregate the group-by data.
if aggregator.lazy_func is not None and self.has_lazy_data():
front_slice = (slice(None, None),) * dimension_to_groupby
back_slice = (slice(None, None),) * (
len(data_shape) - dimension_to_groupby - 1
)
groupby_subcubes = map(
lambda groupby_slice: self[
front_slice + (groupby_slice,) + back_slice
].lazy_data(),
groupby.group(),
)
agg = partial(
aggregator.lazy_aggregate, axis=dimension_to_groupby, **kwargs
)
result = list(map(agg, groupby_subcubes))
aggregateby_data = da.stack(result, axis=dimension_to_groupby)
else:
cube_slice = [slice(None, None)] * len(data_shape)
for i, groupby_slice in enumerate(groupby.group()):
# Slice the cube with the group-by slice to create a group-by
# sub-cube.
cube_slice[dimension_to_groupby] = groupby_slice
groupby_sub_cube = self[tuple(cube_slice)]
# Perform the aggregation over the group-by sub-cube and
# repatriate the aggregated data into the aggregate-by
# cube data.
cube_slice[dimension_to_groupby] = i
result = aggregator.aggregate(
groupby_sub_cube.data, axis=dimension_to_groupby, **kwargs
)
# Determine aggregation result data type for the aggregate-by
# cube data on first pass.
if i == 0:
if ma.isMaskedArray(self.data):
aggregateby_data = ma.zeros(
data_shape, dtype=result.dtype
)
else:
aggregateby_data = np.zeros(
data_shape, dtype=result.dtype
)
aggregateby_data[tuple(cube_slice)] = result
# Add the aggregation meta data to the aggregate-by cube.
aggregator.update_metadata(
aggregateby_cube, groupby_coords, aggregate=True, **kwargs
)
# Replace the appropriate coordinates within the aggregate-by cube.
(dim_coord,) = self.coords(
dimensions=dimension_to_groupby, dim_coords=True
) or [None]
for coord in groupby.coords:
if (
dim_coord is not None
and dim_coord.metadata == coord.metadata
and isinstance(coord, iris.coords.DimCoord)
):
aggregateby_cube.add_dim_coord(
coord.copy(), dimension_to_groupby
)
else:
aggregateby_cube.add_aux_coord(
coord.copy(), self.coord_dims(coord)
)
# Attach the aggregate-by data into the aggregate-by cube.
aggregateby_cube = aggregator.post_process(
aggregateby_cube, aggregateby_data, coords, **kwargs
)
return aggregateby_cube
def rolling_window(self, coord, aggregator, window, **kwargs):
"""
Perform rolling window aggregation on a cube given a coordinate, an
aggregation method and a window size.
Args:
* coord (string/:class:`iris.coords.Coord`):
The coordinate over which to perform the rolling window
aggregation.
* aggregator (:class:`iris.analysis.Aggregator`):
Aggregator to be applied to the data.
* window (int):
Size of window to use.
Kwargs:
* kwargs:
Aggregator and aggregation function keyword arguments. The weights
argument to the aggregator, if any, should be a 1d array with the
same length as the chosen window.
Returns:
:class:`iris.cube.Cube`.
.. note::
This operation does not yet have support for lazy evaluation.
For example:
>>> import iris, iris.analysis
>>> fname = iris.sample_data_path('GloSea4', 'ensemble_010.pp')
>>> air_press = iris.load_cube(fname, 'surface_temperature')
>>> print(air_press)
surface_temperature / (K) \
(time: 6; latitude: 145; longitude: 192)
Dimension coordinates:
time \
x - -
latitude \
- x -
longitude \
- - x
Auxiliary coordinates:
forecast_period \
x - -
Scalar coordinates:
forecast_reference_time 2011-07-23 00:00:00
realization 10
Cell methods:
mean time (1 hour)
Attributes:
STASH m01s00i024
source \
'Data from Met Office Unified Model'
um_version '7.6'
>>> print(air_press.rolling_window('time', iris.analysis.MEAN, 3))
surface_temperature / (K) \
(time: 4; latitude: 145; longitude: 192)
Dimension coordinates:
time \
x - -
latitude \
- x -
longitude \
- - x
Auxiliary coordinates:
forecast_period \
x - -
Scalar coordinates:
forecast_reference_time 2011-07-23 00:00:00
realization 10
Cell methods:
mean time (1 hour)
mean time
Attributes:
STASH m01s00i024
source \
'Data from Met Office Unified Model'
um_version '7.6'
Notice that the time dimension now represents the 4 possible
windows of size 3 from the original cube.
"""
coord = self._as_list_of_coords(coord)[0]
if getattr(coord, "circular", False):
raise iris.exceptions.NotYetImplementedError(
"Rolling window over a circular coordinate."
)
if window < 2:
raise ValueError(
"Cannot perform rolling window "
"with a window size less than 2."
)
if coord.ndim > 1:
raise iris.exceptions.CoordinateMultiDimError(coord)
dimension = self.coord_dims(coord)
if len(dimension) != 1:
raise iris.exceptions.CoordinateCollapseError(
'Cannot perform rolling window with coordinate "%s", '
"must map to one data dimension." % coord.name()
)
dimension = dimension[0]
# Use indexing to get a result-cube of the correct shape.
# NB. This indexes the data array which is wasted work.
# As index-to-get-shape-then-fiddle is a common pattern, perhaps
# some sort of `cube.prepare()` method would be handy to allow
# re-shaping with given data, and returning a mapping of
# old-to-new-coords (to avoid having to use metadata identity)?
new_cube = iris.util._strip_metadata_from_dims(self, [dimension])
key = [slice(None, None)] * self.ndim
key[dimension] = slice(None, self.shape[dimension] - window + 1)
new_cube = new_cube[tuple(key)]
# take a view of the original data using the rolling_window function
# this will add an extra dimension to the data at dimension + 1 which
# represents the rolled window (i.e. will have a length of window)
rolling_window_data = iris.util.rolling_window(
self.data, window=window, axis=dimension
)
# now update all of the coordinates to reflect the aggregation
for coord_ in self.coords(dimensions=dimension):
if coord_.has_bounds():
warnings.warn(
"The bounds of coordinate %r were ignored in "
"the rolling window operation." % coord_.name()
)
if coord_.ndim != 1:
raise ValueError(
"Cannot calculate the rolling "
"window of %s as it is a multidimensional "
"coordinate." % coord_.name()
)
new_bounds = iris.util.rolling_window(coord_.points, window)
if np.issubdtype(new_bounds.dtype, np.str_):
# Handle case where the AuxCoord contains string. The points
# are the serialized form of the points contributing to each
# window and the bounds are the first and last points in the
# window as with numeric coordinates.
new_points = np.apply_along_axis(
lambda x: "|".join(x), -1, new_bounds
)
new_bounds = new_bounds[:, (0, -1)]
else:
# Take the first and last element of the rolled window (i.e.
# the bounds) and the new points are the midpoints of these
# bounds.
new_bounds = new_bounds[:, (0, -1)]
new_points = np.mean(new_bounds, axis=-1)
# wipe the coords points and set the bounds
new_coord = new_cube.coord(coord_)
new_coord.points = new_points
new_coord.bounds = new_bounds
# update the metadata of the cube itself
aggregator.update_metadata(
new_cube,
[coord],
action="with a rolling window of length %s over" % window,
**kwargs,
)
# and perform the data transformation, generating weights first if
# needed
if isinstance(
aggregator, iris.analysis.WeightedAggregator
) and aggregator.uses_weighting(**kwargs):
if "weights" in kwargs:
weights = kwargs["weights"]
if weights.ndim > 1 or weights.shape[0] != window:
raise ValueError(
"Weights for rolling window aggregation "
"must be a 1d array with the same length "
"as the window."
)
kwargs = dict(kwargs)
kwargs["weights"] = iris.util.broadcast_to_shape(
weights, rolling_window_data.shape, (dimension + 1,)
)
data_result = aggregator.aggregate(
rolling_window_data, axis=dimension + 1, **kwargs
)
result = aggregator.post_process(
new_cube, data_result, [coord], **kwargs
)
return result
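# --- Editorial usage sketch (assumes numpy imported as np). ---
# A weighted 3-point running mean; per the check above, the weights must
# be a 1-d array with the same length as the window.
#   >>> smooth = cube.rolling_window('time', iris.analysis.MEAN, 3,
#   ...                              weights=np.array([0.25, 0.5, 0.25]))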
def interpolate(self, sample_points, scheme, collapse_scalar=True):
"""
Interpolate from this :class:`~iris.cube.Cube` to the given
sample points using the given interpolation scheme.
Args:
* sample_points:
A sequence of (coordinate, points) pairs over which to
interpolate. The values for coordinates that correspond to
dates or times may optionally be supplied as datetime.datetime or
cftime.datetime instances.
* scheme:
An instance of the type of interpolation to use to interpolate from this
:class:`~iris.cube.Cube` to the given sample points. The
interpolation schemes currently available in Iris are:
* :class:`iris.analysis.Linear`, and
* :class:`iris.analysis.Nearest`.
Kwargs:
* collapse_scalar:
Whether to collapse the dimension of scalar sample points
in the resulting cube. Default is True.
Returns:
A cube interpolated at the given sample points.
If `collapse_scalar` is True then the dimensionality of the cube
will be the number of original cube dimensions minus
the number of scalar coordinates.
For example:
>>> import datetime
>>> import iris
>>> path = iris.sample_data_path('uk_hires.pp')
>>> cube = iris.load_cube(path, 'air_potential_temperature')
>>> print(cube.summary(shorten=True))
air_potential_temperature / (K) \
(time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187)
>>> print(cube.coord('time'))
DimCoord : time / (hours since 1970-01-01 00:00:00, gregorian calendar)
points: [2009-11-19 10:00:00, 2009-11-19 11:00:00, 2009-11-19 12:00:00]
shape: (3,)
dtype: float64
standard_name: 'time'
>>> print(cube.coord('time').points)
[349618. 349619. 349620.]
>>> samples = [('time', 349618.5)]
>>> result = cube.interpolate(samples, iris.analysis.Linear())
>>> print(result.summary(shorten=True))
air_potential_temperature / (K) \
(model_level_number: 7; grid_latitude: 204; grid_longitude: 187)
>>> print(result.coord('time'))
DimCoord : time / (hours since 1970-01-01 00:00:00, gregorian calendar)
points: [2009-11-19 10:30:00]
shape: (1,)
dtype: float64
standard_name: 'time'
>>> print(result.coord('time').points)
[349618.5]
>>> # For datetime-like coordinates, we can also use
>>> # datetime-like objects.
>>> samples = [('time', datetime.datetime(2009, 11, 19, 10, 30))]
>>> result2 = cube.interpolate(samples, iris.analysis.Linear())
>>> print(result2.summary(shorten=True))
air_potential_temperature / (K) \
(model_level_number: 7; grid_latitude: 204; grid_longitude: 187)
>>> print(result2.coord('time'))
DimCoord : time / (hours since 1970-01-01 00:00:00, gregorian calendar)
points: [2009-11-19 10:30:00]
shape: (1,)
dtype: float64
standard_name: 'time'
>>> print(result2.coord('time').points)
[349618.5]
>>> print(result == result2)
True
"""
coords, points = zip(*sample_points)
interp = scheme.interpolator(self, coords)
return interp(points, collapse_scalar=collapse_scalar)
def regrid(self, grid, scheme):
r"""
Regrid this :class:`~iris.cube.Cube` on to the given target `grid`
using the given regridding `scheme`.
Args:
* grid:
A :class:`~iris.cube.Cube` that defines the target grid.
* scheme:
An instance of the type of regridding to use to regrid this cube onto the
target grid. The regridding schemes in Iris currently include:
* :class:`iris.analysis.Linear`\*,
* :class:`iris.analysis.Nearest`\*,
* :class:`iris.analysis.AreaWeighted`\*,
* :class:`iris.analysis.UnstructuredNearest`,
* :class:`iris.analysis.PointInCell`,
\* Supports lazy regridding.
Returns:
A cube defined with the horizontal dimensions of the target grid
and the other dimensions from this cube. The data values of
this cube will be converted to values on the new grid
according to the given regridding scheme.
The returned cube will have lazy data if the original cube has
lazy data and the regridding scheme supports lazy regridding.
.. note::
Both the source and target cubes must have a CoordSystem, otherwise
this function is not applicable.
"""
regridder = scheme.regridder(self, grid)
return regridder(self)
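# --- Editorial usage sketch (`target` is an assumed grid-defining cube). ---
# Both cubes need a coordinate system, per the note above.
#   >>> regridded = cube.regrid(target, iris.analysis.Linear())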
class ClassDict(MutableMapping):
"""
A mapping that stores objects keyed on their superclasses and their names.
The mapping has a root class, all stored objects must be a subclass of the
root class. The superclasses used for an object include the class of the
object, but do not include the root class. Only one object is allowed for
any key.
"""
def __init__(self, superclass):
if not isinstance(superclass, type):
raise TypeError(
"The superclass must be a Python type or new " "style class."
)
self._superclass = superclass
self._basic_map = {}
self._retrieval_map = {}
def add(self, object_, replace=False):
"""Add an object to the dictionary."""
if not isinstance(object_, self._superclass):
msg = "Only subclasses of {!r} are allowed as values.".format(
self._superclass.__name__
)
raise TypeError(msg)
# Find all the superclasses of the given object, starting with the
# object's class.
superclasses = type.mro(type(object_))
if not replace:
# Ensure nothing else is already registered against those
# superclasses.
# NB. This implies the _basic_map will also be empty for this
# object.
for key_class in superclasses:
if key_class in self._retrieval_map:
msg = (
"Cannot add instance of '%s' because instance of "
"'%s' already added."
% (type(object_).__name__, key_class.__name__)
)
raise ValueError(msg)
# Register the given object against those superclasses.
for key_class in superclasses:
self._retrieval_map[key_class] = object_
self._retrieval_map[key_class.__name__] = object_
self._basic_map[type(object_)] = object_
def __getitem__(self, class_):
try:
return self._retrieval_map[class_]
except KeyError:
raise KeyError("Coordinate system %r does not exist." % class_)
def __setitem__(self, key, value):
raise NotImplementedError("You must call the add method instead.")
def __delitem__(self, class_):
cs = self[class_]
keys = [k for k, v in self._retrieval_map.items() if v == cs]
for key in keys:
del self._retrieval_map[key]
del self._basic_map[type(cs)]
return cs
def __len__(self):
return len(self._basic_map)
def __iter__(self):
for item in self._basic_map:
yield item
def keys(self):
"""Return the keys of the dictionary mapping."""
return self._basic_map.keys()
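# --- Editorial usage sketch (GeogCS chosen purely for illustration). ---
# One object per class hierarchy, retrievable by class or by class name.
#   >>> systems = ClassDict(iris.coord_systems.CoordSystem)
#   >>> systems.add(iris.coord_systems.GeogCS(6371229.0))
#   >>> systems['GeogCS'] is systems[iris.coord_systems.GeogCS]
#   True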
def sorted_axes(axes):
"""
Returns the axis names sorted alphabetically, with the exception that
't', 'z', 'y', and 'x' are sorted to the end.
"""
return sorted(
axes,
key=lambda name: ({"x": 4, "y": 3, "z": 2, "t": 1}.get(name, 0), name),
)
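# --- Editorial behaviour sketch for sorted_axes. ---
# Plain names sort alphabetically first, then t, z, y, x at the end.
#   >>> sorted_axes(['x', 't', 'forecast_period', 'z', 'y'])
#   ['forecast_period', 't', 'z', 'y', 'x']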
# See Cube.slices() for the definition/context.
class _SliceIterator(Iterator):
def __init__(self, cube, dims_index, requested_dims, ordered):
self._cube = cube
# Let Numpy do some work in providing all of the permutations of our
# data shape. This functionality is something like:
# ndindex(2, 1, 3) -> [(0, 0, 0), (0, 0, 1), (0, 0, 2),
# (1, 0, 0), (1, 0, 1), (1, 0, 2)]
self._ndindex = np.ndindex(*dims_index)
self._requested_dims = requested_dims
# indexing relating to sliced cube
self._mod_requested_dims = np.argsort(requested_dims)
self._ordered = ordered
def __next__(self):
# NB. When self._ndindex runs out it will raise StopIteration for us.
index_tuple = next(self._ndindex)
# Turn the given tuple into a list so that we can do something with it
index_list = list(index_tuple)
# For each of the spanning dimensions requested, replace the 0 with a
# spanning slice
for d in self._requested_dims:
index_list[d] = slice(None, None)
# Request the slice
cube = self._cube[tuple(index_list)]
if self._ordered:
if any(self._mod_requested_dims != list(range(len(cube.shape)))):
n = len(self._mod_requested_dims)
sliced_dims = np.empty(n, dtype=int)
sliced_dims[self._mod_requested_dims] = np.arange(n)
cube.transpose(sliced_dims)
return cube
next = __next__
| SciTools/iris | lib/iris/cube.py | Python | lgpl-3.0 | 164,190 | ["NetCDF"] | 6004f07f3e2de9d4d2441ed8b117c82ec93f6ae3d2986d31c66cad4a43e54c82 |
"""
Container page in Studio
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import Promise, EmptyPromise
from . import BASE_URL
from utils import click_css, confirm_prompt
class ContainerPage(PageObject):
"""
Container page in Studio
"""
NAME_SELECTOR = '.page-header-title'
NAME_INPUT_SELECTOR = '.page-header .xblock-field-input'
NAME_FIELD_WRAPPER_SELECTOR = '.page-header .wrapper-xblock-field'
ADD_MISSING_GROUPS_SELECTOR = '.notification-action-button[data-notification-action="add-missing-groups"]'
def __init__(self, browser, locator):
super(ContainerPage, self).__init__(browser)
self.locator = locator
@property
def url(self):
"""URL to the container page for an xblock."""
return "{}/container/{}".format(BASE_URL, self.locator)
@property
def name(self):
titles = self.q(css=self.NAME_SELECTOR).text
if titles:
return titles[0]
else:
return None
def is_browser_on_page(self):
def _xblock_count(class_name, request_token):
return len(self.q(css='{body_selector} .xblock.{class_name}[data-request-token="{request_token}"]'.format(
body_selector=XBlockWrapper.BODY_SELECTOR, class_name=class_name, request_token=request_token
)).results)
def _is_finished_loading():
is_done = False
# Get the request token of the first xblock rendered on the page and assume it is correct.
data_request_elements = self.q(css='[data-request-token]')
if len(data_request_elements) > 0:
request_token = data_request_elements.first.attrs('data-request-token')[0]
# Then find the number of Studio xblock wrappers on the page with that request token.
num_wrappers = len(self.q(css='{} [data-request-token="{}"]'.format(XBlockWrapper.BODY_SELECTOR, request_token)).results)
# Wait until all components have been loaded and marked as either initialized or failed.
# See:
# - common/static/coffee/src/xblock/core.coffee which adds the class "xblock-initialized"
# at the end of initializeBlock.
# - common/static/js/views/xblock.js which adds the class "xblock-initialization-failed"
# if the xblock threw an error while initializing.
num_initialized_xblocks = _xblock_count('xblock-initialized', request_token)
num_failed_xblocks = _xblock_count('xblock-initialization-failed', request_token)
is_done = num_wrappers == (num_initialized_xblocks + num_failed_xblocks)
return (is_done, is_done)
# First make sure that an element with the view-container class is present on the page,
# and then wait for the loading spinner to go away and all the xblocks to be initialized.
return (
self.q(css='body.view-container').present and
self.q(css='div.ui-loading.is-hidden').present and
Promise(_is_finished_loading, 'Finished rendering the xblock wrappers.').fulfill()
)
def wait_for_component_menu(self):
"""
Waits until the menu bar of components is present on the page.
"""
EmptyPromise(
lambda: self.q(css='div.add-xblock-component').present,
'Wait for the menu of components to be present'
).fulfill()
@property
def xblocks(self):
"""
Return a list of xblocks loaded on the container page.
"""
return self._get_xblocks()
@property
def inactive_xblocks(self):
"""
Return a list of inactive xblocks loaded on the container page.
"""
return self._get_xblocks(".is-inactive ")
@property
def active_xblocks(self):
"""
Return a list of active xblocks loaded on the container page.
"""
return self._get_xblocks(".is-active ")
@property
def publish_title(self):
"""
Returns the title as displayed on the publishing sidebar component.
"""
return self.q(css='.pub-status').first.text[0]
@property
def release_title(self):
"""
Returns the title before the release date in the publishing sidebar component.
"""
return self.q(css='.wrapper-release .title').first.text[0]
@property
def release_date(self):
"""
Returns the release date of the unit (with ancestor inherited from), as displayed
in the publishing sidebar component.
"""
return self.q(css='.wrapper-release .copy').first.text[0]
@property
def last_saved_text(self):
"""
Returns the last saved message as displayed in the publishing sidebar component.
"""
return self.q(css='.wrapper-last-draft').first.text[0]
@property
def last_published_text(self):
"""
Returns the last published message as displayed in the sidebar.
"""
return self.q(css='.wrapper-last-publish').first.text[0]
@property
def currently_visible_to_students(self):
"""
Returns True if the unit is marked as currently visible to students
(meaning that a warning is being displayed).
"""
warnings = self.q(css='.container-message .warning')
if not warnings.is_present():
return False
warning_text = warnings.first.text[0]
return warning_text == "Caution: The last published version of this unit is live. By publishing changes you will change the student experience."
def shows_inherited_staff_lock(self, parent_type=None, parent_name=None):
"""
Returns True if the unit inherits staff lock from a section or subsection.
"""
return self.q(css='.bit-publishing .wrapper-visibility .copy .inherited-from').visible
@property
def publish_action(self):
"""
Returns the link for publishing a unit.
"""
return self.q(css='.action-publish').first
def discard_changes(self):
"""
Discards draft changes (which will then re-render the page).
"""
click_css(self, 'a.action-discard', 0, require_notification=False)
confirm_prompt(self)
self.wait_for_ajax()
@property
def is_staff_locked(self):
""" Returns True if staff lock is currently enabled, False otherwise """
return 'icon-check' in self.q(css='a.action-staff-lock>i').attrs('class')
def toggle_staff_lock(self, inherits_staff_lock=False):
"""
Toggles "hide from students" which enables or disables a staff-only lock.
Returns True if the lock is now enabled, else False.
"""
was_locked_initially = self.is_staff_locked
if not was_locked_initially:
self.q(css='a.action-staff-lock').first.click()
else:
click_css(self, 'a.action-staff-lock', 0, require_notification=False)
if not inherits_staff_lock:
confirm_prompt(self)
self.wait_for_ajax()
return not was_locked_initially
def view_published_version(self):
"""
Clicks "View Live Version", which will open the published version of the unit page in the LMS.
Switches the browser to the newly opened LMS window.
"""
self.q(css='.button-view').first.click()
self._switch_to_lms()
def preview(self):
"""
Clicks "Preview Changes", which will open the draft version of the unit page in the LMS.
Switches the browser to the newly opened LMS window.
"""
self.q(css='.button-preview').first.click()
self._switch_to_lms()
def _switch_to_lms(self):
"""
        Assumes the LMS has opened; switches to that window.
"""
browser_window_handles = self.browser.window_handles
        # Switch to the browser window that shows the HTML unit in the LMS.
        # The last handle represents the most recently opened window.
self.browser.switch_to_window(browser_window_handles[-1])
def _get_xblocks(self, prefix=""):
return self.q(css=prefix + XBlockWrapper.BODY_SELECTOR).map(
lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results
def duplicate(self, source_index):
"""
Duplicate the item with index source_index (based on vertical placement in page).
"""
click_css(self, 'a.duplicate-button', source_index)
def delete(self, source_index):
"""
Delete the item with index source_index (based on vertical placement in page).
Only visible items are counted in the source_index.
The index of the first item is 0.
"""
# Click the delete button
click_css(self, 'a.delete-button', source_index, require_notification=False)
# Click the confirmation dialog button
confirm_prompt(self)
def edit(self):
"""
Clicks the "edit" button for the first component on the page.
"""
return _click_edit(self)
def add_missing_groups(self):
"""
Click the "add missing groups" link.
Note that this does an ajax call.
"""
self.q(css=self.ADD_MISSING_GROUPS_SELECTOR).first.click()
self.wait_for_ajax()
# Wait until all xblocks rendered.
self.wait_for_page()
def missing_groups_button_present(self):
"""
Returns True if the "add missing groups" button is present.
"""
return self.q(css=self.ADD_MISSING_GROUPS_SELECTOR).present
def get_xblock_information_message(self):
"""
Returns an information message for the container page.
"""
return self.q(css=".xblock-message.information").first.text[0]
def is_inline_editing_display_name(self):
"""
Return whether this container's display name is in its editable form.
"""
return "is-editing" in self.q(css=self.NAME_FIELD_WRAPPER_SELECTOR).first.attrs("class")[0]
class XBlockWrapper(PageObject):
"""
A PageObject representing a wrapper around an XBlock child shown on the Studio container page.
"""
url = None
BODY_SELECTOR = '.studio-xblock-wrapper'
NAME_SELECTOR = '.xblock-display-name'
COMPONENT_BUTTONS = {
'basic_tab': '.editor-tabs li.inner_tab_wrap:nth-child(1) > a',
'advanced_tab': '.editor-tabs li.inner_tab_wrap:nth-child(2) > a',
'save_settings': '.action-save',
}
def __init__(self, browser, locator):
super(XBlockWrapper, self).__init__(browser)
self.locator = locator
def is_browser_on_page(self):
return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
def _bounded_selector(self, selector):
"""
        Return `selector`, but limited to this particular `XBlockWrapper` context.
"""
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
@property
def student_content(self):
"""
Returns the text content of the xblock as displayed on the container page.
"""
return self.q(css=self._bounded_selector('.xblock-student_view'))[0].text
@property
def name(self):
titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
if titles:
return titles[0]
else:
return None
@property
def children(self):
"""
Will return any first-generation descendant xblocks of this xblock.
"""
descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results
# Now remove any non-direct descendants.
grandkids = []
for descendant in descendants:
grandkids.extend(descendant.children)
grand_locators = [grandkid.locator for grandkid in grandkids]
        return [descendant for descendant in descendants if descendant.locator not in grand_locators]
@property
def preview_selector(self):
return self._bounded_selector('.xblock-student_view,.xblock-author_view')
def go_to_container(self):
"""
Open the container page linked to by this xblock, and return
an initialized :class:`.ContainerPage` for that xblock.
"""
return ContainerPage(self.browser, self.locator).visit()
def edit(self):
"""
Clicks the "edit" button for this xblock.
"""
return _click_edit(self, self._bounded_selector)
def open_advanced_tab(self):
"""
Click on Advanced Tab.
"""
self._click_button('advanced_tab')
def open_basic_tab(self):
"""
Click on Basic Tab.
"""
self._click_button('basic_tab')
def save_settings(self):
"""
Click on settings Save button.
"""
self._click_button('save_settings')
@property
def editor_selector(self):
return '.xblock-studio_view'
def _click_button(self, button_name):
"""
Click on a button as specified by `button_name`
Arguments:
button_name (str): button name
"""
self.q(css=self.COMPONENT_BUTTONS[button_name]).first.click()
self.wait_for_ajax()
def go_to_group_configuration_page(self):
"""
Go to the Group Configuration used by the component.
"""
self.q(css=self._bounded_selector('span.message-text a')).first.click()
@property
def group_configuration_link_name(self):
"""
Get Group Configuration name from link.
"""
return self.q(css=self._bounded_selector('span.message-text a')).first.text[0]
def _click_edit(page_object, bounded_selector=lambda x: x):
"""
Click on the first edit button found and wait for the Studio editor to be present.
"""
page_object.q(css=bounded_selector('.edit-button')).first.click()
EmptyPromise(
lambda: page_object.q(css='.xblock-studio_view').present,
'Wait for the Studio editor to be present'
).fulfill()
return page_object
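# Minimal usage sketch (hypothetical browser and locator; not part of this
# module). A bok_choy PageObject is constructed with a Selenium browser;
# visit() navigates to `url` and blocks until is_browser_on_page() is True.
#
#   from selenium import webdriver
#
#   browser = webdriver.Firefox()
#   page = ContainerPage(browser, "i4x://org/course/vertical/unit1")
#   page.visit()                 # loads BASE_URL/container/<locator>
#   print page.name, page.publish_title
#   page.edit()                  # opens the Studio editor for the first component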
|
peterm-itr/edx-platform
|
common/test/acceptance/pages/studio/container.py
|
Python
|
agpl-3.0
| 14,433
|
[
"VisIt"
] |
907da3fc9d7429ac7109cc48e9b3f716b81a01e07bb37e5412e1da45ad3a3a19
|
#!/usr/bin/env python
# file test_exclude_seqs_by_blast.py
__author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Jesse Zaneveld", "Rob Knight"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jesse Zaneveld"
__email__ = "zaneveld@gmail.com"
"""
Test code for exclude_seqs_by_blast.py.
NOTE: requires BLAST to be properly installed, with the relevant
environment variables set, for these tests to pass.
"""
from os import remove, system, close
from random import choice
from tempfile import mkstemp
from numpy import array, arange, log, log10
from unittest import TestCase, main
from numpy.testing import assert_almost_equal
from bfillings.blast import BlastResult
from qiime.exclude_seqs_by_blast import blast_genome,\
find_homologs,\
sequences_to_file,\
no_filter,\
make_percent_align_filter,\
query_ids_from_blast_result,\
ids_from_fasta_lines,\
id_from_fasta_label_line,\
seqs_from_file,\
ids_to_seq_file
from qiime.util import remove_files
class ExcludeHumanTests(TestCase):
def setUp(self):
self.blast_lines = BLAST_LINES
self.blast_result = BlastResult(self.blast_lines)
fd, self.subjectdb_fp = mkstemp(prefix='ExcludeByBlastTests_',
suffix='.fasta')
close(fd)
fd, self.query_fp = mkstemp(prefix='ExcludeByBlastTests_',
suffix='.fasta')
close(fd)
fd, self.query2_fp = mkstemp(prefix='ExcludeByBlastTests_',
suffix='.fasta')
close(fd)
open(self.subjectdb_fp, "w").writelines(TEST_BLAST_DB_LINES)
open(self.query_fp, "w").writelines(TEST_BLAST_DB_LINES)
open(self.query2_fp, "w").writelines(TEST_BLAST_DB2_LINES)
self._paths_to_clean_up = [self.subjectdb_fp, self.query_fp,
self.query2_fp]
def tearDown(self):
remove_files(self._paths_to_clean_up)
def test_blast_genome(self):
"""blast_genome should return raw BLAST output."""
formatdb_cmd = 'formatdb -p F -o T -i %s' % self.subjectdb_fp
system(formatdb_cmd)
self._paths_to_clean_up.append("formatdb.log")
for suffix in ["nhr", "nin", "nsd", "nsi", "nsq"]:
self._paths_to_clean_up.append(".".join(
[self.subjectdb_fp, suffix]))
raw_output = blast_genome(TEST_BLAST_DB_LINES, self.subjectdb_fp,
e_value=1e-4, max_hits=100, word_size=28,
working_dir="./", blast_mat_root=None)
i = 0
for line in raw_output:
if line.startswith("#"):
i += 1
continue # comments depend on tmpfilename, BLAST version
self.assertEqual(raw_output[i], EXP_BLAST_OUTPUT[i])
i += 1
def test_find_homologs(self):
"""find_homologs should return raw data, filtered and removed ids."""
formatdb_cmd = 'formatdb -p F -o T -i %s' % self.subjectdb_fp
system(formatdb_cmd)
self._paths_to_clean_up.append("formatdb.log")
for suffix in ["nhr", "nin", "nsd", "nsi", "nsq"]:
self._paths_to_clean_up.append(".".join(
[self.subjectdb_fp, suffix]))
blast_output, hit_ids, removed_hit_ids =\
find_homologs(self.query_fp, self.subjectdb_fp, e_value=1e-4,
max_hits=100, working_dir="./", blast_mat_root=None,
wordsize=28, percent_aligned=0.98, DEBUG=False)
self.assertEqual(hit_ids, set(["bth:BT_0001", "hsa:8355"]))
self.assertEqual(removed_hit_ids, set())
i = 0
for line in blast_output:
if line.startswith("#"):
i += 1
continue # depends on tmpfilename, skip testing
self.assertEqual(blast_output[i], EXP_BLAST_OUTPUT[i])
i += 1
# Ensure low % alignment seqs are removed
blast_output, hit_ids, removed_hit_ids =\
find_homologs(self.query2_fp, self.subjectdb_fp,
e_value=1e-4, max_hits=100, working_dir="./",
blast_mat_root=None, wordsize=28, percent_aligned=1.00,
DEBUG=False)
self.assertEqual(hit_ids, set(["bth:BT_0001"]))
self.assertEqual(removed_hit_ids, set(["hsa:8355_tweaked"]))
# Ensure high % alignment seqs are not removed
blast_output, hit_ids, removed_hit_ids =\
find_homologs(self.query2_fp, self.subjectdb_fp,
e_value=1e-4, max_hits=100, working_dir="./",
blast_mat_root=None, wordsize=28, percent_aligned=0.75,
DEBUG=False)
self.assertEqual(hit_ids, set(["bth:BT_0001", "hsa:8355_tweaked"]))
self.assertEqual(removed_hit_ids, set())
def test_sequences_to_file(self):
"""sequences_to_file should write a standard format FASTA file."""
fd, self.seq_test_fp = mkstemp(prefix='ExcludeByBlastTests_',
suffix='.fasta')
close(fd)
self._paths_to_clean_up.append(self.seq_test_fp)
ids = ["bth:BT_0001", "hsa:8355"]
seqs = seqs_from_file(ids, open(self.query_fp).readlines())
sequences_to_file(seqs, self.seq_test_fp)
self.assertEqual(open(self.seq_test_fp).readlines(),
open(self.query_fp).readlines())
def test_no_filter(self):
"""no_filter should always return True."""
d1 = {"% IDENTITY": "97.6"}
d2 = {"% IDENTITY": "0.0"}
d3 = {"% IDENTITY": "100.0"}
self.assertTrue(no_filter(d1))
self.assertTrue(no_filter(d2))
self.assertTrue(no_filter(d3))
def test_make_percent_align_filter(self):
"""make_percent_align_filter should return a percent align filter fn"""
d1 = {"% IDENTITY": "97.6"}
d2 = {"% IDENTITY": "0.0"}
d3 = {"% IDENTITY": "100.0"}
af1 = make_percent_align_filter(0.50)
af2 = make_percent_align_filter(0.00)
af3 = make_percent_align_filter(1.0)
# Test filter 1
self.assertTrue(af1(d1))
self.assertFalse(af1(d2))
self.assertTrue(af1(d3))
# Test filter 2
self.assertTrue(af2(d1))
self.assertTrue(af2(d2))
self.assertTrue(af2(d3))
# Test filter 3
self.assertFalse(af3(d1))
self.assertFalse(af3(d2))
self.assertTrue(af3(d3))
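    # A sketch of the factory these assertions pin down (inferred from the
    # tests above; the real implementation lives in qiime.exclude_seqs_by_blast):
    def _sketch_make_percent_align_filter(self, min_fraction):
        """Hypothetical re-implementation: pass a BLAST hit dict when its
        "% IDENTITY" field meets min_fraction on a 0-100 scale."""
        def align_filter(hit):
            return float(hit["% IDENTITY"]) >= min_fraction * 100.0
        return align_filter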
def test_query_ids_from_blast_result(self):
"query_ids_from_blast_result should return query_ids matching filter"
align_filter = make_percent_align_filter(2.0) # none should pass
ok_ids, removed_ids = query_ids_from_blast_result(
self.blast_result, align_filter, DEBUG=True)
self.assertEqual(ok_ids, set())
def test_ids_from_fasta_lines(self):
""" ids_from_fasta_lines should return ids"""
fasta_lines = \
[">hsa:8355 HIST1H3G; histone cluster 1, H3g ; K11253 histone H3",
"atggcccgcaccaagcagactgcacgcaagtccaccggtggcaaagcgccgcgcaagcagctgg",
"ccactaaggcggctcggaaaagcgcgccggccaccggcggcgtgaagaaacctcatcgctaccg",
"tcccggcaccgtggctctgcgcgagattcgccgctatcagaagtcgactgagctgctgatccgc",
"aagttgcctttccaacgcctggtgcgagaaatcgctcaggacttcaagacagatctgcgctttc",
"agagttccgcggtgatggccctgcaggaggcctgcgaggcctacttggtggggctctttgagga",
"taccaacctgtgtgccatccatgctaagcgagtgactatcatgcccaaggacattcagctcgct",
"cgccgcattcgtggggagagagcgtag",
">hsa:9081 PRY; PTPN13-like, Y-linked",
"atgggagccactgggcttggctttctactttcctggagacaagacaatttgaatggcact"]
exp_ids = ["hsa:8355", "hsa:9081"]
obs_ids = ids_from_fasta_lines(fasta_lines)
self.assertEqual(obs_ids, exp_ids)
def test_id_from_fasta_label_line(self):
"""id_from_fasta_label_line should extract id"""
label_line = \
">hsa:8355 HIST1H3G; histone cluster 1, H3g ; K11253 histone H3"
self.assertEqual(id_from_fasta_label_line(label_line), "hsa:8355")
def test_seqs_from_file(self):
"""seqs_from_file should extract labels,seqs for specified ids"""
ids = "bth:BT_0001"
seqs = seqs_from_file(ids, open(self.query_fp).readlines())
all_results = []
for label, seq in seqs:
all_results.append((label, seq))
self.assertEqual(1, len(all_results)) # should return only 1 entry
# fn should return version lacking ">" and newlines
self.assertEqual(all_results[0],
(TEST_BLAST_DB_LINES[2].strip(">").strip(),
TEST_BLAST_DB_LINES[3].strip()))
def test_ids_to_seq_file(self):
"""ids_to_seq_file should lookup and write out seqs for given ids"""
fd, self.id_test_fp = mkstemp(prefix='ExcludeByBlastTests_',
suffix='.fasta')
close(fd)
self._paths_to_clean_up.append(self.id_test_fp)
ids = ["bth:BT_0001"]
ids_to_seq_file(ids, self.query_fp, self.id_test_fp)
# this is the bth entry
exp_lines = open(self.query_fp).readlines()[2:]
self.assertEqual(open(self.id_test_fp).readlines(), exp_lines)
# Predefined Test strings
BLAST_LINES = [
'# BLASTN 2.2.16 [Mar-25-2007]\n',
'# Query: hsa:8355 HIST1H3G; histone cluster 1, H3g ; K11253 histone H3\n',
'# Database: /home/zaneveld/quicksand/data/human_genome/h.sapiens.nuc\n',
'# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score\n',
'hsa:8355\thsa:8355\t95.00\t411\t0\t0\t1\t411\t1\t411\t0.0\t 815\n',
'hsa:8355\thsa:8351\t88.29\t410\t48\t0\t1\t410\t1\t410\t8e-121\t 432\n',
'hsa:8355\thsa:8353\t87.15\t397\t51\t0\t13\t409\t13\t409\t7e-106\t 383\n',
'hsa:8355\thsa:8968\t86.63\t404\t54\t0\t1\t404\t1\t404\t6e-103\t 373\n',
'hsa:8355\thsa:8354\t86.84\t380\t50\t0\t25\t404\t25\t404\t4e-98\t 357\n',
'# BLASTN 2.2.16 [Mar-25-2007]\n',
'# Query: hsa:9081 PRY; PTPN13-like, Y-linked\n',
'# Database: /home/zaneveld/quicksand/data/human_genome/h.sapiens.nuc\n',
'# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score\n',
'hsa:9081\thsa:442862\t100.00\t444\t0\t0\t1\t444\t1\t444\t0.0\t 880\n',
'hsa:9081\thsa:9081\t100.00\t444\t0\t0\t1\t444\t1\t444\t0.0\t 880\n',
'# BLASTN 2.2.16 [Mar-25-2007]\n',
'# Query: hsa:23434 C3orf27; chromosome 3 open reading frame 27\n',
'# Database: /home/zaneveld/quicksand/data/human_genome/h.sapiens.nuc\n',
'# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score\n',
'hsa:23434\thsa:23434\t100.00\t236\t0\t0\t1\t236\t1\t236\t2e-131\t 468\n',
'hsa:23434\thsa:23434\t100.00\t196\t0\t0\t255\t450\t255\t450\t1e-107\t 389\n']
TEST_BLAST_DB2_LINES = [
">hsa:8355_tweaked HIST1H3G; <fake data for testing>\n",
"""atggcccgcaccaagcagactgcacgcaagtccaccggtggcaaagcgccgcgcaagcagctggccactaaggcggctttggaaaagcgcgccggccaccggcggcgtgaagaaacctcatcgctaccgtcccggcaccgtggctctgcgcgagattcgccgctatcagaagtcgactgagctgctgatccgcaagttgcctttccaacgcctggtgcgagaaatcgctcaggacttcaagacagatctgcgctttcagacttccgcggtgatggccctgcaggaggcctgcgaggcctacttggtggggctctttgaggataccaacctgtgtgccatccatgctaagcgagtgactatcatgcccaaggacattcagctcgctcgccgcattcgtggggagagagcgtag\n""",
">bth:BT_0001 hypothetical protein\n",
"""ttggtatctaccagtacgcacgacgatgcttttgacttcgactttggttacactggtaagcttcagttcttggtagccactgtagatgcaaatagtacctattacactaaagacccgaatggtattgaatgtgataacgacggaagcagttcatctttaactccgttcactcacccgacaatcagtaacttaacaatcgttggaaccgttaatggtaaggttgcacaatctgcaatgggtgatggtaaatccatgaaatcttgtgccaacttccgtagaaactgccaatttactttggtgaacagtattctttacggatatcctaccggtatcttgtgtgaaaccactaacagctatgttttcaaaaacaatgttgtaaatggtgttagtactacattttcaggtatcacagctgacgcgactaatactgctgctgcaagtgctgaggctattgggctgacttctccgtggggtggatatacaggtttgatgcctaatgcatctccagccaatgcaggtgcagattttagtgaattggatagttggtttacgactacttcttacagaggtgctgttggtggacgttcaaactggttaactcaagcgtgggtaaaataa\n"""]
TEST_BLAST_DB_LINES = [
">hsa:8355 HIST1H3G; histone cluster 1, H3g ; K11253 histone H3\n",
"""atggcccgcaccaagcagactgcacgcaagtccaccggtggcaaagcgccgcgcaagcagctggccactaaggcggctcggaaaagcgcgccggccaccggcggcgtgaagaaacctcatcgctaccgtcccggcaccgtggctctgcgcgagattcgccgctatcagaagtcgactgagctgctgatccgcaagttgcctttccaacgcctggtgcgagaaatcgctcaggacttcaagacagatctgcgctttcagagttccgcggtgatggccctgcaggaggcctgcgaggcctacttggtggggctctttgaggataccaacctgtgtgccatccatgctaagcgagtgactatcatgcccaaggacattcagctcgctcgccgcattcgtggggagagagcgtag\n""",
">bth:BT_0001 hypothetical protein\n",
"""ttggtatctaccagtacgcacgacgatgcttttgacttcgactttggttacactggtaagcttcagttcttggtagccactgtagatgcaaatagtacctattacactaaagacccgaatggtattgaatgtgataacgacggaagcagttcatctttaactccgttcactcacccgacaatcagtaacttaacaatcgttggaaccgttaatggtaaggttgcacaatctgcaatgggtgatggtaaatccatgaaatcttgtgccaacttccgtagaaactgccaatttactttggtgaacagtattctttacggatatcctaccggtatcttgtgtgaaaccactaacagctatgttttcaaaaacaatgttgtaaatggtgttagtactacattttcaggtatcacagctgacgcgactaatactgctgctgcaagtgctgaggctattgggctgacttctccgtggggtggatatacaggtttgatgcctaatgcatctccagccaatgcaggtgcagattttagtgaattggatagttggtttacgactacttcttacagaggtgctgttggtggacgttcaaactggttaactcaagcgtgggtaaaataa\n"""]
EXP_BLAST_OUTPUT =\
['# BLASTN 2.2.16 [Mar-25-2007]\n',
'# Query: hsa:8355 HIST1H3G; histone cluster 1, H3g ; K11253 histone H3\n',
'# Database: /tmp/ExcludeByBlastTests_FQifdUaBoAngS1XGIvnP.fasta\n',
'# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score\n',
'hsa:8355\thsa:8355\t100.00\t411\t0\t0\t1\t411\t1\t411\t0.0\t 815\n',
'# BLASTN 2.2.16 [Mar-25-2007]\n',
'# Query: bth:BT_0001 hypothetical protein\n',
'# Database: /tmp/ExcludeByBlastTests_FQifdUaBoAngS1XGIvnP.fasta\n',
'# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score\n',
'bth:BT_0001\tbth:BT_0001\t100.00\t618\t0\t0\t1\t618\t1\t618\t0.0\t1225\n']
if __name__ == "__main__":
main()
|
adamrp/qiime
|
tests/test_exclude_seqs_by_blast.py
|
Python
|
gpl-2.0
| 14,442
|
[
"BLAST"
] |
36b380a060704e622961bb25f1e934913432cc867cf07d7a130095e7552ca7bc
|
# GoPiGo Connectome
# Written by Timothy Busbice, Gabriel Garrett, Geoffrey Churchill (c) 2014, in Python 2.7
# The GoPiGo Connectome uses a postSynaptic dictionary based on the C Elegans Connectome Model
# This application can be run on the Raspberry Pi GoPiGo robot with a Sonar that represents Nose Touch when activated
# To run standalone without a GoPiGo robot, simply comment out the sections with Start and End comments
# TIME STATE EXPERIMENTAL OPTIMIZATION
#The previous version had a logic error whereby if more than one neuron fired into the same neuron in the next time state,
# it would overwrite the contribution from the previous neuron. Thus, only one neuron could fire into the same neuron at any given time state.
# This version also explicitly lists all left and right muscles, so that during the muscle checks for the motor control function, instead of
# iterating through each neuron, we now iterate only through the relevant muscle neurons.
## Start Comment
#from gopigo import *
## End Comment
import time
import copy
# The postSynaptic dictionary contains the accumulated weighted values as the
# connectome is executed
postSynaptic = {}
global thisState
global nextState
thisState = 0
nextState = 1
# The threshold is the accumulated value that must be exceeded before
# the neurite will fire
threshold = 30
# Accumulators are used to decide the value to send to the Left and Right motors
# of the GoPiGo robot
accumleft = 0
accumright = 0
# Muscle name prefixes, used to exclude muscle cells from axon firing since muscles cannot fire.
muscles = ['MVU', 'MVL', 'MDL', 'MVR', 'MDR']
muscleList = ['MDL07', 'MDL08', 'MDL09', 'MDL10', 'MDL11', 'MDL12', 'MDL13', 'MDL14', 'MDL15', 'MDL16', 'MDL17', 'MDL18', 'MDL19', 'MDL20', 'MDL21', 'MDL22', 'MDL23', 'MVL07', 'MVL08', 'MVL09', 'MVL10', 'MVL11', 'MVL12', 'MVL13', 'MVL14', 'MVL15', 'MVL16', 'MVL17', 'MVL18', 'MVL19', 'MVL20', 'MVL21', 'MVL22', 'MVL23', 'MDR07', 'MDR08', 'MDR09', 'MDR10', 'MDR11', 'MDR12', 'MDR13', 'MDR14', 'MDR15', 'MDR16', 'MDR17', 'MDR18', 'MDR19', 'MDR20', 'MDR21', 'MDR22', 'MDR23', 'MVR07', 'MVR08', 'MVR09', 'MVR10', 'MVR11', 'MVR12', 'MVR13', 'MVR14', 'MVR15', 'MVR16', 'MVR17', 'MVR18', 'MVR19', 'MVR20', 'MVR21', 'MVR22', 'MVR23']  # MDR21/MVR21 corrected; the original mistakenly listed the left-side names here
mLeft = ['MDL07', 'MDL08', 'MDL09', 'MDL10', 'MDL11', 'MDL12', 'MDL13', 'MDL14', 'MDL15', 'MDL16', 'MDL17', 'MDL18', 'MDL19', 'MDL20', 'MDL21', 'MDL22', 'MDL23', 'MVL07', 'MVL08', 'MVL09', 'MVL10', 'MVL11', 'MVL12', 'MVL13', 'MVL14', 'MVL15', 'MVL16', 'MVL17', 'MVL18', 'MVL19', 'MVL20', 'MVL21', 'MVL22', 'MVL23']
mRight = ['MDR07', 'MDR08', 'MDR09', 'MDR10', 'MDR11', 'MDR12', 'MDR13', 'MDR14', 'MDR15', 'MDR16', 'MDR17', 'MDR18', 'MDR19', 'MDR20', 'MDR21', 'MDR22', 'MDR23', 'MVR07', 'MVR08', 'MVR09', 'MVR10', 'MVR11', 'MVR12', 'MVR13', 'MVR14', 'MVR15', 'MVR16', 'MVR17', 'MVR18', 'MVR19', 'MVR20', 'MVR21', 'MVR22', 'MVR23']  # MDR21/MVR21 corrected from mistyped left-side names
# Used to accumulate muscle weighted values in body muscles 07-23 = worm locomotion
musDleft = ['MDL07', 'MDL08', 'MDL09', 'MDL10', 'MDL11', 'MDL12', 'MDL13', 'MDL14', 'MDL15', 'MDL16', 'MDL17', 'MDL18', 'MDL19', 'MDL20', 'MDL21', 'MDL22', 'MDL23']
musVleft = ['MVL07', 'MVL08', 'MVL09', 'MVL10', 'MVL11', 'MVL12', 'MVL13', 'MVL14', 'MVL15', 'MVL16', 'MVL17', 'MVL18', 'MVL19', 'MVL20', 'MVL21', 'MVL22', 'MVL23']
musDright = ['MDR07', 'MDR08', 'MDR09', 'MDR10', 'MDR11', 'MDR12', 'MDR13', 'MDR14', 'MDR15', 'MDR16', 'MDR17', 'MDR18', 'MDR19', 'MDR20', 'MDR21', 'MDR22', 'MDR23']  # MDR21 corrected from mistyped 'MDL21'
musVright = ['MVR07', 'MVR08', 'MVR09', 'MVR10', 'MVR11', 'MVR12', 'MVR13', 'MVR14', 'MVR15', 'MVR16', 'MVR17', 'MVR18', 'MVR19', 'MVR20', 'MVR21', 'MVR22', 'MVR23']  # MVR21 corrected from mistyped 'MVL21'
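# Illustrative sketch (hypothetical; the actual motor-control loop appears
# later in this file): the left/right accumulators are meant to be filled by
# summing the thisState slot of each muscle cell, e.g.:
#   accumleft  = sum(postSynaptic[m][thisState] for m in mLeft)
#   accumright = sum(postSynaptic[m][thisState] for m in mRight)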
# This is the full C. elegans connectome, expressed as one function per
# presynaptic neuron; each call adds that neuron's synaptic weights to the
# postSynaptic accumulators of its target neurites.
# Older formulation: postSynaptic['ADAR'][nextState] = (2 + postSynaptic['ADAR'][thisState])
# arr = postSynaptic['AIBR']  # potential optimization: cache the inner list lookup
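# --- Illustrative sketch (hypothetical, not part of the original model) of
# the two-slot buffering the neuron functions below rely on: every neuron
# adds into the nextState slot, and the main loop (later in this file)
# advances time by moving nextState into thisState and zeroing it, so several
# neurons can fire into one target in a single step without overwriting:
_demo = {'TARGET': [0, 0]}        # [thisState, nextState] accumulator
_demo['TARGET'][nextState] += 2   # first presynaptic neuron fires
_demo['TARGET'][nextState] += 3   # second one adds to, rather than overwrites
_demo['TARGET'][thisState], _demo['TARGET'][nextState] = _demo['TARGET'][nextState], 0
assert _demo['TARGET'][thisState] == 5
del _demo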
def ADAL():
postSynaptic['ADAR'][nextState] += 2
postSynaptic['ADFL'][nextState] += 1
postSynaptic['AIBL'][nextState] += 1
postSynaptic['AIBR'][nextState] += 2
postSynaptic['ASHL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 2
postSynaptic['AVBL'][nextState] += 4
postSynaptic['AVBR'][nextState] += 7
postSynaptic['AVDL'][nextState] += 1
postSynaptic['AVDR'][nextState] += 2
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVJR'][nextState] += 5
postSynaptic['FLPR'][nextState] += 1
postSynaptic['PVQL'][nextState] += 1
postSynaptic['RICL'][nextState] += 1
postSynaptic['RICR'][nextState] += 1
postSynaptic['RIML'][nextState] += 3
postSynaptic['RIPL'][nextState] += 1
postSynaptic['SMDVR'][nextState] += 2
    #print (nextState)  # debug leftover; commented out like the print in ADFL()
def ADAR():
postSynaptic['ADAL'][nextState] += 1
postSynaptic['ADFR'][nextState] += 1
postSynaptic['AIBL'][nextState] += 1
postSynaptic['AIBR'][nextState] += 1
postSynaptic['ASHR'][nextState] += 1
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 5
postSynaptic['AVDL'][nextState] += 2
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVJL'][nextState] += 3
postSynaptic['PVQR'][nextState] += 1
postSynaptic['RICL'][nextState] += 1
postSynaptic['RIMR'][nextState] += 5
postSynaptic['RIPR'][nextState] += 1
postSynaptic['RIVR'][nextState] += 1
postSynaptic['SMDVL'][nextState] += 2
def ADEL():
postSynaptic['ADAL'][nextState] += 1
postSynaptic['ADER'][nextState] += 1
postSynaptic['AINL'][nextState] += 1
postSynaptic['AVAL'][nextState] += 2
postSynaptic['AVAR'][nextState] += 3
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVKR'][nextState] += 1
postSynaptic['AVL'][nextState] += 1
postSynaptic['BDUL'][nextState] += 1
postSynaptic['CEPDL'][nextState] += 1
postSynaptic['FLPL'][nextState] += 1
postSynaptic['IL1L'][nextState] += 1
postSynaptic['IL2L'][nextState] += 1
postSynaptic['MDL05'][nextState] += 1
postSynaptic['OLLL'][nextState] += 1
postSynaptic['RIAL'][nextState] += 1
postSynaptic['RIFL'][nextState] += 1
postSynaptic['RIGL'][nextState] += 5
postSynaptic['RIGR'][nextState] += 3
postSynaptic['RIH'][nextState] += 2
postSynaptic['RIVL'][nextState] += 1
postSynaptic['RIVR'][nextState] += 1
postSynaptic['RMDL'][nextState] += 2
postSynaptic['RMGL'][nextState] += 1
postSynaptic['RMHL'][nextState] += 1
postSynaptic['SIADR'][nextState] += 1
postSynaptic['SIBDR'][nextState] += 1
postSynaptic['SMBDR'][nextState] += 1
postSynaptic['URBL'][nextState] += 1
def ADER():
postSynaptic['ADAR'][nextState] += 1
postSynaptic['ADEL'][nextState] += 2
postSynaptic['ALA'][nextState] += 1
postSynaptic['AVAL'][nextState] += 5
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVDR'][nextState] += 2
postSynaptic['AVER'][nextState] += 1
postSynaptic['AVJR'][nextState] += 1
postSynaptic['AVKL'][nextState] += 2
postSynaptic['AVKR'][nextState] += 1
postSynaptic['CEPDR'][nextState] += 1
postSynaptic['FLPL'][nextState] += 1
postSynaptic['FLPR'][nextState] += 1
postSynaptic['OLLR'][nextState] += 2
postSynaptic['PVR'][nextState] += 1
postSynaptic['RIGL'][nextState] += 7
postSynaptic['RIGR'][nextState] += 4
postSynaptic['RIH'][nextState] += 1
postSynaptic['RMDR'][nextState] += 2
postSynaptic['SAAVR'][nextState] += 1
def ADFL():
postSynaptic['ADAL'][nextState] += 2
postSynaptic['AIZL'][nextState] += 12
postSynaptic['AUAL'][nextState] += 5
postSynaptic['OLQVL'][nextState] += 1
postSynaptic['RIAL'][nextState] += 15
postSynaptic['RIGL'][nextState] += 1
postSynaptic['RIR'][nextState] += 2
postSynaptic['SMBVL'][nextState] += 2
#print (postSynaptic['ADAL'][nextState])
def ADFR():
postSynaptic['ADAR'][nextState] += 2
postSynaptic['AIAR'][nextState] += 1
postSynaptic['AIYR'][nextState] += 1
postSynaptic['AIZR'][nextState] += 8
postSynaptic['ASHR'][nextState] += 1
postSynaptic['AUAR'][nextState] += 4
postSynaptic['AWBR'][nextState] += 1
postSynaptic['PVPR'][nextState] += 1
postSynaptic['RIAR'][nextState] += 16
postSynaptic['RIGR'][nextState] += 3
postSynaptic['RIR'][nextState] += 3
postSynaptic['SMBDR'][nextState] += 1
postSynaptic['SMBVR'][nextState] += 2
postSynaptic['URXR'][nextState] += 1
def ADLL():
postSynaptic['ADLR'][nextState] += 1
postSynaptic['AIAL'][nextState] += 6
postSynaptic['AIBL'][nextState] += 7
postSynaptic['AIBR'][nextState] += 1
postSynaptic['ALA'][nextState] += 2
postSynaptic['ASER'][nextState] += 3
postSynaptic['ASHL'][nextState] += 2
postSynaptic['AVAL'][nextState] += 2
postSynaptic['AVAR'][nextState] += 3
postSynaptic['AVBL'][nextState] += 2
postSynaptic['AVDL'][nextState] += 1
postSynaptic['AVDR'][nextState] += 4
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVJL'][nextState] += 1
postSynaptic['AVJR'][nextState] += 3
postSynaptic['AWBL'][nextState] += 2
postSynaptic['OLQVL'][nextState] += 2
postSynaptic['RIPL'][nextState] += 1
postSynaptic['RMGL'][nextState] += 1
def ADLR():
postSynaptic['ADLL'][nextState] += 1
postSynaptic['AIAR'][nextState] += 10
postSynaptic['AIBR'][nextState] += 10
postSynaptic['ASER'][nextState] += 1
postSynaptic['ASHR'][nextState] += 3
postSynaptic['AVAR'][nextState] += 2
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 2
postSynaptic['AVDL'][nextState] += 5
postSynaptic['AVDR'][nextState] += 2
postSynaptic['AVJR'][nextState] += 1
postSynaptic['AWCR'][nextState] += 3
postSynaptic['OLLR'][nextState] += 1
postSynaptic['PVCL'][nextState] += 1
postSynaptic['RICL'][nextState] += 1
postSynaptic['RICR'][nextState] += 1
def AFDL():
postSynaptic['AFDR'][nextState] += 1
postSynaptic['AIBL'][nextState] += 1
postSynaptic['AINR'][nextState] += 1
postSynaptic['AIYL'][nextState] += 7
def AFDR():
postSynaptic['AFDL'][nextState] += 1
postSynaptic['AIBR'][nextState] += 1
postSynaptic['AIYR'][nextState] += 13
postSynaptic['ASER'][nextState] += 1
def AIAL():
postSynaptic['ADAL'][nextState] += 1
postSynaptic['AIAR'][nextState] += 1
postSynaptic['AIBL'][nextState] += 10
postSynaptic['AIML'][nextState] += 2
postSynaptic['AIZL'][nextState] += 1
postSynaptic['ASER'][nextState] += 3
postSynaptic['ASGL'][nextState] += 1
postSynaptic['ASHL'][nextState] += 1
postSynaptic['ASIL'][nextState] += 2
postSynaptic['ASKL'][nextState] += 3
postSynaptic['AWAL'][nextState] += 1
postSynaptic['AWCR'][nextState] += 1
postSynaptic['HSNL'][nextState] += 1
postSynaptic['RIFL'][nextState] += 1
postSynaptic['RMGL'][nextState] += 1
def AIAR():
postSynaptic['ADAR'][nextState] += 1
postSynaptic['ADFR'][nextState] += 1
postSynaptic['ADLR'][nextState] += 2
postSynaptic['AIAL'][nextState] += 1
postSynaptic['AIBR'][nextState] += 14
postSynaptic['AIZR'][nextState] += 1
postSynaptic['ASER'][nextState] += 1
postSynaptic['ASGR'][nextState] += 1
postSynaptic['ASIR'][nextState] += 2
postSynaptic['AWAR'][nextState] += 2
postSynaptic['AWCL'][nextState] += 1
postSynaptic['AWCR'][nextState] += 3
postSynaptic['RIFR'][nextState] += 2
def AIBL():
postSynaptic['AFDL'][nextState] += 1
postSynaptic['AIYL'][nextState] += 1
postSynaptic['ASER'][nextState] += 1
postSynaptic['AVAL'][nextState] += 2
postSynaptic['AVBL'][nextState] += 5
postSynaptic['DVC'][nextState] += 1
postSynaptic['FLPL'][nextState] += 1
postSynaptic['PVT'][nextState] += 1
postSynaptic['RIBR'][nextState] += 4
postSynaptic['RIFL'][nextState] += 1
postSynaptic['RIGR'][nextState] += 1
postSynaptic['RIGR'][nextState] += 3
postSynaptic['RIML'][nextState] += 2
postSynaptic['RIMR'][nextState] += 13
postSynaptic['RIMR'][nextState] += 1
postSynaptic['RIVL'][nextState] += 1
postSynaptic['SAADL'][nextState] += 2
postSynaptic['SAADR'][nextState] += 2
postSynaptic['SMDDR'][nextState] += 4
def AIBR():
postSynaptic['AFDR'][nextState] += 1
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVBR'][nextState] += 3
postSynaptic['AVEL'][nextState] += 1
postSynaptic['DB1'][nextState] += 1
postSynaptic['DVC'][nextState] += 2
postSynaptic['PVT'][nextState] += 1
postSynaptic['RIAL'][nextState] += 1
postSynaptic['RIBL'][nextState] += 4
postSynaptic['RIGL'][nextState] += 3
postSynaptic['RIML'][nextState] += 16
postSynaptic['RIML'][nextState] += 1
postSynaptic['RIMR'][nextState] += 1
postSynaptic['RIS'][nextState] += 1
postSynaptic['RIVR'][nextState] += 1
postSynaptic['SAADL'][nextState] += 1
postSynaptic['SMDDL'][nextState] += 3
postSynaptic['SMDVL'][nextState] += 1
postSynaptic['VB1'][nextState] += 3
def AIML():
postSynaptic['AIAL'][nextState] += 5
postSynaptic['ALML'][nextState] += 1
postSynaptic['ASGL'][nextState] += 2
postSynaptic['ASKL'][nextState] += 2
postSynaptic['AVBR'][nextState] += 2
postSynaptic['AVDL'][nextState] += 1
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVER'][nextState] += 1
postSynaptic['AVFL'][nextState] += 4
postSynaptic['AVFR'][nextState] += 1
postSynaptic['AVHL'][nextState] += 2
postSynaptic['AVHR'][nextState] += 1
postSynaptic['AVJL'][nextState] += 1
postSynaptic['PVQL'][nextState] += 1
postSynaptic['RIFL'][nextState] += 1
postSynaptic['SIBDR'][nextState] += 1
postSynaptic['SMBVL'][nextState] += 1
def AIMR():
postSynaptic['AIAR'][nextState] += 5
postSynaptic['ASGR'][nextState] += 2
postSynaptic['ASJR'][nextState] += 2
postSynaptic['ASKR'][nextState] += 3
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVFL'][nextState] += 1
postSynaptic['AVFR'][nextState] += 1
postSynaptic['HSNL'][nextState] += 1
postSynaptic['HSNR'][nextState] += 2
postSynaptic['OLQDR'][nextState] += 1
postSynaptic['PVNR'][nextState] += 1
postSynaptic['RIFR'][nextState] += 1
postSynaptic['RMGR'][nextState] += 1
def AINL():
postSynaptic['ADEL'][nextState] += 1
postSynaptic['AFDR'][nextState] += 5
postSynaptic['AINR'][nextState] += 2
postSynaptic['ASEL'][nextState] += 3
postSynaptic['ASGR'][nextState] += 2
postSynaptic['AUAR'][nextState] += 2
postSynaptic['BAGL'][nextState] += 3
postSynaptic['RIBL'][nextState] += 1
postSynaptic['RIBR'][nextState] += 2
def AINR():
postSynaptic['AFDL'][nextState] += 4
postSynaptic['AFDR'][nextState] += 1
postSynaptic['AIAL'][nextState] += 2
postSynaptic['AIBL'][nextState] += 2
postSynaptic['AINL'][nextState] += 2
postSynaptic['ASEL'][nextState] += 1
postSynaptic['ASER'][nextState] += 1
postSynaptic['ASGL'][nextState] += 1
postSynaptic['AUAL'][nextState] += 1
postSynaptic['AUAR'][nextState] += 1
postSynaptic['BAGR'][nextState] += 3
postSynaptic['RIBL'][nextState] += 2
postSynaptic['RID'][nextState] += 1
def AIYL():
postSynaptic['AIYR'][nextState] += 1
postSynaptic['AIZL'][nextState] += 13
postSynaptic['AWAL'][nextState] += 3
postSynaptic['AWCL'][nextState] += 1
postSynaptic['AWCR'][nextState] += 1
postSynaptic['HSNR'][nextState] += 1
postSynaptic['RIAL'][nextState] += 7
postSynaptic['RIBL'][nextState] += 4
postSynaptic['RIML'][nextState] += 1
def AIYR():
postSynaptic['ADFR'][nextState] += 1
postSynaptic['AIYL'][nextState] += 1
postSynaptic['AIZR'][nextState] += 8
postSynaptic['AWAR'][nextState] += 1
postSynaptic['HSNL'][nextState] += 1
postSynaptic['RIAR'][nextState] += 6
postSynaptic['RIBR'][nextState] += 2
postSynaptic['RIMR'][nextState] += 1
def AIZL():
postSynaptic['AIAL'][nextState] += 3
postSynaptic['AIBL'][nextState] += 2
postSynaptic['AIBR'][nextState] += 8
postSynaptic['AIZR'][nextState] += 2
postSynaptic['ASEL'][nextState] += 1
postSynaptic['ASGL'][nextState] += 1
postSynaptic['ASHL'][nextState] += 1
postSynaptic['AVER'][nextState] += 5
postSynaptic['DVA'][nextState] += 1
postSynaptic['RIAL'][nextState] += 8
postSynaptic['RIGL'][nextState] += 1
postSynaptic['RIML'][nextState] += 4
postSynaptic['SMBDL'][nextState] += 9
postSynaptic['SMBVL'][nextState] += 7
postSynaptic['VB2'][nextState] += 1
def AIZR():
postSynaptic['AIAR'][nextState] += 1
postSynaptic['AIBL'][nextState] += 8
postSynaptic['AIBR'][nextState] += 1
postSynaptic['AIZL'][nextState] += 2
postSynaptic['ASGR'][nextState] += 1
postSynaptic['ASHR'][nextState] += 1
postSynaptic['AVEL'][nextState] += 4
postSynaptic['AVER'][nextState] += 1
postSynaptic['AWAR'][nextState] += 1
postSynaptic['DVA'][nextState] += 1
postSynaptic['RIAR'][nextState] += 7
postSynaptic['RIMR'][nextState] += 4
postSynaptic['SMBDR'][nextState] += 5
postSynaptic['SMBVR'][nextState] += 3
postSynaptic['SMDDR'][nextState] += 1
def ALA():
postSynaptic['ADEL'][nextState] += 1
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVEL'][nextState] += 2
postSynaptic['AVER'][nextState] += 1
postSynaptic['RID'][nextState] += 1
postSynaptic['RMDR'][nextState] += 1
def ALML():
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVM'][nextState] += 1
postSynaptic['BDUL'][nextState] += 6
postSynaptic['CEPDL'][nextState] += 3
postSynaptic['CEPVL'][nextState] += 2
postSynaptic['PVCL'][nextState] += 2
postSynaptic['PVCR'][nextState] += 1
postSynaptic['PVR'][nextState] += 1
postSynaptic['RMDDR'][nextState] += 1
postSynaptic['RMGL'][nextState] += 1
postSynaptic['SDQL'][nextState] += 1
def ALMR():
postSynaptic['AVM'][nextState] += 1
postSynaptic['BDUR'][nextState] += 5
postSynaptic['CEPDR'][nextState] += 1
postSynaptic['CEPVR'][nextState] += 1
postSynaptic['PVCR'][nextState] += 3
postSynaptic['RMDDL'][nextState] += 1
postSynaptic['SIADL'][nextState] += 1
def ALNL():
postSynaptic['SAAVL'][nextState] += 3
postSynaptic['SMBDR'][nextState] += 2
postSynaptic['SMBDR'][nextState] += 1
postSynaptic['SMDVL'][nextState] += 1
def ALNR():
postSynaptic['ADER'][nextState] += 1
postSynaptic['RMHR'][nextState] += 1
postSynaptic['SAAVR'][nextState] += 2
postSynaptic['SMBDL'][nextState] += 2
postSynaptic['SMDDR'][nextState] += 1
postSynaptic['SMDVL'][nextState] += 1
def AQR():
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 3
postSynaptic['AVBL'][nextState] += 3
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 4
postSynaptic['AVDL'][nextState] += 1
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVJL'][nextState] += 1
postSynaptic['AVKL'][nextState] += 2
postSynaptic['AVKR'][nextState] += 1
postSynaptic['BAGL'][nextState] += 2
postSynaptic['BAGR'][nextState] += 2
postSynaptic['PVCR'][nextState] += 2
postSynaptic['PVPL'][nextState] += 1
postSynaptic['PVPL'][nextState] += 7
postSynaptic['PVPR'][nextState] += 9
postSynaptic['RIAL'][nextState] += 3
postSynaptic['RIAR'][nextState] += 1
postSynaptic['RIGL'][nextState] += 2
postSynaptic['RIGR'][nextState] += 1
postSynaptic['URXL'][nextState] += 1
def AS1():
postSynaptic['AVAL'][nextState] += 3
postSynaptic['AVAR'][nextState] += 2
postSynaptic['DA1'][nextState] += 2
postSynaptic['MDL05'][nextState] += 3
postSynaptic['MDL08'][nextState] += 3
postSynaptic['MDR05'][nextState] += 3
postSynaptic['MDR08'][nextState] += 4
postSynaptic['VA3'][nextState] += 1
postSynaptic['VD1'][nextState] += 5
postSynaptic['VD2'][nextState] += 1
def AS2():
postSynaptic['DA2'][nextState] += 1
postSynaptic['DB1'][nextState] += 1
postSynaptic['DD1'][nextState] += 1
postSynaptic['MDL07'][nextState] += 3
postSynaptic['MDL08'][nextState] += 2
postSynaptic['MDR07'][nextState] += 3
postSynaptic['MDR08'][nextState] += 3
postSynaptic['VA4'][nextState] += 2
postSynaptic['VD2'][nextState] += 10
def AS3():
postSynaptic['AVAL'][nextState] += 2
postSynaptic['AVAR'][nextState] += 1
postSynaptic['DA2'][nextState] += 1
postSynaptic['DA3'][nextState] += 1
postSynaptic['DD1'][nextState] += 1
postSynaptic['MDL09'][nextState] += 3
postSynaptic['MDL10'][nextState] += 3
postSynaptic['MDR09'][nextState] += 3
postSynaptic['MDR10'][nextState] += 3
postSynaptic['VA5'][nextState] += 2
postSynaptic['VD2'][nextState] += 1
postSynaptic['VD3'][nextState] += 15
def AS4():
postSynaptic['AS5'][nextState] += 1
postSynaptic['DA3'][nextState] += 1
postSynaptic['MDL11'][nextState] += 2
postSynaptic['MDL12'][nextState] += 2
postSynaptic['MDR11'][nextState] += 3
postSynaptic['MDR12'][nextState] += 2
postSynaptic['VD4'][nextState] += 11
def AS5():
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 1
postSynaptic['DD2'][nextState] += 1
postSynaptic['MDL11'][nextState] += 2
postSynaptic['MDL14'][nextState] += 3
postSynaptic['MDR11'][nextState] += 2
postSynaptic['MDR14'][nextState] += 3
postSynaptic['VA7'][nextState] += 1
postSynaptic['VD5'][nextState] += 9
def AS6():
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVBR'][nextState] += 1
postSynaptic['DA5'][nextState] += 2
postSynaptic['MDL13'][nextState] += 3
postSynaptic['MDL14'][nextState] += 2
postSynaptic['MDR13'][nextState] += 3
postSynaptic['MDR14'][nextState] += 2
postSynaptic['VA8'][nextState] += 1
postSynaptic['VD6'][nextState] += 13
def AS7():
postSynaptic['AVAL'][nextState] += 6
postSynaptic['AVAR'][nextState] += 5
postSynaptic['AVBL'][nextState] += 2
postSynaptic['AVBR'][nextState] += 2
postSynaptic['MDL13'][nextState] += 2
postSynaptic['MDL16'][nextState] += 3
postSynaptic['MDR13'][nextState] += 2
postSynaptic['MDR16'][nextState] += 3
def AS8():
postSynaptic['AVAL'][nextState] += 4
postSynaptic['AVAR'][nextState] += 3
postSynaptic['MDL15'][nextState] += 2
postSynaptic['MDL18'][nextState] += 3
postSynaptic['MDR15'][nextState] += 2
postSynaptic['MDR18'][nextState] += 3
def AS9():
postSynaptic['AVAL'][nextState] += 4
postSynaptic['AVAR'][nextState] += 2
postSynaptic['DVB'][nextState] += 7
postSynaptic['MDL17'][nextState] += 2
postSynaptic['MDL20'][nextState] += 3
postSynaptic['MDR17'][nextState] += 2
postSynaptic['MDR20'][nextState] += 3
def AS10():
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 1
postSynaptic['MDL19'][nextState] += 3
postSynaptic['MDL20'][nextState] += 2
postSynaptic['MDR19'][nextState] += 3
postSynaptic['MDR20'][nextState] += 2
def AS11():
postSynaptic['MDL21'][nextState] += 1
postSynaptic['MDL22'][nextState] += 1
postSynaptic['MDL23'][nextState] += 1
postSynaptic['MDL24'][nextState] += 1
postSynaptic['MDR21'][nextState] += 1
postSynaptic['MDR22'][nextState] += 1
postSynaptic['MDR23'][nextState] += 1
postSynaptic['MDR24'][nextState] += 1
postSynaptic['PDA'][nextState] += 1
postSynaptic['PDB'][nextState] += 1
postSynaptic['PDB'][nextState] += 2
postSynaptic['VD13'][nextState] += 2
def ASEL():
postSynaptic['ADFR'][nextState] += 1
postSynaptic['AIAL'][nextState] += 3
postSynaptic['AIBL'][nextState] += 7
postSynaptic['AIBR'][nextState] += 2
postSynaptic['AIYL'][nextState] += 13
postSynaptic['AIYR'][nextState] += 6
postSynaptic['AWCL'][nextState] += 4
postSynaptic['AWCR'][nextState] += 1
postSynaptic['RIAR'][nextState] += 1
def ASER():
postSynaptic['AFDL'][nextState] += 1
postSynaptic['AFDR'][nextState] += 2
postSynaptic['AIAL'][nextState] += 1
postSynaptic['AIAR'][nextState] += 3
postSynaptic['AIBL'][nextState] += 2
postSynaptic['AIBR'][nextState] += 10
postSynaptic['AIYL'][nextState] += 2
postSynaptic['AIYR'][nextState] += 14
postSynaptic['AWAR'][nextState] += 1
postSynaptic['AWCL'][nextState] += 1
postSynaptic['AWCR'][nextState] += 1
def ASGL():
postSynaptic['AIAL'][nextState] += 9
postSynaptic['AIBL'][nextState] += 3
postSynaptic['AINR'][nextState] += 2
postSynaptic['AIZL'][nextState] += 1
postSynaptic['ASKL'][nextState] += 1
def ASGR():
postSynaptic['AIAR'][nextState] += 10
postSynaptic['AIBR'][nextState] += 2
postSynaptic['AINL'][nextState] += 1
postSynaptic['AIYR'][nextState] += 1
postSynaptic['AIZR'][nextState] += 1
def ASHL():
postSynaptic['ADAL'][nextState] += 2
postSynaptic['ADFL'][nextState] += 3
postSynaptic['AIAL'][nextState] += 7
postSynaptic['AIBL'][nextState] += 5
postSynaptic['AIZL'][nextState] += 1
postSynaptic['ASHR'][nextState] += 1
postSynaptic['ASKL'][nextState] += 1
postSynaptic['AVAL'][nextState] += 2
postSynaptic['AVBL'][nextState] += 6
postSynaptic['AVDL'][nextState] += 2
postSynaptic['AVDR'][nextState] += 2
postSynaptic['RIAL'][nextState] += 4
postSynaptic['RICL'][nextState] += 2
postSynaptic['RIML'][nextState] += 1
postSynaptic['RIPL'][nextState] += 1
postSynaptic['RMGL'][nextState] += 1
def ASHR():
postSynaptic['ADAR'][nextState] += 3
postSynaptic['ADFR'][nextState] += 2
postSynaptic['AIAR'][nextState] += 10
postSynaptic['AIBR'][nextState] += 3
postSynaptic['AIZR'][nextState] += 1
postSynaptic['ASHL'][nextState] += 1
postSynaptic['ASKR'][nextState] += 1
postSynaptic['AVAR'][nextState] += 5
postSynaptic['AVBR'][nextState] += 3
postSynaptic['AVDL'][nextState] += 5
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVER'][nextState] += 3
postSynaptic['HSNR'][nextState] += 1
postSynaptic['PVPR'][nextState] += 1
postSynaptic['RIAR'][nextState] += 2
postSynaptic['RICR'][nextState] += 2
postSynaptic['RMGR'][nextState] += 2
postSynaptic['RMGR'][nextState] += 1
def ASIL():
postSynaptic['AIAL'][nextState] += 2
postSynaptic['AIBL'][nextState] += 1
postSynaptic['AIYL'][nextState] += 2
postSynaptic['AIZL'][nextState] += 1
postSynaptic['ASER'][nextState] += 1
postSynaptic['ASIR'][nextState] += 1
postSynaptic['ASKL'][nextState] += 2
postSynaptic['AWCL'][nextState] += 1
postSynaptic['AWCR'][nextState] += 1
postSynaptic['RIBL'][nextState] += 1
def ASIR():
postSynaptic['AIAL'][nextState] += 1
postSynaptic['AIAR'][nextState] += 3
postSynaptic['AIAR'][nextState] += 2
postSynaptic['AIBR'][nextState] += 1
postSynaptic['ASEL'][nextState] += 2
postSynaptic['ASHR'][nextState] += 1
postSynaptic['ASIL'][nextState] += 1
postSynaptic['AWCL'][nextState] += 1
postSynaptic['AWCR'][nextState] += 1
def ASJL():
postSynaptic['ASJR'][nextState] += 1
postSynaptic['ASKL'][nextState] += 4
postSynaptic['HSNL'][nextState] += 1
postSynaptic['HSNR'][nextState] += 1
postSynaptic['PVQL'][nextState] += 14
def ASJR():
postSynaptic['ASJL'][nextState] += 1
postSynaptic['ASKR'][nextState] += 4
postSynaptic['HSNR'][nextState] += 1
postSynaptic['PVQR'][nextState] += 13
def ASKL():
postSynaptic['AIAL'][nextState] += 11
postSynaptic['AIBL'][nextState] += 2
postSynaptic['AIML'][nextState] += 2
postSynaptic['ASKR'][nextState] += 1
postSynaptic['PVQL'][nextState] += 5
postSynaptic['RMGL'][nextState] += 1
def ASKR():
postSynaptic['AIAR'][nextState] += 11
postSynaptic['AIMR'][nextState] += 1
postSynaptic['ASHR'][nextState] += 1
postSynaptic['ASKL'][nextState] += 1
postSynaptic['AWAR'][nextState] += 1
postSynaptic['CEPVR'][nextState] += 1
postSynaptic['PVQR'][nextState] += 4
postSynaptic['RIFR'][nextState] += 1
postSynaptic['RMGR'][nextState] += 1
def AUAL():
postSynaptic['AINR'][nextState] += 1
postSynaptic['AUAR'][nextState] += 1
postSynaptic['AVAL'][nextState] += 3
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVEL'][nextState] += 3
postSynaptic['AWBL'][nextState] += 1
postSynaptic['RIAL'][nextState] += 5
postSynaptic['RIBL'][nextState] += 9
def AUAR():
postSynaptic['AINL'][nextState] += 1
postSynaptic['AIYR'][nextState] += 1
postSynaptic['AUAL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVER'][nextState] += 4
postSynaptic['AWBR'][nextState] += 1
postSynaptic['RIAR'][nextState] += 6
postSynaptic['RIBR'][nextState] += 13
postSynaptic['URXR'][nextState] += 1
def AVAL():
postSynaptic['AS1'][nextState] += 3
postSynaptic['AS10'][nextState] += 3
postSynaptic['AS11'][nextState] += 4
postSynaptic['AS2'][nextState] += 1
postSynaptic['AS3'][nextState] += 3
postSynaptic['AS4'][nextState] += 1
postSynaptic['AS5'][nextState] += 4
postSynaptic['AS6'][nextState] += 1
postSynaptic['AS7'][nextState] += 14
postSynaptic['AS8'][nextState] += 9
postSynaptic['AS9'][nextState] += 12
postSynaptic['AVAR'][nextState] += 7
postSynaptic['AVBR'][nextState] += 1
postSynaptic['AVDL'][nextState] += 1
postSynaptic['AVHL'][nextState] += 1
postSynaptic['AVJL'][nextState] += 2
postSynaptic['DA1'][nextState] += 4
postSynaptic['DA2'][nextState] += 4
postSynaptic['DA3'][nextState] += 6
postSynaptic['DA4'][nextState] += 10
postSynaptic['DA5'][nextState] += 8
postSynaptic['DA6'][nextState] += 21
postSynaptic['DA7'][nextState] += 4
postSynaptic['DA8'][nextState] += 4
postSynaptic['DA9'][nextState] += 3
postSynaptic['DB5'][nextState] += 2
postSynaptic['DB6'][nextState] += 4
postSynaptic['FLPL'][nextState] += 1
postSynaptic['LUAL'][nextState] += 2
postSynaptic['PVCL'][nextState] += 12
postSynaptic['PVCR'][nextState] += 11
postSynaptic['PVPL'][nextState] += 1
postSynaptic['RIMR'][nextState] += 3
postSynaptic['SABD'][nextState] += 4
postSynaptic['SABVR'][nextState] += 1
postSynaptic['SDQR'][nextState] += 1
postSynaptic['URYDL'][nextState] += 1
postSynaptic['URYVR'][nextState] += 1
postSynaptic['VA1'][nextState] += 3
postSynaptic['VA10'][nextState] += 6
postSynaptic['VA11'][nextState] += 7
postSynaptic['VA12'][nextState] += 2
postSynaptic['VA2'][nextState] += 5
postSynaptic['VA3'][nextState] += 3
postSynaptic['VA4'][nextState] += 3
postSynaptic['VA5'][nextState] += 8
postSynaptic['VA6'][nextState] += 10
postSynaptic['VA7'][nextState] += 2
postSynaptic['VA8'][nextState] += 19
postSynaptic['VA9'][nextState] += 8
postSynaptic['VB9'][nextState] += 5
def AVAR():
postSynaptic['ADER'][nextState] += 1
postSynaptic['AS1'][nextState] += 3
postSynaptic['AS10'][nextState] += 2
postSynaptic['AS11'][nextState] += 6
postSynaptic['AS2'][nextState] += 2
postSynaptic['AS3'][nextState] += 2
postSynaptic['AS4'][nextState] += 1
postSynaptic['AS5'][nextState] += 2
postSynaptic['AS6'][nextState] += 3
postSynaptic['AS7'][nextState] += 8
postSynaptic['AS8'][nextState] += 9
postSynaptic['AS9'][nextState] += 6
postSynaptic['AVAL'][nextState] += 6
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVDL'][nextState] += 1
postSynaptic['AVDR'][nextState] += 2
postSynaptic['AVEL'][nextState] += 2
postSynaptic['AVER'][nextState] += 2
postSynaptic['DA1'][nextState] += 8
postSynaptic['DA2'][nextState] += 4
postSynaptic['DA3'][nextState] += 5
postSynaptic['DA4'][nextState] += 8
postSynaptic['DA5'][nextState] += 7
postSynaptic['DA6'][nextState] += 13
postSynaptic['DA7'][nextState] += 3
postSynaptic['DA8'][nextState] += 9
postSynaptic['DA9'][nextState] += 2
postSynaptic['DB3'][nextState] += 1
postSynaptic['DB5'][nextState] += 3
postSynaptic['DB6'][nextState] += 5
postSynaptic['LUAL'][nextState] += 1
postSynaptic['LUAR'][nextState] += 3
postSynaptic['PDEL'][nextState] += 1
postSynaptic['PDER'][nextState] += 1
postSynaptic['PVCL'][nextState] += 7
postSynaptic['PVCR'][nextState] += 8
postSynaptic['RIGL'][nextState] += 1
postSynaptic['RIML'][nextState] += 2
postSynaptic['RIMR'][nextState] += 1
postSynaptic['SABD'][nextState] += 1
postSynaptic['SABVL'][nextState] += 6
postSynaptic['SABVR'][nextState] += 1
postSynaptic['URYDR'][nextState] += 1
postSynaptic['URYVL'][nextState] += 1
postSynaptic['VA10'][nextState] += 5
postSynaptic['VA11'][nextState] += 15
postSynaptic['VA12'][nextState] += 1
postSynaptic['VA2'][nextState] += 2
postSynaptic['VA3'][nextState] += 7
postSynaptic['VA4'][nextState] += 5
postSynaptic['VA5'][nextState] += 4
postSynaptic['VA6'][nextState] += 5
postSynaptic['VA7'][nextState] += 4
postSynaptic['VA8'][nextState] += 16
postSynaptic['VB9'][nextState] += 10
postSynaptic['VD13'][nextState] += 2
def AVBL():
postSynaptic['AQR'][nextState] += 1
postSynaptic['AS10'][nextState] += 1
postSynaptic['AS3'][nextState] += 1
postSynaptic['AS4'][nextState] += 1
postSynaptic['AS5'][nextState] += 1
postSynaptic['AS6'][nextState] += 1
postSynaptic['AS7'][nextState] += 2
postSynaptic['AS9'][nextState] += 1
postSynaptic['AVAL'][nextState] += 7
postSynaptic['AVAR'][nextState] += 7
postSynaptic['AVBR'][nextState] += 4
postSynaptic['AVDL'][nextState] += 1
postSynaptic['AVDR'][nextState] += 2
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVER'][nextState] += 2
postSynaptic['AVL'][nextState] += 1
postSynaptic['DB3'][nextState] += 1
postSynaptic['DB4'][nextState] += 1
postSynaptic['DB5'][nextState] += 1
postSynaptic['DB6'][nextState] += 2
postSynaptic['DB7'][nextState] += 2
postSynaptic['DVA'][nextState] += 1
postSynaptic['PVNR'][nextState] += 1
postSynaptic['RIBL'][nextState] += 1
postSynaptic['RIBR'][nextState] += 1
postSynaptic['RID'][nextState] += 1
postSynaptic['SDQR'][nextState] += 1
postSynaptic['SIBVL'][nextState] += 1
postSynaptic['VA10'][nextState] += 1
postSynaptic['VA2'][nextState] += 1
postSynaptic['VA7'][nextState] += 1
postSynaptic['VB1'][nextState] += 1
postSynaptic['VB10'][nextState] += 2
postSynaptic['VB11'][nextState] += 2
postSynaptic['VB2'][nextState] += 4
postSynaptic['VB4'][nextState] += 1
postSynaptic['VB5'][nextState] += 1
postSynaptic['VB6'][nextState] += 1
postSynaptic['VB7'][nextState] += 2
postSynaptic['VB8'][nextState] += 7
postSynaptic['VB9'][nextState] += 1
postSynaptic['VC3'][nextState] += 1
def AVBR():
postSynaptic['AS1'][nextState] += 1
postSynaptic['AS10'][nextState] += 1
postSynaptic['AS3'][nextState] += 1
postSynaptic['AS4'][nextState] += 1
postSynaptic['AS5'][nextState] += 1
postSynaptic['AS6'][nextState] += 2
postSynaptic['AS7'][nextState] += 3
postSynaptic['AVAL'][nextState] += 6
postSynaptic['AVAR'][nextState] += 7
postSynaptic['AVBL'][nextState] += 4
postSynaptic['DA5'][nextState] += 1
postSynaptic['DB1'][nextState] += 3
postSynaptic['DB2'][nextState] += 1
postSynaptic['DB3'][nextState] += 1
postSynaptic['DB4'][nextState] += 1
postSynaptic['DB5'][nextState] += 1
postSynaptic['DB6'][nextState] += 1
postSynaptic['DB7'][nextState] += 1
postSynaptic['DD1'][nextState] += 1
postSynaptic['DVA'][nextState] += 1
postSynaptic['HSNR'][nextState] += 1
postSynaptic['PVNL'][nextState] += 2
postSynaptic['RIBL'][nextState] += 1
postSynaptic['RIBR'][nextState] += 1
postSynaptic['RID'][nextState] += 2
postSynaptic['SIBVL'][nextState] += 1
postSynaptic['VA4'][nextState] += 1
postSynaptic['VA8'][nextState] += 1
postSynaptic['VA9'][nextState] += 2
postSynaptic['VB10'][nextState] += 1
postSynaptic['VB11'][nextState] += 1
postSynaptic['VB2'][nextState] += 1
postSynaptic['VB3'][nextState] += 1
postSynaptic['VB4'][nextState] += 1
postSynaptic['VB6'][nextState] += 2
postSynaptic['VB7'][nextState] += 2
postSynaptic['VB8'][nextState] += 3
postSynaptic['VB9'][nextState] += 6
postSynaptic['VD10'][nextState] += 1
postSynaptic['VD3'][nextState] += 1
def AVDL():
postSynaptic['ADAR'][nextState] += 2
postSynaptic['AS1'][nextState] += 1
postSynaptic['AS10'][nextState] += 1
postSynaptic['AS11'][nextState] += 2
postSynaptic['AS4'][nextState] += 1
postSynaptic['AS5'][nextState] += 1
postSynaptic['AVAL'][nextState] += 13
postSynaptic['AVAR'][nextState] += 19
postSynaptic['AVM'][nextState] += 2
postSynaptic['DA1'][nextState] += 1
postSynaptic['DA2'][nextState] += 1
postSynaptic['DA3'][nextState] += 4
postSynaptic['DA4'][nextState] += 1
postSynaptic['DA5'][nextState] += 1
postSynaptic['DA8'][nextState] += 1
postSynaptic['FLPL'][nextState] += 1
postSynaptic['FLPR'][nextState] += 1
postSynaptic['LUAL'][nextState] += 1
postSynaptic['PVCL'][nextState] += 1
postSynaptic['SABD'][nextState] += 1
postSynaptic['SABVL'][nextState] += 1
postSynaptic['SABVR'][nextState] += 1
postSynaptic['VA5'][nextState] += 1
def AVDR():
postSynaptic['ADAL'][nextState] += 2
postSynaptic['ADLL'][nextState] += 1
postSynaptic['AS10'][nextState] += 1
postSynaptic['AS5'][nextState] += 1
postSynaptic['AVAL'][nextState] += 16
postSynaptic['AVAR'][nextState] += 15
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVDL'][nextState] += 2
postSynaptic['AVJL'][nextState] += 2
postSynaptic['DA1'][nextState] += 2
postSynaptic['DA2'][nextState] += 1
postSynaptic['DA3'][nextState] += 1
postSynaptic['DA4'][nextState] += 1
postSynaptic['DA5'][nextState] += 2
postSynaptic['DA8'][nextState] += 1
postSynaptic['DA9'][nextState] += 1
postSynaptic['DB4'][nextState] += 1
postSynaptic['DVC'][nextState] += 1
postSynaptic['FLPR'][nextState] += 1
postSynaptic['LUAL'][nextState] += 2
postSynaptic['PQR'][nextState] += 1
postSynaptic['SABD'][nextState] += 1
postSynaptic['SABVL'][nextState] += 3
postSynaptic['SABVR'][nextState] += 1
postSynaptic['VA11'][nextState] += 1
postSynaptic['VA2'][nextState] += 1
postSynaptic['VA3'][nextState] += 2
postSynaptic['VA6'][nextState] += 1
def AVEL():
postSynaptic['AS1'][nextState] += 1
postSynaptic['AVAL'][nextState] += 12
postSynaptic['AVAR'][nextState] += 7
postSynaptic['AVER'][nextState] += 1
postSynaptic['DA1'][nextState] += 5
postSynaptic['DA2'][nextState] += 1
postSynaptic['DA3'][nextState] += 3
postSynaptic['DA4'][nextState] += 1
postSynaptic['PVCR'][nextState] += 1
postSynaptic['PVT'][nextState] += 1
postSynaptic['RIML'][nextState] += 2
postSynaptic['RIMR'][nextState] += 3
postSynaptic['RMDVR'][nextState] += 1
postSynaptic['RMEV'][nextState] += 1
postSynaptic['SABD'][nextState] += 6
postSynaptic['SABVL'][nextState] += 7
postSynaptic['SABVR'][nextState] += 3
postSynaptic['VA1'][nextState] += 5
postSynaptic['VA3'][nextState] += 3
postSynaptic['VD2'][nextState] += 1
postSynaptic['VD3'][nextState] += 1
def AVER():
postSynaptic['AS1'][nextState] += 3
postSynaptic['AS2'][nextState] += 2
postSynaptic['AS3'][nextState] += 1
postSynaptic['AVAL'][nextState] += 7
postSynaptic['AVAR'][nextState] += 16
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVEL'][nextState] += 1
postSynaptic['DA1'][nextState] += 5
postSynaptic['DA2'][nextState] += 3
postSynaptic['DA3'][nextState] += 1
postSynaptic['DB3'][nextState] += 1
postSynaptic['RIML'][nextState] += 3
postSynaptic['RIMR'][nextState] += 2
postSynaptic['RMDVL'][nextState] += 1
postSynaptic['RMDVR'][nextState] += 1
postSynaptic['RMEV'][nextState] += 1
postSynaptic['SABD'][nextState] += 2
postSynaptic['SABVL'][nextState] += 3
postSynaptic['SABVR'][nextState] += 3
postSynaptic['VA1'][nextState] += 1
postSynaptic['VA2'][nextState] += 1
postSynaptic['VA3'][nextState] += 2
postSynaptic['VA4'][nextState] += 1
postSynaptic['VA5'][nextState] += 1
def AVFL():
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 2
postSynaptic['AVFR'][nextState] += 30
postSynaptic['AVG'][nextState] += 1
postSynaptic['AVHL'][nextState] += 4
postSynaptic['AVHR'][nextState] += 7
postSynaptic['AVJL'][nextState] += 1
postSynaptic['AVJR'][nextState] += 1
postSynaptic['AVL'][nextState] += 1
postSynaptic['HSNL'][nextState] += 1
postSynaptic['MVL11'][nextState] += 1
postSynaptic['MVL12'][nextState] += 1
postSynaptic['PDER'][nextState] += 1
postSynaptic['PVNL'][nextState] += 2
postSynaptic['PVQL'][nextState] += 1
postSynaptic['PVQR'][nextState] += 2
postSynaptic['VB1'][nextState] += 1
def AVFR():
postSynaptic['ASJL'][nextState] += 1
postSynaptic['ASKL'][nextState] += 1
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 5
postSynaptic['AVFL'][nextState] += 24
postSynaptic['AVHL'][nextState] += 4
postSynaptic['AVHR'][nextState] += 2
postSynaptic['AVJL'][nextState] += 1
postSynaptic['AVJR'][nextState] += 1
postSynaptic['HSNR'][nextState] += 1
postSynaptic['MVL14'][nextState] += 2
postSynaptic['MVR14'][nextState] += 2
postSynaptic['PVQL'][nextState] += 1
postSynaptic['VC4'][nextState] += 1
postSynaptic['VD11'][nextState] += 1
def AVG():
postSynaptic['AVAR'][nextState] += 3
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 2
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVER'][nextState] += 1
postSynaptic['AVFL'][nextState] += 1
postSynaptic['AVJL'][nextState] += 1
postSynaptic['AVL'][nextState] += 1
postSynaptic['DA8'][nextState] += 1
postSynaptic['PHAL'][nextState] += 2
postSynaptic['PVCL'][nextState] += 1
postSynaptic['PVNR'][nextState] += 1
postSynaptic['PVPR'][nextState] += 1
postSynaptic['PVQR'][nextState] += 1
postSynaptic['PVT'][nextState] += 1
postSynaptic['RIFL'][nextState] += 1
postSynaptic['RIFR'][nextState] += 1
postSynaptic['VA11'][nextState] += 1
def AVHL():
postSynaptic['ADFR'][nextState] += 3
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 1
postSynaptic['AVDL'][nextState] += 1
postSynaptic['AVFL'][nextState] += 3
postSynaptic['AVFR'][nextState] += 5
postSynaptic['AVHR'][nextState] += 2
postSynaptic['AVJL'][nextState] += 1
postSynaptic['AWBR'][nextState] += 1
postSynaptic['PHBR'][nextState] += 1
postSynaptic['PVPR'][nextState] += 2
postSynaptic['PVQL'][nextState] += 1
postSynaptic['PVQR'][nextState] += 2
postSynaptic['RIMR'][nextState] += 1
postSynaptic['RIR'][nextState] += 3
postSynaptic['SMBDR'][nextState] += 1
postSynaptic['SMBVR'][nextState] += 1
postSynaptic['VD1'][nextState] += 1
def AVHR():
postSynaptic['ADLL'][nextState] += 1
postSynaptic['ADLR'][nextState] += 2
postSynaptic['AQR'][nextState] += 2
postSynaptic['AVBL'][nextState] += 2
postSynaptic['AVBR'][nextState] += 1
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVFL'][nextState] += 1
postSynaptic['AVFR'][nextState] += 2
postSynaptic['AVHL'][nextState] += 2
postSynaptic['AVJR'][nextState] += 4
postSynaptic['PVNL'][nextState] += 1
postSynaptic['PVPL'][nextState] += 3
postSynaptic['RIGL'][nextState] += 1
postSynaptic['RIR'][nextState] += 4
postSynaptic['SMBDL'][nextState] += 1
postSynaptic['SMBVL'][nextState] += 1
def AVJL():
postSynaptic['AVAL'][nextState] += 2
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 4
postSynaptic['AVDL'][nextState] += 1
postSynaptic['AVDR'][nextState] += 2
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVFR'][nextState] += 1
postSynaptic['AVHL'][nextState] += 2
postSynaptic['AVJR'][nextState] += 4
postSynaptic['HSNR'][nextState] += 1
postSynaptic['PLMR'][nextState] += 2
postSynaptic['PVCL'][nextState] += 2
postSynaptic['PVCR'][nextState] += 5
postSynaptic['PVNR'][nextState] += 1
postSynaptic['RIFR'][nextState] += 1
postSynaptic['RIS'][nextState] += 2
def AVJR():
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVBL'][nextState] += 3
postSynaptic['AVBR'][nextState] += 1
postSynaptic['AVDL'][nextState] += 1
postSynaptic['AVDR'][nextState] += 3
postSynaptic['AVER'][nextState] += 3
postSynaptic['AVJL'][nextState] += 5
postSynaptic['PVCL'][nextState] += 3
postSynaptic['PVCR'][nextState] += 4
postSynaptic['PVQR'][nextState] += 1
postSynaptic['SABVL'][nextState] += 1
def AVKL():
postSynaptic['ADER'][nextState] += 1
postSynaptic['AQR'][nextState] += 2
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVEL'][nextState] += 2
postSynaptic['AVER'][nextState] += 1
postSynaptic['AVKR'][nextState] += 2
postSynaptic['AVM'][nextState] += 1
postSynaptic['DVA'][nextState] += 1
postSynaptic['PDEL'][nextState] += 3
postSynaptic['PDER'][nextState] += 1
postSynaptic['PVM'][nextState] += 1
postSynaptic['PVPL'][nextState] += 1
postSynaptic['PVPR'][nextState] += 1
postSynaptic['PVT'][nextState] += 2
postSynaptic['RICL'][nextState] += 1
postSynaptic['RICR'][nextState] += 1
postSynaptic['RIGL'][nextState] += 1
postSynaptic['RIML'][nextState] += 2
postSynaptic['RIMR'][nextState] += 1
postSynaptic['RMFR'][nextState] += 1
postSynaptic['SAADR'][nextState] += 1
postSynaptic['SIAVR'][nextState] += 1
postSynaptic['SMBDL'][nextState] += 1
postSynaptic['SMBDR'][nextState] += 1
postSynaptic['SMBVR'][nextState] += 1
postSynaptic['SMDDR'][nextState] += 1
postSynaptic['VB1'][nextState] += 4
postSynaptic['VB10'][nextState] += 1
def AVKR():
postSynaptic['ADEL'][nextState] += 1
postSynaptic['AQR'][nextState] += 1
postSynaptic['AVKL'][nextState] += 2
postSynaptic['BDUL'][nextState] += 1
postSynaptic['MVL10'][nextState] += 1
postSynaptic['PVPL'][nextState] += 6
postSynaptic['PVQL'][nextState] += 1
postSynaptic['RICL'][nextState] += 1
postSynaptic['RIGR'][nextState] += 1
postSynaptic['RIML'][nextState] += 2
postSynaptic['RIMR'][nextState] += 2
postSynaptic['RMDR'][nextState] += 1
postSynaptic['RMFL'][nextState] += 1
postSynaptic['SAADL'][nextState] += 1
postSynaptic['SMBDL'][nextState] += 2
postSynaptic['SMBDR'][nextState] += 2
postSynaptic['SMBVR'][nextState] += 1
postSynaptic['SMDDL'][nextState] += 1
postSynaptic['SMDDR'][nextState] += 2
def AVL():
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVFR'][nextState] += 1
postSynaptic['DA2'][nextState] += 1
postSynaptic['DD1'][nextState] += 1
postSynaptic['DD6'][nextState] += 2
postSynaptic['DVB'][nextState] += 1
postSynaptic['DVC'][nextState] += 9
postSynaptic['HSNR'][nextState] += 1
postSynaptic['MVL10'][nextState] += -5
postSynaptic['MVR10'][nextState] += -5
postSynaptic['PVM'][nextState] += 1
postSynaptic['PVPR'][nextState] += 1
postSynaptic['PVWL'][nextState] += 1
postSynaptic['SABD'][nextState] += 5
postSynaptic['SABVL'][nextState] += 4
postSynaptic['SABVR'][nextState] += 3
postSynaptic['VD12'][nextState] += 4
def AVM():
postSynaptic['ADER'][nextState] += 1
postSynaptic['ALML'][nextState] += 1
postSynaptic['ALMR'][nextState] += 1
postSynaptic['AVBL'][nextState] += 6
postSynaptic['AVBR'][nextState] += 6
postSynaptic['AVDL'][nextState] += 2
postSynaptic['AVJR'][nextState] += 1
postSynaptic['BDUL'][nextState] += 3
postSynaptic['BDUR'][nextState] += 2
postSynaptic['DA1'][nextState] += 1
postSynaptic['PVCL'][nextState] += 4
postSynaptic['PVCR'][nextState] += 5
postSynaptic['PVNL'][nextState] += 1
postSynaptic['PVR'][nextState] += 3
postSynaptic['RID'][nextState] += 1
postSynaptic['SIBVL'][nextState] += 1
postSynaptic['VA1'][nextState] += 2
def AWAL():
postSynaptic['ADAL'][nextState] += 1
postSynaptic['AFDL'][nextState] += 5
postSynaptic['AIAL'][nextState] += 1
postSynaptic['AIYL'][nextState] += 1
postSynaptic['AIZL'][nextState] += 10
postSynaptic['ASEL'][nextState] += 4
postSynaptic['ASGL'][nextState] += 1
postSynaptic['AWAR'][nextState] += 1
postSynaptic['AWBL'][nextState] += 1
def AWAR():
postSynaptic['ADFR'][nextState] += 3
postSynaptic['AFDR'][nextState] += 7
postSynaptic['AIAR'][nextState] += 1
postSynaptic['AIYR'][nextState] += 2
postSynaptic['AIZR'][nextState] += 8
postSynaptic['ASEL'][nextState] += 1
postSynaptic['ASER'][nextState] += 2
postSynaptic['AUAR'][nextState] += 1
postSynaptic['AWAL'][nextState] += 1
postSynaptic['AWBR'][nextState] += 1
postSynaptic['RIFR'][nextState] += 2
postSynaptic['RIGR'][nextState] += 1
postSynaptic['RIR'][nextState] += 2
def AWBL():
postSynaptic['ADFL'][nextState] += 9
postSynaptic['AIBR'][nextState] += 1
postSynaptic['AIZL'][nextState] += 9
postSynaptic['AUAL'][nextState] += 1
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AWBR'][nextState] += 1
postSynaptic['RIAL'][nextState] += 3
postSynaptic['RMGL'][nextState] += 1
postSynaptic['SMBDL'][nextState] += 1
def AWBR():
postSynaptic['ADFR'][nextState] += 4
postSynaptic['AIZR'][nextState] += 4
postSynaptic['ASGR'][nextState] += 1
postSynaptic['ASHR'][nextState] += 2
postSynaptic['AUAR'][nextState] += 1
postSynaptic['AVBR'][nextState] += 2
postSynaptic['AWBL'][nextState] += 1
postSynaptic['RIAR'][nextState] += 1
postSynaptic['RICL'][nextState] += 1
postSynaptic['RIR'][nextState] += 2
postSynaptic['RMGR'][nextState] += 1
postSynaptic['SMBVR'][nextState] += 1
def AWCL():
postSynaptic['AIAL'][nextState] += 2
postSynaptic['AIAR'][nextState] += 4
postSynaptic['AIBL'][nextState] += 1
postSynaptic['AIBR'][nextState] += 1
postSynaptic['AIYL'][nextState] += 10
postSynaptic['ASEL'][nextState] += 1
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AWCR'][nextState] += 1
postSynaptic['RIAL'][nextState] += 3
def AWCR():
postSynaptic['AIAR'][nextState] += 1
postSynaptic['AIBR'][nextState] += 4
postSynaptic['AIYL'][nextState] += 4
postSynaptic['AIYR'][nextState] += 9
postSynaptic['ASEL'][nextState] += 1
postSynaptic['ASGR'][nextState] += 1
postSynaptic['AWCL'][nextState] += 5
def BAGL():
postSynaptic['AIBL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVER'][nextState] += 4
postSynaptic['BAGR'][nextState] += 2
postSynaptic['RIAR'][nextState] += 5
postSynaptic['RIBL'][nextState] += 1
postSynaptic['RIBR'][nextState] += 7
postSynaptic['RIGL'][nextState] += 1
postSynaptic['RIGR'][nextState] += 5
postSynaptic['RIR'][nextState] += 1
def BAGR():
postSynaptic['AIYL'][nextState] += 1
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVEL'][nextState] += 2
postSynaptic['BAGL'][nextState] += 1
postSynaptic['RIAL'][nextState] += 5
postSynaptic['RIBL'][nextState] += 4
postSynaptic['RIGL'][nextState] += 6
postSynaptic['RIR'][nextState] += 1
def BDUL():
postSynaptic['ADEL'][nextState] += 3
postSynaptic['AVHL'][nextState] += 1
postSynaptic['AVJR'][nextState] += 1
postSynaptic['HSNL'][nextState] += 1
postSynaptic['PVNL'][nextState] += 2
postSynaptic['PVNR'][nextState] += 2
postSynaptic['SAADL'][nextState] += 1
postSynaptic['URADL'][nextState] += 1
def BDUR():
postSynaptic['ADER'][nextState] += 1
postSynaptic['ALMR'][nextState] += 1
postSynaptic['AVAL'][nextState] += 3
postSynaptic['AVHL'][nextState] += 1
postSynaptic['AVJL'][nextState] += 2
postSynaptic['HSNR'][nextState] += 4
postSynaptic['PVCL'][nextState] += 1
postSynaptic['PVNL'][nextState] += 2
postSynaptic['PVNR'][nextState] += 1
postSynaptic['SDQL'][nextState] += 1
postSynaptic['URADR'][nextState] += 1
def CEPDL():
postSynaptic['AVER'][nextState] += 5
postSynaptic['IL1DL'][nextState] += 4
postSynaptic['OLLL'][nextState] += 2
postSynaptic['OLQDL'][nextState] += 7
postSynaptic['RIBL'][nextState] += 2
postSynaptic['RICL'][nextState] += 1
postSynaptic['RICR'][nextState] += 2
postSynaptic['RIH'][nextState] += 1
postSynaptic['RIPL'][nextState] += 2
postSynaptic['RIS'][nextState] += 1
postSynaptic['RMDVL'][nextState] += 3
postSynaptic['RMGL'][nextState] += 4
postSynaptic['RMHR'][nextState] += 4
postSynaptic['SIADR'][nextState] += 1
postSynaptic['SMBDR'][nextState] += 1
postSynaptic['URADL'][nextState] += 2
postSynaptic['URBL'][nextState] += 4
postSynaptic['URYDL'][nextState] += 2
def CEPDR():
postSynaptic['AVEL'][nextState] += 6
postSynaptic['BDUR'][nextState] += 1
postSynaptic['IL1DR'][nextState] += 5
postSynaptic['IL1R'][nextState] += 1
postSynaptic['OLLR'][nextState] += 8
postSynaptic['OLQDR'][nextState] += 7
postSynaptic['RIBR'][nextState] += 1
postSynaptic['RICL'][nextState] += 4
postSynaptic['RICR'][nextState] += 3
postSynaptic['RIH'][nextState] += 1
postSynaptic['RIS'][nextState] += 1
postSynaptic['RMDDL'][nextState] += 1
postSynaptic['RMDVR'][nextState] += 2
postSynaptic['RMGR'][nextState] += 1
postSynaptic['RMHL'][nextState] += 4
postSynaptic['RMHR'][nextState] += 1
postSynaptic['SIADL'][nextState] += 1
postSynaptic['SMBDR'][nextState] += 1
postSynaptic['URADR'][nextState] += 1
postSynaptic['URBR'][nextState] += 2
postSynaptic['URYDR'][nextState] += 1
def CEPVL():
postSynaptic['ADLL'][nextState] += 1
postSynaptic['AVER'][nextState] += 3
postSynaptic['IL1VL'][nextState] += 2
postSynaptic['MVL03'][nextState] += 1
postSynaptic['OLLL'][nextState] += 4
postSynaptic['OLQVL'][nextState] += 7
postSynaptic['RICL'][nextState] += 7
postSynaptic['RICR'][nextState] += 4
postSynaptic['RIH'][nextState] += 1
postSynaptic['RIPL'][nextState] += 1
postSynaptic['RMDDL'][nextState] += 4
postSynaptic['RMHL'][nextState] += 1
postSynaptic['SIAVL'][nextState] += 1
postSynaptic['URAVL'][nextState] += 2
def CEPVR():
postSynaptic['ASGR'][nextState] += 1
postSynaptic['AVEL'][nextState] += 5
postSynaptic['IL1VR'][nextState] += 1
postSynaptic['IL2VR'][nextState] += 2
postSynaptic['MVR04'][nextState] += 1
postSynaptic['OLLR'][nextState] += 7
postSynaptic['OLQVR'][nextState] += 4
postSynaptic['RICL'][nextState] += 2
postSynaptic['RICR'][nextState] += 2
postSynaptic['RIH'][nextState] += 1
postSynaptic['RIPR'][nextState] += 1
postSynaptic['RIVL'][nextState] += 1
postSynaptic['RMDDR'][nextState] += 2
postSynaptic['RMHR'][nextState] += 2
postSynaptic['SIAVR'][nextState] += 2
postSynaptic['URAVR'][nextState] += 1
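# DA- and DB-class motor neurons drive the dorsal body-wall muscle rows
# (MDLxx = dorsal-left, MDRxx = dorsal-right muscle cells).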
def DA1():
postSynaptic['AVAL'][nextState] += 2
postSynaptic['AVAR'][nextState] += 6
postSynaptic['DA4'][nextState] += 1
postSynaptic['DD1'][nextState] += 4
postSynaptic['MDL08'][nextState] += 8
postSynaptic['MDR08'][nextState] += 8
postSynaptic['SABVL'][nextState] += 2
postSynaptic['SABVR'][nextState] += 3
postSynaptic['VD1'][nextState] += 17
postSynaptic['VD2'][nextState] += 1
def DA2():
postSynaptic['AS2'][nextState] += 2
postSynaptic['AS3'][nextState] += 1
postSynaptic['AVAL'][nextState] += 2
postSynaptic['AVAR'][nextState] += 2
postSynaptic['DD1'][nextState] += 1
postSynaptic['MDL07'][nextState] += 2
postSynaptic['MDL08'][nextState] += 1
postSynaptic['MDL09'][nextState] += 2
postSynaptic['MDL10'][nextState] += 2
postSynaptic['MDR07'][nextState] += 2
postSynaptic['MDR08'][nextState] += 2
postSynaptic['MDR09'][nextState] += 2
postSynaptic['MDR10'][nextState] += 2
postSynaptic['SABVL'][nextState] += 1
postSynaptic['VA1'][nextState] += 2
postSynaptic['VD1'][nextState] += 2
postSynaptic['VD2'][nextState] += 11
postSynaptic['VD3'][nextState] += 5
def DA3():
postSynaptic['AS4'][nextState] += 2
postSynaptic['AVAR'][nextState] += 2
postSynaptic['DA4'][nextState] += 2
postSynaptic['DB3'][nextState] += 1
postSynaptic['DD2'][nextState] += 1
postSynaptic['MDL09'][nextState] += 5
postSynaptic['MDL10'][nextState] += 5
postSynaptic['MDL12'][nextState] += 5
postSynaptic['MDR09'][nextState] += 5
postSynaptic['MDR10'][nextState] += 5
postSynaptic['MDR12'][nextState] += 5
postSynaptic['VD3'][nextState] += 25
postSynaptic['VD4'][nextState] += 6
def DA4():
postSynaptic['AVAL'][nextState] += 3
postSynaptic['AVAR'][nextState] += 2
postSynaptic['DA1'][nextState] += 1
postSynaptic['DA3'][nextState] += 1
postSynaptic['DB3'][nextState] += 2
postSynaptic['DD2'][nextState] += 1
postSynaptic['MDL11'][nextState] += 4
postSynaptic['MDL12'][nextState] += 4
postSynaptic['MDL14'][nextState] += 5
postSynaptic['MDR11'][nextState] += 4
postSynaptic['MDR12'][nextState] += 4
postSynaptic['MDR14'][nextState] += 5
postSynaptic['VB6'][nextState] += 1
postSynaptic['VD4'][nextState] += 12
postSynaptic['VD5'][nextState] += 15
def DA5():
postSynaptic['AS6'][nextState] += 2
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 5
postSynaptic['DB4'][nextState] += 1
postSynaptic['MDL13'][nextState] += 5
postSynaptic['MDL14'][nextState] += 4
postSynaptic['MDR13'][nextState] += 5
postSynaptic['MDR14'][nextState] += 4
postSynaptic['VA4'][nextState] += 1
postSynaptic['VA5'][nextState] += 2
postSynaptic['VD5'][nextState] += 1
postSynaptic['VD6'][nextState] += 16
def DA6():
postSynaptic['AVAL'][nextState] += 10
postSynaptic['AVAR'][nextState] += 2
postSynaptic['MDL11'][nextState] += 6
postSynaptic['MDL12'][nextState] += 4
postSynaptic['MDL13'][nextState] += 4
postSynaptic['MDL14'][nextState] += 4
postSynaptic['MDL16'][nextState] += 4
postSynaptic['MDR11'][nextState] += 4
postSynaptic['MDR12'][nextState] += 4
postSynaptic['MDR13'][nextState] += 4
postSynaptic['MDR14'][nextState] += 4
postSynaptic['MDR16'][nextState] += 4
postSynaptic['VD4'][nextState] += 4
postSynaptic['VD5'][nextState] += 3
postSynaptic['VD6'][nextState] += 3
def DA7():
postSynaptic['AVAL'][nextState] += 2
postSynaptic['MDL15'][nextState] += 4
postSynaptic['MDL17'][nextState] += 4
postSynaptic['MDL18'][nextState] += 4
postSynaptic['MDR15'][nextState] += 4
postSynaptic['MDR17'][nextState] += 4
postSynaptic['MDR18'][nextState] += 4
def DA8():
postSynaptic['AVAR'][nextState] += 1
postSynaptic['DA9'][nextState] += 1
postSynaptic['MDL17'][nextState] += 4
postSynaptic['MDL19'][nextState] += 4
postSynaptic['MDL20'][nextState] += 4
postSynaptic['MDR17'][nextState] += 4
postSynaptic['MDR19'][nextState] += 4
postSynaptic['MDR20'][nextState] += 4
def DA9():
postSynaptic['DA8'][nextState] += 1
postSynaptic['DD6'][nextState] += 1
postSynaptic['MDL19'][nextState] += 4
postSynaptic['MDL21'][nextState] += 4
postSynaptic['MDL22'][nextState] += 4
postSynaptic['MDL23'][nextState] += 4
postSynaptic['MDL24'][nextState] += 4
postSynaptic['MDR19'][nextState] += 4
postSynaptic['MDR21'][nextState] += 4
postSynaptic['MDR22'][nextState] += 4
postSynaptic['MDR23'][nextState] += 4
postSynaptic['MDR24'][nextState] += 4
postSynaptic['PDA'][nextState] += 1
postSynaptic['PHCL'][nextState] += 1
postSynaptic['RID'][nextState] += 1
postSynaptic['VD13'][nextState] += 1
def DB1():
postSynaptic['AIBR'][nextState] += 1
postSynaptic['AS1'][nextState] += 1
postSynaptic['AS2'][nextState] += 1
postSynaptic['AS3'][nextState] += 1
postSynaptic['AVBR'][nextState] += 3
postSynaptic['DB2'][nextState] += 1
postSynaptic['DB4'][nextState] += 1
postSynaptic['DD1'][nextState] += 10
postSynaptic['DVA'][nextState] += 1
postSynaptic['MDL07'][nextState] += 1
postSynaptic['MDL08'][nextState] += 1
postSynaptic['MDR07'][nextState] += 1
postSynaptic['MDR08'][nextState] += 1
postSynaptic['RID'][nextState] += 1
postSynaptic['RIS'][nextState] += 1
postSynaptic['VB3'][nextState] += 1
postSynaptic['VB4'][nextState] += 1
postSynaptic['VD1'][nextState] += 21
postSynaptic['VD2'][nextState] += 15
postSynaptic['VD3'][nextState] += 1
def DB2():
postSynaptic['AVBR'][nextState] += 1
postSynaptic['DA3'][nextState] += 5
postSynaptic['DB1'][nextState] += 1
postSynaptic['DB3'][nextState] += 6
postSynaptic['DD2'][nextState] += 3
postSynaptic['MDL09'][nextState] += 3
postSynaptic['MDL10'][nextState] += 3
postSynaptic['MDL11'][nextState] += 3
postSynaptic['MDL12'][nextState] += 3
postSynaptic['MDR09'][nextState] += 3
postSynaptic['MDR10'][nextState] += 3
postSynaptic['MDR11'][nextState] += 3
postSynaptic['MDR12'][nextState] += 3
postSynaptic['VB1'][nextState] += 2
postSynaptic['VD3'][nextState] += 23
postSynaptic['VD4'][nextState] += 14
postSynaptic['VD5'][nextState] += 1
def DB3():
postSynaptic['AS4'][nextState] += 1
postSynaptic['AS5'][nextState] += 1
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 1
postSynaptic['DA4'][nextState] += 1
postSynaptic['DB2'][nextState] += 6
postSynaptic['DB4'][nextState] += 1
postSynaptic['DD2'][nextState] += 4
postSynaptic['DD3'][nextState] += 10
postSynaptic['MDL11'][nextState] += 3
postSynaptic['MDL12'][nextState] += 3
postSynaptic['MDL13'][nextState] += 4
postSynaptic['MDL14'][nextState] += 3
postSynaptic['MDR11'][nextState] += 3
postSynaptic['MDR12'][nextState] += 3
postSynaptic['MDR13'][nextState] += 4
postSynaptic['MDR14'][nextState] += 3
postSynaptic['VD4'][nextState] += 9
postSynaptic['VD5'][nextState] += 26
postSynaptic['VD6'][nextState] += 7
def DB4():
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 1
postSynaptic['DB1'][nextState] += 1
postSynaptic['DB3'][nextState] += 1
postSynaptic['DD3'][nextState] += 3
postSynaptic['MDL13'][nextState] += 2
postSynaptic['MDL14'][nextState] += 2
postSynaptic['MDL16'][nextState] += 2
postSynaptic['MDR13'][nextState] += 2
postSynaptic['MDR14'][nextState] += 2
postSynaptic['MDR16'][nextState] += 2
postSynaptic['VB2'][nextState] += 1
postSynaptic['VB4'][nextState] += 1
postSynaptic['VD6'][nextState] += 13
def DB5():
postSynaptic['AVAR'][nextState] += 2
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 1
postSynaptic['MDL15'][nextState] += 2
postSynaptic['MDL17'][nextState] += 2
postSynaptic['MDL18'][nextState] += 2
postSynaptic['MDR15'][nextState] += 2
postSynaptic['MDR17'][nextState] += 2
postSynaptic['MDR18'][nextState] += 2
def DB6():
postSynaptic['AVAL'][nextState] += 3
postSynaptic['AVBL'][nextState] += 2
postSynaptic['AVBR'][nextState] += 1
postSynaptic['MDL17'][nextState] += 2
postSynaptic['MDL19'][nextState] += 2
postSynaptic['MDL20'][nextState] += 2
postSynaptic['MDR17'][nextState] += 2
postSynaptic['MDR19'][nextState] += 2
postSynaptic['MDR20'][nextState] += 2
def DB7():
postSynaptic['AVBL'][nextState] += 2
postSynaptic['AVBR'][nextState] += 1
postSynaptic['MDL19'][nextState] += 2
postSynaptic['MDL21'][nextState] += 2
postSynaptic['MDL22'][nextState] += 2
postSynaptic['MDL23'][nextState] += 2
postSynaptic['MDL24'][nextState] += 2
postSynaptic['MDR19'][nextState] += 2
postSynaptic['MDR21'][nextState] += 2
postSynaptic['MDR22'][nextState] += 2
postSynaptic['MDR23'][nextState] += 2
postSynaptic['MDR24'][nextState] += 2
postSynaptic['VD13'][nextState] += 2
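# DD-class motor neurons are inhibitory: the negative weights below subtract
# from the accumulated activation of their muscle targets.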
def DD1():
postSynaptic['AVBR'][nextState] += 1
postSynaptic['DD2'][nextState] += 3
postSynaptic['MDL07'][nextState] += -6
postSynaptic['MDL08'][nextState] += -6
postSynaptic['MDL09'][nextState] += -7
postSynaptic['MDL10'][nextState] += -6
postSynaptic['MDR07'][nextState] += -6
postSynaptic['MDR08'][nextState] += -6
postSynaptic['MDR09'][nextState] += -7
postSynaptic['MDR10'][nextState] += -6
postSynaptic['VD1'][nextState] += 4
postSynaptic['VD2'][nextState] += 3
def DD2():
postSynaptic['DA3'][nextState] += 1
postSynaptic['DD1'][nextState] += 1
postSynaptic['DD3'][nextState] += 2
postSynaptic['MDL09'][nextState] += -6
postSynaptic['MDL11'][nextState] += -7
postSynaptic['MDL12'][nextState] += -6
postSynaptic['MDR09'][nextState] += -6
postSynaptic['MDR11'][nextState] += -7
postSynaptic['MDR12'][nextState] += -6
postSynaptic['VD3'][nextState] += 1
postSynaptic['VD4'][nextState] += 3
def DD3():
postSynaptic['DD2'][nextState] += 2
postSynaptic['DD4'][nextState] += 1
postSynaptic['MDL11'][nextState] += -7
postSynaptic['MDL13'][nextState] += -9
postSynaptic['MDL14'][nextState] += -7
postSynaptic['MDR11'][nextState] += -7
postSynaptic['MDR13'][nextState] += -9
postSynaptic['MDR14'][nextState] += -7
def DD4():
postSynaptic['DD3'][nextState] += 1
postSynaptic['MDL13'][nextState] += -7
postSynaptic['MDL15'][nextState] += -7
postSynaptic['MDL16'][nextState] += -7
postSynaptic['MDR13'][nextState] += -7
postSynaptic['MDR15'][nextState] += -7
postSynaptic['MDR16'][nextState] += -7
postSynaptic['VC3'][nextState] += 1
postSynaptic['VD8'][nextState] += 1
def DD5():
postSynaptic['MDL17'][nextState] += -7
postSynaptic['MDL18'][nextState] += -7
postSynaptic['MDL20'][nextState] += -7
postSynaptic['MDR17'][nextState] += -7
postSynaptic['MDR18'][nextState] += -7
postSynaptic['MDR20'][nextState] += -7
postSynaptic['VB8'][nextState] += 1
postSynaptic['VD10'][nextState] += 1
postSynaptic['VD9'][nextState] += 1
def DD6():
postSynaptic['MDL19'][nextState] += -7
postSynaptic['MDL21'][nextState] += -7
postSynaptic['MDL22'][nextState] += -7
postSynaptic['MDL23'][nextState] += -7
postSynaptic['MDL24'][nextState] += -7
postSynaptic['MDR19'][nextState] += -7
postSynaptic['MDR21'][nextState] += -7
postSynaptic['MDR22'][nextState] += -7
postSynaptic['MDR23'][nextState] += -7
postSynaptic['MDR24'][nextState] += -7
def DVA():
postSynaptic['AIZL'][nextState] += 3
postSynaptic['AQR'][nextState] += 4
postSynaptic['AUAL'][nextState] += 1
postSynaptic['AUAR'][nextState] += 1
postSynaptic['AVAL'][nextState] += 3
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVBL'][nextState] += 2
postSynaptic['AVBR'][nextState] += 1
postSynaptic['AVEL'][nextState] += 9
postSynaptic['AVER'][nextState] += 5
postSynaptic['DB1'][nextState] += 1
postSynaptic['DB2'][nextState] += 1
postSynaptic['DB3'][nextState] += 2
postSynaptic['DB4'][nextState] += 1
postSynaptic['DB5'][nextState] += 1
postSynaptic['DB6'][nextState] += 2
postSynaptic['DB7'][nextState] += 1
postSynaptic['PDEL'][nextState] += 3
postSynaptic['PVCL'][nextState] += 4
postSynaptic['PVCR'][nextState] += 1
postSynaptic['PVR'][nextState] += 5
postSynaptic['RIAL'][nextState] += 1
postSynaptic['RIAR'][nextState] += 1
postSynaptic['RIMR'][nextState] += 1
postSynaptic['RIR'][nextState] += 3
postSynaptic['SAADR'][nextState] += 1
postSynaptic['SAAVL'][nextState] += 1
postSynaptic['SAAVR'][nextState] += 1
postSynaptic['SABD'][nextState] += 1
postSynaptic['SMBDL'][nextState] += 3
postSynaptic['SMBDR'][nextState] += 2
postSynaptic['SMBVL'][nextState] += 3
postSynaptic['SMBVR'][nextState] += 2
postSynaptic['VA12'][nextState] += 1
postSynaptic['VA2'][nextState] += 1
postSynaptic['VB1'][nextState] += 1
postSynaptic['VB11'][nextState] += 2
def DVB():
postSynaptic['AS9'][nextState] += 7
postSynaptic['AVL'][nextState] += 6
postSynaptic['DA8'][nextState] += 2
postSynaptic['DD6'][nextState] += 3
postSynaptic['DVC'][nextState] += 3
# postSynaptic['MANAL'][nextState] += -5  # MANAL is not used in this simulation, so the update is omitted
postSynaptic['PDA'][nextState] += 1
postSynaptic['PHCL'][nextState] += 1
postSynaptic['PVPL'][nextState] += 1
postSynaptic['VA9'][nextState] += 1
postSynaptic['VB9'][nextState] += 1
def DVC():
postSynaptic['AIBL'][nextState] += 2
postSynaptic['AIBR'][nextState] += 5
postSynaptic['AVAL'][nextState] += 5
postSynaptic['AVAR'][nextState] += 7
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVKL'][nextState] += 2
postSynaptic['AVKR'][nextState] += 1
postSynaptic['AVL'][nextState] += 9
postSynaptic['PVPL'][nextState] += 2
postSynaptic['PVPR'][nextState] += 13
postSynaptic['PVT'][nextState] += 1
postSynaptic['RIBL'][nextState] += 1
postSynaptic['RIBR'][nextState] += 1
postSynaptic['RIGL'][nextState] += 5
postSynaptic['RIGR'][nextState] += 5
postSynaptic['RMFL'][nextState] += 2
postSynaptic['RMFR'][nextState] += 4
postSynaptic['VA9'][nextState] += 1
postSynaptic['VD1'][nextState] += 5
postSynaptic['VD10'][nextState] += 4
def FLPL():
postSynaptic['ADEL'][nextState] += 2
postSynaptic['ADER'][nextState] += 2
postSynaptic['AIBL'][nextState] += 1
postSynaptic['AIBR'][nextState] += 2
postSynaptic['AVAL'][nextState] += 15
postSynaptic['AVAR'][nextState] += 17
postSynaptic['AVBL'][nextState] += 4
postSynaptic['AVBR'][nextState] += 5
postSynaptic['AVDL'][nextState] += 7
postSynaptic['AVDR'][nextState] += 13
postSynaptic['DVA'][nextState] += 1
postSynaptic['FLPR'][nextState] += 3
postSynaptic['RIH'][nextState] += 1
def FLPR():
postSynaptic['ADER'][nextState] += 1
postSynaptic['AIBR'][nextState] += 1
postSynaptic['AVAL'][nextState] += 12
postSynaptic['AVAR'][nextState] += 5
postSynaptic['AVBL'][nextState] += 5
postSynaptic['AVBR'][nextState] += 1
postSynaptic['AVDL'][nextState] += 11
postSynaptic['AVDR'][nextState] += 2
postSynaptic['AVEL'][nextState] += 4
postSynaptic['AVER'][nextState] += 2
postSynaptic['AVJR'][nextState] += 1
postSynaptic['DVA'][nextState] += 1
postSynaptic['FLPL'][nextState] += 4
postSynaptic['PVCL'][nextState] += 2
postSynaptic['VB1'][nextState] += 1
def HSNL():
postSynaptic['AIAL'][nextState] += 1
postSynaptic['AIZL'][nextState] += 2
postSynaptic['AIZR'][nextState] += 1
postSynaptic['ASHL'][nextState] += 1
postSynaptic['ASHR'][nextState] += 2
postSynaptic['ASJR'][nextState] += 1
postSynaptic['ASKL'][nextState] += 1
postSynaptic['AVDR'][nextState] += 2
postSynaptic['AVFL'][nextState] += 6
postSynaptic['AVJL'][nextState] += 1
postSynaptic['AWBL'][nextState] += 1
postSynaptic['AWBR'][nextState] += 2
postSynaptic['HSNR'][nextState] += 4
postSynaptic['MVULVA'][nextState] += 7
postSynaptic['RIFL'][nextState] += 3
postSynaptic['RIML'][nextState] += 2
postSynaptic['SABVL'][nextState] += 2
postSynaptic['VC5'][nextState] += 3
def HSNR():
postSynaptic['AIBL'][nextState] += 1
postSynaptic['AIBR'][nextState] += 1
postSynaptic['AIZL'][nextState] += 1
postSynaptic['AIZR'][nextState] += 1
postSynaptic['AS5'][nextState] += 1
postSynaptic['ASHL'][nextState] += 2
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVFL'][nextState] += 1
postSynaptic['AVJL'][nextState] += 1
postSynaptic['AVL'][nextState] += 1
postSynaptic['AWBL'][nextState] += 1
postSynaptic['BDUR'][nextState] += 1
postSynaptic['DA5'][nextState] += 1
postSynaptic['DA6'][nextState] += 1
postSynaptic['HSNL'][nextState] += 2
postSynaptic['MVULVA'][nextState] += 6
postSynaptic['PVNR'][nextState] += 2
postSynaptic['PVQR'][nextState] += 1
postSynaptic['RIFR'][nextState] += 4
postSynaptic['RMGR'][nextState] += 1
postSynaptic['SABD'][nextState] += 1
postSynaptic['SABVR'][nextState] += 1
postSynaptic['VA6'][nextState] += 1
postSynaptic['VC2'][nextState] += 3
postSynaptic['VC3'][nextState] += 1
postSynaptic['VD4'][nextState] += 2
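# Pharyngeal neurons (I-, M-, MC-, MI-, and NSM-class cells).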
def I1L():
postSynaptic['I1R'][nextState] += 1
postSynaptic['I3'][nextState] += 1
postSynaptic['I5'][nextState] += 1
postSynaptic['RIPL'][nextState] += 1
postSynaptic['RIPR'][nextState] += 1
def I1R():
postSynaptic['I1L'][nextState] += 1
postSynaptic['I3'][nextState] += 1
postSynaptic['I5'][nextState] += 1
postSynaptic['RIPL'][nextState] += 1
postSynaptic['RIPR'][nextState] += 1
def I2L():
postSynaptic['I1L'][nextState] += 1
postSynaptic['I1R'][nextState] += 1
postSynaptic['M1'][nextState] += 4
def I2R():
postSynaptic['I1L'][nextState] += 1
postSynaptic['I1R'][nextState] += 1
postSynaptic['M1'][nextState] += 4
def I3():
postSynaptic['M1'][nextState] += 4
postSynaptic['M2L'][nextState] += 2
postSynaptic['M2R'][nextState] += 2
def I4():
postSynaptic['I2L'][nextState] += 5
postSynaptic['I2R'][nextState] += 5
postSynaptic['I5'][nextState] += 2
postSynaptic['M1'][nextState] += 4
def I5():
postSynaptic['I1L'][nextState] += 4
postSynaptic['I1R'][nextState] += 3
postSynaptic['M1'][nextState] += 2
postSynaptic['M5'][nextState] += 2
postSynaptic['MI'][nextState] += 4
def I6():
postSynaptic['I2L'][nextState] += 2
postSynaptic['I2R'][nextState] += 2
postSynaptic['I3'][nextState] += 1
postSynaptic['M4'][nextState] += 1
postSynaptic['M5'][nextState] += 2
postSynaptic['NSML'][nextState] += 2
postSynaptic['NSMR'][nextState] += 2
def IL1DL():
postSynaptic['IL1DR'][nextState] += 1
postSynaptic['IL1L'][nextState] += 1
postSynaptic['MDL01'][nextState] += 1
postSynaptic['MDL02'][nextState] += 1
postSynaptic['MDL04'][nextState] += 2
postSynaptic['OLLL'][nextState] += 1
postSynaptic['PVR'][nextState] += 1
postSynaptic['RIH'][nextState] += 1
postSynaptic['RIPL'][nextState] += 2
postSynaptic['RMDDR'][nextState] += 1
postSynaptic['RMDVL'][nextState] += 4
postSynaptic['RMEV'][nextState] += 1
postSynaptic['URYDL'][nextState] += 1
def IL1DR():
postSynaptic['IL1DL'][nextState] += 1
postSynaptic['IL1R'][nextState] += 1
postSynaptic['MDR01'][nextState] += 4
postSynaptic['MDR02'][nextState] += 3
postSynaptic['OLLR'][nextState] += 1
postSynaptic['RIPR'][nextState] += 5
postSynaptic['RMDVR'][nextState] += 5
postSynaptic['RMEV'][nextState] += 1
def IL1L():
postSynaptic['AVER'][nextState] += 2
postSynaptic['IL1DL'][nextState] += 2
postSynaptic['IL1VL'][nextState] += 1
postSynaptic['MDL01'][nextState] += 3
postSynaptic['MDL03'][nextState] += 3
postSynaptic['MDL05'][nextState] += 4
postSynaptic['MVL01'][nextState] += 3
postSynaptic['MVL03'][nextState] += 3
postSynaptic['RMDDL'][nextState] += 5
postSynaptic['RMDL'][nextState] += 1
postSynaptic['RMDR'][nextState] += 3
postSynaptic['RMDVL'][nextState] += 4
postSynaptic['RMDVR'][nextState] += 2
postSynaptic['RMER'][nextState] += 1
def IL1R():
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVER'][nextState] += 1
postSynaptic['IL1DR'][nextState] += 2
postSynaptic['IL1VR'][nextState] += 1
postSynaptic['MDR01'][nextState] += 3
postSynaptic['MDR03'][nextState] += 3
postSynaptic['MVR01'][nextState] += 3
postSynaptic['MVR03'][nextState] += 3
postSynaptic['RMDDL'][nextState] += 3
postSynaptic['RMDDR'][nextState] += 2
postSynaptic['RMDL'][nextState] += 4
postSynaptic['RMDR'][nextState] += 2
postSynaptic['RMDVL'][nextState] += 1
postSynaptic['RMDVR'][nextState] += 4
postSynaptic['RMEL'][nextState] += 2
postSynaptic['RMHL'][nextState] += 1
postSynaptic['URXR'][nextState] += 2
def IL1VL():
postSynaptic['IL1L'][nextState] += 2
postSynaptic['IL1VR'][nextState] += 1
postSynaptic['MVL01'][nextState] += 5
postSynaptic['MVL02'][nextState] += 4
postSynaptic['RIPL'][nextState] += 4
postSynaptic['RMDDL'][nextState] += 5
postSynaptic['RMED'][nextState] += 1
postSynaptic['URYVL'][nextState] += 1
def IL1VR():
postSynaptic['IL1R'][nextState] += 2
postSynaptic['IL1VL'][nextState] += 1
postSynaptic['IL2R'][nextState] += 1
postSynaptic['IL2VR'][nextState] += 1
postSynaptic['MVR01'][nextState] += 5
postSynaptic['MVR02'][nextState] += 5
postSynaptic['RIPR'][nextState] += 6
postSynaptic['RMDDR'][nextState] += 10
postSynaptic['RMER'][nextState] += 1
def IL2DL():
postSynaptic['AUAL'][nextState] += 1
postSynaptic['IL1DL'][nextState] += 7
postSynaptic['OLQDL'][nextState] += 2
postSynaptic['RIBL'][nextState] += 1
postSynaptic['RIPL'][nextState] += 10
postSynaptic['RMEL'][nextState] += 4
postSynaptic['RMER'][nextState] += 3
postSynaptic['URADL'][nextState] += 3
def IL2DR():
postSynaptic['CEPDR'][nextState] += 1
postSynaptic['IL1DR'][nextState] += 7
postSynaptic['RICR'][nextState] += 1
postSynaptic['RIPR'][nextState] += 11
postSynaptic['RMED'][nextState] += 1
postSynaptic['RMEL'][nextState] += 2
postSynaptic['RMER'][nextState] += 2
postSynaptic['RMEV'][nextState] += 1
postSynaptic['URADR'][nextState] += 3
def IL2L():
postSynaptic['ADEL'][nextState] += 2
postSynaptic['AVEL'][nextState] += 1
postSynaptic['IL1L'][nextState] += 1
postSynaptic['OLQDL'][nextState] += 5
postSynaptic['OLQVL'][nextState] += 8
postSynaptic['RICL'][nextState] += 1
postSynaptic['RIH'][nextState] += 7
postSynaptic['RMDL'][nextState] += 3
postSynaptic['RMDR'][nextState] += 1
postSynaptic['RMER'][nextState] += 2
postSynaptic['RMEV'][nextState] += 2
postSynaptic['RMGL'][nextState] += 1
postSynaptic['URXL'][nextState] += 2
def IL2R():
postSynaptic['ADER'][nextState] += 1
postSynaptic['IL1R'][nextState] += 1
postSynaptic['IL1VR'][nextState] += 1
postSynaptic['OLLR'][nextState] += 1
postSynaptic['OLQDR'][nextState] += 2
postSynaptic['OLQVR'][nextState] += 7
postSynaptic['RIH'][nextState] += 6
postSynaptic['RMDL'][nextState] += 1
postSynaptic['RMEL'][nextState] += 2
postSynaptic['RMEV'][nextState] += 1
postSynaptic['RMGR'][nextState] += 1
postSynaptic['URBR'][nextState] += 1
postSynaptic['URXR'][nextState] += 1
def IL2VL():
postSynaptic['BAGR'][nextState] += 1
postSynaptic['IL1VL'][nextState] += 7
postSynaptic['IL2L'][nextState] += 1
postSynaptic['OLQVL'][nextState] += 1
postSynaptic['RIAL'][nextState] += 1
postSynaptic['RIH'][nextState] += 2
postSynaptic['RIPL'][nextState] += 1
postSynaptic['RMEL'][nextState] += 1
postSynaptic['RMER'][nextState] += 4
postSynaptic['RMEV'][nextState] += 1
postSynaptic['URAVL'][nextState] += 3
def IL2VR():
postSynaptic['IL1VR'][nextState] += 6
postSynaptic['OLQVR'][nextState] += 1
postSynaptic['RIAR'][nextState] += 2
postSynaptic['RIH'][nextState] += 3
postSynaptic['RIPR'][nextState] += 15
postSynaptic['RMEL'][nextState] += 3
postSynaptic['RMER'][nextState] += 2
postSynaptic['RMEV'][nextState] += 3
postSynaptic['URAVR'][nextState] += 4
postSynaptic['URXR'][nextState] += 1
def LUAL():
postSynaptic['AVAL'][nextState] += 6
postSynaptic['AVAR'][nextState] += 6
postSynaptic['AVDL'][nextState] += 4
postSynaptic['AVDR'][nextState] += 2
postSynaptic['AVJL'][nextState] += 1
postSynaptic['PHBL'][nextState] += 1
postSynaptic['PLML'][nextState] += 1
postSynaptic['PVNL'][nextState] += 1
postSynaptic['PVR'][nextState] += 1
postSynaptic['PVWL'][nextState] += 1
def LUAR():
postSynaptic['AVAL'][nextState] += 3
postSynaptic['AVAR'][nextState] += 7
postSynaptic['AVDL'][nextState] += 1
postSynaptic['AVDR'][nextState] += 3
postSynaptic['AVJR'][nextState] += 1
postSynaptic['PLMR'][nextState] += 1
postSynaptic['PQR'][nextState] += 1
postSynaptic['PVCR'][nextState] += 3
postSynaptic['PVR'][nextState] += 2
postSynaptic['PVWL'][nextState] += 1
def M1():
postSynaptic['I2L'][nextState] += 2
postSynaptic['I2R'][nextState] += 2
postSynaptic['I3'][nextState] += 1
postSynaptic['I4'][nextState] += 1
def M2L():
postSynaptic['I1L'][nextState] += 3
postSynaptic['I1R'][nextState] += 3
postSynaptic['I3'][nextState] += 3
postSynaptic['M2R'][nextState] += 1
postSynaptic['M5'][nextState] += 1
postSynaptic['MI'][nextState] += 4
def M2R():
postSynaptic['I1L'][nextState] += 3
postSynaptic['I1R'][nextState] += 3
postSynaptic['I3'][nextState] += 3
postSynaptic['M3L'][nextState] += 1
postSynaptic['M3R'][nextState] += 1
postSynaptic['M5'][nextState] += 1
postSynaptic['MI'][nextState] += 4
def M3L():
postSynaptic['I1L'][nextState] += 4
postSynaptic['I1R'][nextState] += 4
postSynaptic['I4'][nextState] += 2
postSynaptic['I5'][nextState] += 3
postSynaptic['I6'][nextState] += 1
postSynaptic['M1'][nextState] += 2
postSynaptic['M3R'][nextState] += 1
postSynaptic['MCL'][nextState] += 1
postSynaptic['MCR'][nextState] += 1
postSynaptic['MI'][nextState] += 2
postSynaptic['NSML'][nextState] += 2
postSynaptic['NSMR'][nextState] += 3
def M3R():
postSynaptic['I1L'][nextState] += 4
postSynaptic['I1R'][nextState] += 4
postSynaptic['I3'][nextState] += 2
postSynaptic['I4'][nextState] += 6
postSynaptic['I5'][nextState] += 3
postSynaptic['I6'][nextState] += 1
postSynaptic['M1'][nextState] += 2
postSynaptic['M3L'][nextState] += 1
postSynaptic['MCL'][nextState] += 1
postSynaptic['MCR'][nextState] += 1
postSynaptic['MI'][nextState] += 2
postSynaptic['NSML'][nextState] += 2
postSynaptic['NSMR'][nextState] += 3
def M4():
postSynaptic['I3'][nextState] += 1
postSynaptic['I5'][nextState] += 13
postSynaptic['I6'][nextState] += 3
postSynaptic['M2L'][nextState] += 1
postSynaptic['M2R'][nextState] += 1
postSynaptic['M4'][nextState] += 6
postSynaptic['M5'][nextState] += 1
postSynaptic['NSML'][nextState] += 1
postSynaptic['NSMR'][nextState] += 1
def M5():
postSynaptic['I5'][nextState] += 4
postSynaptic['I6'][nextState] += 1
postSynaptic['M1'][nextState] += 2
postSynaptic['M2L'][nextState] += 2
postSynaptic['M2R'][nextState] += 2
postSynaptic['M5'][nextState] += 4
def MCL():
postSynaptic['I1L'][nextState] += 3
postSynaptic['I1R'][nextState] += 3
postSynaptic['I2L'][nextState] += 1
postSynaptic['I2R'][nextState] += 1
postSynaptic['I3'][nextState] += 1
postSynaptic['M1'][nextState] += 2
postSynaptic['M2L'][nextState] += 2
postSynaptic['M2R'][nextState] += 2
def MCR():
postSynaptic['I1L'][nextState] += 3
postSynaptic['I1R'][nextState] += 3
postSynaptic['I3'][nextState] += 1
postSynaptic['M1'][nextState] += 2
postSynaptic['M2L'][nextState] += 2
postSynaptic['M2R'][nextState] += 2
def MI():
postSynaptic['I1L'][nextState] += 1
postSynaptic['I1R'][nextState] += 1
postSynaptic['I3'][nextState] += 1
postSynaptic['I4'][nextState] += 1
postSynaptic['I5'][nextState] += 2
postSynaptic['M1'][nextState] += 1
postSynaptic['M2L'][nextState] += 2
postSynaptic['M2R'][nextState] += 2
postSynaptic['M3L'][nextState] += 1
postSynaptic['M3R'][nextState] += 1
postSynaptic['MCL'][nextState] += 2
postSynaptic['MCR'][nextState] += 2
def NSML():
postSynaptic['I1L'][nextState] += 1
postSynaptic['I1R'][nextState] += 2
postSynaptic['I2L'][nextState] += 6
postSynaptic['I2R'][nextState] += 6
postSynaptic['I3'][nextState] += 2
postSynaptic['I4'][nextState] += 3
postSynaptic['I5'][nextState] += 2
postSynaptic['I6'][nextState] += 2
postSynaptic['M3L'][nextState] += 2
postSynaptic['M3R'][nextState] += 2
def NSMR():
postSynaptic['I1L'][nextState] += 2
postSynaptic['I1R'][nextState] += 2
postSynaptic['I2L'][nextState] += 6
postSynaptic['I2R'][nextState] += 6
postSynaptic['I3'][nextState] += 2
postSynaptic['I4'][nextState] += 3
postSynaptic['I5'][nextState] += 2
postSynaptic['I6'][nextState] += 2
postSynaptic['M3L'][nextState] += 2
postSynaptic['M3R'][nextState] += 2
def OLLL():
postSynaptic['AVER'][nextState] += 21
postSynaptic['CEPDL'][nextState] += 3
postSynaptic['CEPVL'][nextState] += 4
postSynaptic['IL1DL'][nextState] += 1
postSynaptic['IL1VL'][nextState] += 2
postSynaptic['OLLR'][nextState] += 2
postSynaptic['RIBL'][nextState] += 8
postSynaptic['RIGL'][nextState] += 1
postSynaptic['RMDDL'][nextState] += 7
postSynaptic['RMDL'][nextState] += 2
postSynaptic['RMDVL'][nextState] += 1
postSynaptic['RMEL'][nextState] += 2
postSynaptic['SMDDL'][nextState] += 3
postSynaptic['SMDDR'][nextState] += 4
postSynaptic['SMDVR'][nextState] += 4
postSynaptic['URYDL'][nextState] += 1
def OLLR():
postSynaptic['AVEL'][nextState] += 16
postSynaptic['CEPDR'][nextState] += 1
postSynaptic['CEPVR'][nextState] += 6
postSynaptic['IL1DR'][nextState] += 3
postSynaptic['IL1VR'][nextState] += 1
postSynaptic['IL2R'][nextState] += 1
postSynaptic['OLLL'][nextState] += 2
postSynaptic['RIBR'][nextState] += 10
postSynaptic['RIGR'][nextState] += 1
postSynaptic['RMDDR'][nextState] += 10
postSynaptic['RMDL'][nextState] += 3
postSynaptic['RMDVR'][nextState] += 3
postSynaptic['RMER'][nextState] += 2
postSynaptic['SMDDR'][nextState] += 1
postSynaptic['SMDVL'][nextState] += 4
postSynaptic['SMDVR'][nextState] += 3
def OLQDL():
postSynaptic['CEPDL'][nextState] += 1
postSynaptic['RIBL'][nextState] += 2
postSynaptic['RICR'][nextState] += 1
postSynaptic['RIGL'][nextState] += 1
postSynaptic['RMDDR'][nextState] += 4
postSynaptic['RMDVL'][nextState] += 1
postSynaptic['SIBVL'][nextState] += 3
postSynaptic['URBL'][nextState] += 1
def OLQDR():
postSynaptic['CEPDR'][nextState] += 2
postSynaptic['RIBR'][nextState] += 2
postSynaptic['RICL'][nextState] += 1
postSynaptic['RICR'][nextState] += 1
postSynaptic['RIGR'][nextState] += 1
postSynaptic['RIH'][nextState] += 1
postSynaptic['RMDDL'][nextState] += 3
postSynaptic['RMDVR'][nextState] += 1
postSynaptic['RMHR'][nextState] += 1
postSynaptic['SIBVR'][nextState] += 2
postSynaptic['URBR'][nextState] += 1
def OLQVL():
postSynaptic['ADLL'][nextState] += 1
postSynaptic['CEPVL'][nextState] += 1
postSynaptic['IL1VL'][nextState] += 1
postSynaptic['IL2VL'][nextState] += 1
postSynaptic['RIBL'][nextState] += 1
postSynaptic['RICL'][nextState] += 1
postSynaptic['RIGL'][nextState] += 1
postSynaptic['RIH'][nextState] += 1
postSynaptic['RIPL'][nextState] += 1
postSynaptic['RMDDL'][nextState] += 1
postSynaptic['RMDVR'][nextState] += 4
postSynaptic['SIBDL'][nextState] += 3
postSynaptic['URBL'][nextState] += 1
def OLQVR():
postSynaptic['CEPVR'][nextState] += 1
postSynaptic['IL1VR'][nextState] += 1
postSynaptic['RIBR'][nextState] += 1
postSynaptic['RICR'][nextState] += 1
postSynaptic['RIGR'][nextState] += 1
postSynaptic['RIH'][nextState] += 2
postSynaptic['RIPR'][nextState] += 2
postSynaptic['RMDDR'][nextState] += 1
postSynaptic['RMDVL'][nextState] += 4
postSynaptic['RMER'][nextState] += 1
postSynaptic['SIBDR'][nextState] += 4
postSynaptic['URBR'][nextState] += 1
def PDA():
postSynaptic['AS11'][nextState] += 1
postSynaptic['DA9'][nextState] += 1
postSynaptic['DD6'][nextState] += 1
postSynaptic['MDL21'][nextState] += 2
postSynaptic['PVNR'][nextState] += 1
postSynaptic['VD13'][nextState] += 3
def PDB():
postSynaptic['AS11'][nextState] += 2
postSynaptic['MVL22'][nextState] += 1
postSynaptic['MVR21'][nextState] += 1
postSynaptic['RID'][nextState] += 2
postSynaptic['VD13'][nextState] += 2
def PDEL():
postSynaptic['AVKL'][nextState] += 6
postSynaptic['DVA'][nextState] += 24
postSynaptic['PDER'][nextState] += 4
postSynaptic['PVCR'][nextState] += 1
postSynaptic['PVM'][nextState] += 3
postSynaptic['PVR'][nextState] += 2
postSynaptic['VA9'][nextState] += 1
postSynaptic['VD11'][nextState] += 1
def PDER():
postSynaptic['AVKL'][nextState] += 16
postSynaptic['DVA'][nextState] += 35
postSynaptic['PDEL'][nextState] += 3
postSynaptic['PVCL'][nextState] += 1
postSynaptic['PVCR'][nextState] += 1
postSynaptic['PVM'][nextState] += 1
postSynaptic['VA8'][nextState] += 1
postSynaptic['VD9'][nextState] += 1
def PHAL():
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVFL'][nextState] += 3
postSynaptic['AVG'][nextState] += 5
postSynaptic['AVHL'][nextState] += 1
postSynaptic['AVHR'][nextState] += 1
postSynaptic['DVA'][nextState] += 2
postSynaptic['PHAR'][nextState] += 7
postSynaptic['PHBL'][nextState] += 5
postSynaptic['PHBR'][nextState] += 5
postSynaptic['PVQL'][nextState] += 2
def PHAR():
postSynaptic['AVG'][nextState] += 3
postSynaptic['AVHR'][nextState] += 1
postSynaptic['DA8'][nextState] += 1
postSynaptic['DVA'][nextState] += 1
postSynaptic['PHAL'][nextState] += 8
postSynaptic['PHBL'][nextState] += 1
postSynaptic['PHBR'][nextState] += 5
postSynaptic['PVPL'][nextState] += 3
postSynaptic['PVQL'][nextState] += 2
def PHBL():
postSynaptic['AVAL'][nextState] += 9
postSynaptic['AVAR'][nextState] += 6
postSynaptic['AVDL'][nextState] += 1
postSynaptic['PHBR'][nextState] += 4
postSynaptic['PVCL'][nextState] += 13
postSynaptic['VA12'][nextState] += 1
def PHBR():
postSynaptic['AVAL'][nextState] += 7
postSynaptic['AVAR'][nextState] += 7
postSynaptic['AVDL'][nextState] += 1
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVFL'][nextState] += 1
postSynaptic['AVHL'][nextState] += 1
postSynaptic['DA8'][nextState] += 1
postSynaptic['PHBL'][nextState] += 4
postSynaptic['PVCL'][nextState] += 6
postSynaptic['PVCR'][nextState] += 3
postSynaptic['VA12'][nextState] += 2
def PHCL():
postSynaptic['AVAL'][nextState] += 1
postSynaptic['DA9'][nextState] += 8
postSynaptic['DVA'][nextState] += 6
postSynaptic['LUAL'][nextState] += 1
postSynaptic['PHCR'][nextState] += 1
postSynaptic['PLML'][nextState] += 1
postSynaptic['PVCL'][nextState] += 2
postSynaptic['VA12'][nextState] += 3
def PHCR():
postSynaptic['AVHR'][nextState] += 1
postSynaptic['DA9'][nextState] += 2
postSynaptic['DVA'][nextState] += 8
postSynaptic['LUAR'][nextState] += 1
postSynaptic['PHCL'][nextState] += 2
postSynaptic['PVCR'][nextState] += 9
postSynaptic['VA12'][nextState] += 2
def PLML():
postSynaptic['HSNL'][nextState] += 1
postSynaptic['LUAL'][nextState] += 1
postSynaptic['PHCL'][nextState] += 1
postSynaptic['PVCL'][nextState] += 1
def PLMR():
postSynaptic['AS6'][nextState] += 1
postSynaptic['AVAL'][nextState] += 4
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVDL'][nextState] += 1
postSynaptic['AVDR'][nextState] += 4
postSynaptic['DVA'][nextState] += 5
postSynaptic['HSNR'][nextState] += 1
postSynaptic['LUAR'][nextState] += 1
postSynaptic['PDEL'][nextState] += 2
postSynaptic['PDER'][nextState] += 3
postSynaptic['PVCL'][nextState] += 2
postSynaptic['PVCR'][nextState] += 1
postSynaptic['PVR'][nextState] += 2
def PLNL():
postSynaptic['SAADL'][nextState] += 5
postSynaptic['SMBVL'][nextState] += 6
def PLNR():
postSynaptic['SAADR'][nextState] += 4
postSynaptic['SMBVR'][nextState] += 6
def PQR():
postSynaptic['AVAL'][nextState] += 8
postSynaptic['AVAR'][nextState] += 11
postSynaptic['AVDL'][nextState] += 7
postSynaptic['AVDR'][nextState] += 6
postSynaptic['AVG'][nextState] += 1
postSynaptic['LUAR'][nextState] += 1
postSynaptic['PVNL'][nextState] += 1
postSynaptic['PVPL'][nextState] += 4
def PVCL():
postSynaptic['AS1'][nextState] += 1
postSynaptic['AVAL'][nextState] += 3
postSynaptic['AVAR'][nextState] += 4
postSynaptic['AVBL'][nextState] += 5
postSynaptic['AVBR'][nextState] += 12
postSynaptic['AVDL'][nextState] += 5
postSynaptic['AVDR'][nextState] += 2
postSynaptic['AVEL'][nextState] += 3
postSynaptic['AVER'][nextState] += 1
postSynaptic['AVJL'][nextState] += 4
postSynaptic['AVJR'][nextState] += 2
postSynaptic['DA2'][nextState] += 1
postSynaptic['DA5'][nextState] += 1
postSynaptic['DA6'][nextState] += 1
postSynaptic['DB2'][nextState] += 3
postSynaptic['DB3'][nextState] += 4
postSynaptic['DB4'][nextState] += 3
postSynaptic['DB5'][nextState] += 2
postSynaptic['DB6'][nextState] += 2
postSynaptic['DB7'][nextState] += 3
postSynaptic['DVA'][nextState] += 5
postSynaptic['PLML'][nextState] += 1
postSynaptic['PVCR'][nextState] += 7
postSynaptic['RID'][nextState] += 5
postSynaptic['RIS'][nextState] += 2
postSynaptic['SIBVL'][nextState] += 2
postSynaptic['VB10'][nextState] += 3
postSynaptic['VB11'][nextState] += 1
postSynaptic['VB3'][nextState] += 1
postSynaptic['VB4'][nextState] += 1
postSynaptic['VB5'][nextState] += 1
postSynaptic['VB6'][nextState] += 2
postSynaptic['VB8'][nextState] += 1
postSynaptic['VB9'][nextState] += 2
def PVCR():
postSynaptic['AQR'][nextState] += 1
postSynaptic['AS2'][nextState] += 1
postSynaptic['AVAL'][nextState] += 12
postSynaptic['AVAR'][nextState] += 10
postSynaptic['AVBL'][nextState] += 8
postSynaptic['AVBR'][nextState] += 6
postSynaptic['AVDL'][nextState] += 5
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVER'][nextState] += 1
postSynaptic['AVJL'][nextState] += 3
postSynaptic['AVL'][nextState] += 1
postSynaptic['DA9'][nextState] += 1
postSynaptic['DB2'][nextState] += 1
postSynaptic['DB3'][nextState] += 3
postSynaptic['DB4'][nextState] += 4
postSynaptic['DB5'][nextState] += 1
postSynaptic['DB6'][nextState] += 2
postSynaptic['DB7'][nextState] += 1
postSynaptic['FLPL'][nextState] += 1
postSynaptic['LUAR'][nextState] += 1
postSynaptic['PDEL'][nextState] += 2
postSynaptic['PHCR'][nextState] += 1
postSynaptic['PLMR'][nextState] += 1
postSynaptic['PVCL'][nextState] += 8
postSynaptic['PVDL'][nextState] += 1
postSynaptic['PVR'][nextState] += 1
postSynaptic['PVWL'][nextState] += 2
postSynaptic['PVWR'][nextState] += 2
postSynaptic['RID'][nextState] += 5
postSynaptic['SIBVR'][nextState] += 2
postSynaptic['VA8'][nextState] += 2
postSynaptic['VA9'][nextState] += 1
postSynaptic['VB10'][nextState] += 1
postSynaptic['VB4'][nextState] += 3
postSynaptic['VB6'][nextState] += 2
postSynaptic['VB7'][nextState] += 3
postSynaptic['VB8'][nextState] += 1
def PVDL():
postSynaptic['AVAL'][nextState] += 6
postSynaptic['AVAR'][nextState] += 6
postSynaptic['DD5'][nextState] += 1
postSynaptic['PVCL'][nextState] += 1
postSynaptic['PVCR'][nextState] += 6
postSynaptic['VD10'][nextState] += 6
def PVDR():
postSynaptic['AVAL'][nextState] += 6
postSynaptic['AVAR'][nextState] += 9
postSynaptic['DVA'][nextState] += 3
postSynaptic['PVCL'][nextState] += 13
postSynaptic['PVCR'][nextState] += 10
postSynaptic['PVDL'][nextState] += 1
postSynaptic['VA9'][nextState] += 1
def PVM():
postSynaptic['AVKL'][nextState] += 11
postSynaptic['AVL'][nextState] += 1
postSynaptic['AVM'][nextState] += 1
postSynaptic['DVA'][nextState] += 3
postSynaptic['PDEL'][nextState] += 8
postSynaptic['PDER'][nextState] += 9
postSynaptic['PVCL'][nextState] += 2
postSynaptic['PVR'][nextState] += 1
def PVNL():
postSynaptic['AVAL'][nextState] += 2
postSynaptic['AVBR'][nextState] += 3
postSynaptic['AVDL'][nextState] += 3
postSynaptic['AVDR'][nextState] += 3
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVFR'][nextState] += 1
postSynaptic['AVG'][nextState] += 1
postSynaptic['AVJL'][nextState] += 5
postSynaptic['AVJR'][nextState] += 5
postSynaptic['AVL'][nextState] += 2
postSynaptic['BDUL'][nextState] += 1
postSynaptic['BDUR'][nextState] += 2
postSynaptic['DD1'][nextState] += 2
postSynaptic['MVL09'][nextState] += 3
postSynaptic['PQR'][nextState] += 1
postSynaptic['PVCL'][nextState] += 1
postSynaptic['PVNR'][nextState] += 5
postSynaptic['PVQR'][nextState] += 1
postSynaptic['PVT'][nextState] += 1
postSynaptic['PVWL'][nextState] += 1
postSynaptic['RIFL'][nextState] += 1
def PVNR():
postSynaptic['AVAL'][nextState] += 2
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 2
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVEL'][nextState] += 3
postSynaptic['AVJL'][nextState] += 4
postSynaptic['AVJR'][nextState] += 1
postSynaptic['AVL'][nextState] += 2
postSynaptic['BDUL'][nextState] += 1
postSynaptic['BDUR'][nextState] += 2
postSynaptic['DD3'][nextState] += 1
postSynaptic['HSNR'][nextState] += 2
postSynaptic['MVL12'][nextState] += 1
postSynaptic['MVL13'][nextState] += 2
postSynaptic['PQR'][nextState] += 2
postSynaptic['PVCL'][nextState] += 1
postSynaptic['PVNL'][nextState] += 1
postSynaptic['PVT'][nextState] += 2
postSynaptic['PVWL'][nextState] += 2
postSynaptic['VC2'][nextState] += 1
postSynaptic['VC3'][nextState] += 1
postSynaptic['VD12'][nextState] += 1
postSynaptic['VD6'][nextState] += 1
postSynaptic['VD7'][nextState] += 1
def PVPL():
postSynaptic['ADAL'][nextState] += 1
postSynaptic['AQR'][nextState] += 8
postSynaptic['AVAL'][nextState] += 2
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVBL'][nextState] += 5
postSynaptic['AVBR'][nextState] += 6
postSynaptic['AVDR'][nextState] += 2
postSynaptic['AVER'][nextState] += 1
postSynaptic['AVHR'][nextState] += 1
postSynaptic['AVKL'][nextState] += 1
postSynaptic['AVKR'][nextState] += 6
postSynaptic['DVC'][nextState] += 2
postSynaptic['PHAR'][nextState] += 3
postSynaptic['PQR'][nextState] += 4
postSynaptic['PVCR'][nextState] += 3
postSynaptic['PVPR'][nextState] += 1
postSynaptic['PVT'][nextState] += 1
postSynaptic['RIGL'][nextState] += 2
postSynaptic['VD13'][nextState] += 2
postSynaptic['VD3'][nextState] += 1
def PVPR():
postSynaptic['ADFR'][nextState] += 1
postSynaptic['AQR'][nextState] += 11
postSynaptic['ASHR'][nextState] += 1
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 2
postSynaptic['AVBL'][nextState] += 4
postSynaptic['AVBR'][nextState] += 5
postSynaptic['AVHL'][nextState] += 3
postSynaptic['AVKL'][nextState] += 1
postSynaptic['AVL'][nextState] += 4
postSynaptic['DD2'][nextState] += 1
postSynaptic['DVC'][nextState] += 14
postSynaptic['PVCL'][nextState] += 4
postSynaptic['PVCR'][nextState] += 7
postSynaptic['PVPL'][nextState] += 1
postSynaptic['PVQR'][nextState] += 1
postSynaptic['RIAR'][nextState] += 2
postSynaptic['RIGR'][nextState] += 1
postSynaptic['RIMR'][nextState] += 1
postSynaptic['RMGR'][nextState] += 1
postSynaptic['VD4'][nextState] += 1
postSynaptic['VD5'][nextState] += 1
def PVQL():
postSynaptic['ADAL'][nextState] += 1
postSynaptic['AIAL'][nextState] += 3
postSynaptic['ASJL'][nextState] += 1
postSynaptic['ASKL'][nextState] += 9
postSynaptic['HSNL'][nextState] += 2
postSynaptic['PVQR'][nextState] += 2
postSynaptic['RMGL'][nextState] += 1
def PVQR():
postSynaptic['ADAR'][nextState] += 1
postSynaptic['AIAR'][nextState] += 7
postSynaptic['ASER'][nextState] += 1
postSynaptic['ASKR'][nextState] += 8
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVFL'][nextState] += 1
postSynaptic['AVFR'][nextState] += 1
postSynaptic['AVL'][nextState] += 1
postSynaptic['AWAR'][nextState] += 2
postSynaptic['DD1'][nextState] += 1
postSynaptic['DVC'][nextState] += 1
postSynaptic['HSNR'][nextState] += 1
postSynaptic['PVNL'][nextState] += 1
postSynaptic['PVQL'][nextState] += 1
postSynaptic['PVT'][nextState] += 1
postSynaptic['RIFR'][nextState] += 1
postSynaptic['VD1'][nextState] += 1
def PVR():
postSynaptic['ADAL'][nextState] += 1
postSynaptic['ALML'][nextState] += 1
postSynaptic['AS6'][nextState] += 1
postSynaptic['AVBL'][nextState] += 4
postSynaptic['AVBR'][nextState] += 4
postSynaptic['AVJL'][nextState] += 3
postSynaptic['AVJR'][nextState] += 2
postSynaptic['AVKL'][nextState] += 1
postSynaptic['DA9'][nextState] += 1
postSynaptic['DB2'][nextState] += 1
postSynaptic['DB3'][nextState] += 1
postSynaptic['DVA'][nextState] += 3
postSynaptic['IL1DL'][nextState] += 1
postSynaptic['IL1DR'][nextState] += 1
postSynaptic['IL1VL'][nextState] += 1
postSynaptic['IL1VR'][nextState] += 1
postSynaptic['LUAL'][nextState] += 1
postSynaptic['LUAR'][nextState] += 1
postSynaptic['PDEL'][nextState] += 1
postSynaptic['PDER'][nextState] += 1
postSynaptic['PLMR'][nextState] += 2
postSynaptic['PVCR'][nextState] += 1
postSynaptic['RIPL'][nextState] += 3
postSynaptic['RIPR'][nextState] += 3
postSynaptic['SABD'][nextState] += 1
postSynaptic['URADL'][nextState] += 1
def PVT():
postSynaptic['AIBL'][nextState] += 3
postSynaptic['AIBR'][nextState] += 5
postSynaptic['AVKL'][nextState] += 9
postSynaptic['AVKR'][nextState] += 7
postSynaptic['AVL'][nextState] += 2
postSynaptic['DVC'][nextState] += 2
postSynaptic['PVPL'][nextState] += 1
postSynaptic['RIBL'][nextState] += 1
postSynaptic['RIBR'][nextState] += 1
postSynaptic['RIGL'][nextState] += 2
postSynaptic['RIGR'][nextState] += 3
postSynaptic['RIH'][nextState] += 1
postSynaptic['RMEV'][nextState] += 1
postSynaptic['RMFL'][nextState] += 2
postSynaptic['RMFR'][nextState] += 3
postSynaptic['SMBDR'][nextState] += 1
def PVWL():
postSynaptic['AVJL'][nextState] += 1
postSynaptic['PVCR'][nextState] += 2
postSynaptic['PVT'][nextState] += 2
postSynaptic['PVWR'][nextState] += 1
postSynaptic['VA12'][nextState] += 1
def PVWR():
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVDR'][nextState] += 1
postSynaptic['PVCR'][nextState] += 2
postSynaptic['PVT'][nextState] += 1
postSynaptic['VA12'][nextState] += 1
def RIAL():
postSynaptic['CEPVL'][nextState] += 1
postSynaptic['RIAR'][nextState] += 1
postSynaptic['RIVL'][nextState] += 2
postSynaptic['RIVR'][nextState] += 4
postSynaptic['RMDDL'][nextState] += 12
postSynaptic['RMDDR'][nextState] += 7
postSynaptic['RMDL'][nextState] += 6
postSynaptic['RMDR'][nextState] += 6
postSynaptic['RMDVL'][nextState] += 9
postSynaptic['RMDVR'][nextState] += 11
postSynaptic['SIADL'][nextState] += 2
postSynaptic['SMDDL'][nextState] += 8
postSynaptic['SMDDR'][nextState] += 10
postSynaptic['SMDVL'][nextState] += 6
postSynaptic['SMDVR'][nextState] += 11
def RIAR():
postSynaptic['CEPVR'][nextState] += 1
postSynaptic['IL1R'][nextState] += 1
postSynaptic['RIAL'][nextState] += 4
postSynaptic['RIVL'][nextState] += 1
postSynaptic['RMDDL'][nextState] += 10
postSynaptic['RMDDR'][nextState] += 11
postSynaptic['RMDL'][nextState] += 3
postSynaptic['RMDR'][nextState] += 8
postSynaptic['RMDVL'][nextState] += 12
postSynaptic['RMDVR'][nextState] += 10
postSynaptic['SAADR'][nextState] += 1
postSynaptic['SIADL'][nextState] += 1
postSynaptic['SIADR'][nextState] += 1
postSynaptic['SIAVL'][nextState] += 1
postSynaptic['SMDDL'][nextState] += 7
postSynaptic['SMDDR'][nextState] += 7
postSynaptic['SMDVL'][nextState] += 13
postSynaptic['SMDVR'][nextState] += 7
def RIBL():
postSynaptic['AIBR'][nextState] += 2
postSynaptic['AUAL'][nextState] += 1
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 2
postSynaptic['AVDR'][nextState] += 1
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVER'][nextState] += 5
postSynaptic['BAGR'][nextState] += 1
postSynaptic['OLQDL'][nextState] += 2
postSynaptic['OLQVL'][nextState] += 1
postSynaptic['PVT'][nextState] += 1
postSynaptic['RIAL'][nextState] += 3
postSynaptic['RIBL'][nextState] += 1
postSynaptic['RIBR'][nextState] += 3
postSynaptic['RIGL'][nextState] += 1
postSynaptic['SIADL'][nextState] += 1
postSynaptic['SIAVL'][nextState] += 1
postSynaptic['SIBDL'][nextState] += 1
postSynaptic['SIBVL'][nextState] += 1
postSynaptic['SIBVR'][nextState] += 1
postSynaptic['SMBDL'][nextState] += 1
postSynaptic['SMDDL'][nextState] += 1
postSynaptic['SMDVR'][nextState] += 4
def RIBR():
postSynaptic['AIBL'][nextState] += 1
postSynaptic['AIZR'][nextState] += 1
postSynaptic['AVAR'][nextState] += 2
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 1
postSynaptic['AVEL'][nextState] += 3
postSynaptic['AVER'][nextState] += 1
postSynaptic['BAGL'][nextState] += 1
postSynaptic['OLQDR'][nextState] += 2
postSynaptic['OLQVR'][nextState] += 1
postSynaptic['PVT'][nextState] += 1
postSynaptic['RIAR'][nextState] += 2
postSynaptic['RIBL'][nextState] += 3
postSynaptic['RIBR'][nextState] += 1
postSynaptic['RIGR'][nextState] += 2
postSynaptic['RIH'][nextState] += 1
postSynaptic['SIADR'][nextState] += 1
postSynaptic['SIAVR'][nextState] += 1
postSynaptic['SIBDR'][nextState] += 1
postSynaptic['SIBVR'][nextState] += 1
postSynaptic['SMBDR'][nextState] += 1
postSynaptic['SMDDL'][nextState] += 2
postSynaptic['SMDDR'][nextState] += 1
postSynaptic['SMDVL'][nextState] += 2
def RICL():
postSynaptic['ADAR'][nextState] += 1
postSynaptic['ASHL'][nextState] += 2
postSynaptic['AVAL'][nextState] += 5
postSynaptic['AVAR'][nextState] += 6
postSynaptic['AVKL'][nextState] += 1
postSynaptic['AVKR'][nextState] += 2
postSynaptic['AWBR'][nextState] += 1
postSynaptic['RIML'][nextState] += 1
postSynaptic['RIMR'][nextState] += 3
postSynaptic['RIVR'][nextState] += 1
postSynaptic['RMFR'][nextState] += 1
postSynaptic['SMBDL'][nextState] += 2
postSynaptic['SMDDL'][nextState] += 3
postSynaptic['SMDDR'][nextState] += 3
postSynaptic['SMDVR'][nextState] += 1
def RICR():
postSynaptic['ADAR'][nextState] += 1
postSynaptic['ASHR'][nextState] += 2
postSynaptic['AVAL'][nextState] += 5
postSynaptic['AVAR'][nextState] += 5
postSynaptic['AVKL'][nextState] += 1
postSynaptic['SMBDR'][nextState] += 1
postSynaptic['SMDDL'][nextState] += 4
postSynaptic['SMDDR'][nextState] += 3
postSynaptic['SMDVL'][nextState] += 2
postSynaptic['SMDVR'][nextState] += 1
def RID():
postSynaptic['ALA'][nextState] += 1
postSynaptic['AS2'][nextState] += 1
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 2
postSynaptic['DA6'][nextState] += 3
postSynaptic['DA9'][nextState] += 1
postSynaptic['DB1'][nextState] += 1
postSynaptic['DD1'][nextState] += 4
postSynaptic['DD2'][nextState] += 4
postSynaptic['DD3'][nextState] += 3
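# Negative weights below follow the sign convention used throughout this file
# and are assumed to model inhibitory input (e.g. GABAergic connections onto
# muscle cells), so firing this neuron reduces the target's accumulated value.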
postSynaptic['MDL14'][nextState] += -2
postSynaptic['MDL21'][nextState] += -3
postSynaptic['PDB'][nextState] += 2
postSynaptic['VD13'][nextState] += 1
postSynaptic['VD5'][nextState] += 1
def RIFL():
postSynaptic['ALML'][nextState] += 2
postSynaptic['AVBL'][nextState] += 10
postSynaptic['AVBR'][nextState] += 1
postSynaptic['AVG'][nextState] += 1
postSynaptic['AVHR'][nextState] += 1
postSynaptic['AVJR'][nextState] += 2
postSynaptic['PVPL'][nextState] += 3
postSynaptic['RIML'][nextState] += 4
postSynaptic['VD1'][nextState] += 1
def RIFR():
postSynaptic['ASHR'][nextState] += 2
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 17
postSynaptic['AVFL'][nextState] += 1
postSynaptic['AVG'][nextState] += 1
postSynaptic['AVHL'][nextState] += 1
postSynaptic['AVJL'][nextState] += 1
postSynaptic['AVJR'][nextState] += 2
postSynaptic['HSNR'][nextState] += 1
postSynaptic['PVCL'][nextState] += 1
postSynaptic['PVCR'][nextState] += 1
postSynaptic['PVPR'][nextState] += 4
postSynaptic['RIMR'][nextState] += 4
postSynaptic['RIPR'][nextState] += 1
def RIGL():
postSynaptic['AIBR'][nextState] += 3
postSynaptic['AIZR'][nextState] += 1
postSynaptic['ALNL'][nextState] += 1
postSynaptic['AQR'][nextState] += 2
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVER'][nextState] += 1
postSynaptic['AVKL'][nextState] += 1
postSynaptic['AVKR'][nextState] += 2
postSynaptic['BAGR'][nextState] += 2
postSynaptic['DVC'][nextState] += 1
postSynaptic['OLLL'][nextState] += 1
postSynaptic['OLQDL'][nextState] += 1
postSynaptic['OLQVL'][nextState] += 1
postSynaptic['RIBL'][nextState] += 2
postSynaptic['RIGR'][nextState] += 3
postSynaptic['RIR'][nextState] += 2
postSynaptic['RMEL'][nextState] += 2
postSynaptic['RMHR'][nextState] += 3
postSynaptic['URYDL'][nextState] += 1
postSynaptic['URYVL'][nextState] += 1
postSynaptic['VB2'][nextState] += 1
postSynaptic['VD1'][nextState] += 2
def RIGR():
postSynaptic['AIBL'][nextState] += 3
postSynaptic['ALNR'][nextState] += 1
postSynaptic['AQR'][nextState] += 1
postSynaptic['AVER'][nextState] += 2
postSynaptic['AVKL'][nextState] += 4
postSynaptic['AVKR'][nextState] += 2
postSynaptic['BAGL'][nextState] += 1
postSynaptic['OLLR'][nextState] += 1
postSynaptic['OLQDR'][nextState] += 1
postSynaptic['OLQVR'][nextState] += 1
postSynaptic['RIBR'][nextState] += 2
postSynaptic['RIGL'][nextState] += 3
postSynaptic['RIR'][nextState] += 1
postSynaptic['RMHL'][nextState] += 4
postSynaptic['URYDR'][nextState] += 1
postSynaptic['URYVR'][nextState] += 1
def RIH():
postSynaptic['ADFR'][nextState] += 1
postSynaptic['AIZL'][nextState] += 4
postSynaptic['AIZR'][nextState] += 4
postSynaptic['AUAR'][nextState] += 1
postSynaptic['BAGR'][nextState] += 1
postSynaptic['CEPDL'][nextState] += 2
postSynaptic['CEPDR'][nextState] += 2
postSynaptic['CEPVL'][nextState] += 2
postSynaptic['CEPVR'][nextState] += 2
postSynaptic['FLPL'][nextState] += 1
postSynaptic['IL2L'][nextState] += 2
postSynaptic['IL2R'][nextState] += 1
postSynaptic['OLQDL'][nextState] += 4
postSynaptic['OLQDR'][nextState] += 2
postSynaptic['OLQVL'][nextState] += 1
postSynaptic['OLQVR'][nextState] += 6
postSynaptic['RIAL'][nextState] += 10
postSynaptic['RIAR'][nextState] += 8
postSynaptic['RIBL'][nextState] += 5
postSynaptic['RIBR'][nextState] += 4
postSynaptic['RIPL'][nextState] += 4
postSynaptic['RIPR'][nextState] += 6
postSynaptic['RMER'][nextState] += 2
postSynaptic['RMEV'][nextState] += 1
postSynaptic['URYVR'][nextState] += 1
def RIML():
postSynaptic['AIBR'][nextState] += 1
postSynaptic['AIYL'][nextState] += 1
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 2
postSynaptic['AVBL'][nextState] += 2
postSynaptic['AVBR'][nextState] += 3
postSynaptic['AVEL'][nextState] += 2
postSynaptic['AVER'][nextState] += 3
postSynaptic['MDR05'][nextState] += 2
postSynaptic['MVR05'][nextState] += 2
postSynaptic['RIBL'][nextState] += 1
postSynaptic['RIS'][nextState] += 1
postSynaptic['RMDL'][nextState] += 1
postSynaptic['RMDR'][nextState] += 1
postSynaptic['RMFR'][nextState] += 1
postSynaptic['SAADR'][nextState] += 1
postSynaptic['SAAVL'][nextState] += 3
postSynaptic['SAAVR'][nextState] += 2
postSynaptic['SMDDR'][nextState] += 5
postSynaptic['SMDVL'][nextState] += 1
def RIMR():
postSynaptic['ADAR'][nextState] += 1
postSynaptic['AIBL'][nextState] += 5
postSynaptic['AIYR'][nextState] += 1
postSynaptic['AVAL'][nextState] += 5
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVBL'][nextState] += 2
postSynaptic['AVBR'][nextState] += 5
postSynaptic['AVEL'][nextState] += 3
postSynaptic['AVER'][nextState] += 2
postSynaptic['AVJL'][nextState] += 1
postSynaptic['AVKL'][nextState] += 1
postSynaptic['MDL05'][nextState] += 1
postSynaptic['MDL07'][nextState] += 1
postSynaptic['MVL05'][nextState] += 1
postSynaptic['MVL07'][nextState] += 1
postSynaptic['RIBR'][nextState] += 1
postSynaptic['RIS'][nextState] += 2
postSynaptic['RMDL'][nextState] += 1
postSynaptic['RMDR'][nextState] += 1
postSynaptic['RMFL'][nextState] += 1
postSynaptic['RMFR'][nextState] += 1
postSynaptic['SAAVL'][nextState] += 3
postSynaptic['SAAVR'][nextState] += 3
postSynaptic['SMDDL'][nextState] += 2
postSynaptic['SMDDR'][nextState] += 4
def RIPL():
postSynaptic['OLQDL'][nextState] += 1
postSynaptic['OLQDR'][nextState] += 1
postSynaptic['RMED'][nextState] += 1
def RIPR():
postSynaptic['OLQDL'][nextState] += 1
postSynaptic['OLQDR'][nextState] += 1
postSynaptic['RMED'][nextState] += 1
def RIR():
postSynaptic['AFDR'][nextState] += 1
postSynaptic['AIZL'][nextState] += 3
postSynaptic['AIZR'][nextState] += 5
postSynaptic['AUAL'][nextState] += 1
postSynaptic['AWBR'][nextState] += 1
postSynaptic['BAGL'][nextState] += 1
postSynaptic['BAGR'][nextState] += 2
postSynaptic['DVA'][nextState] += 2
postSynaptic['HSNL'][nextState] += 1
postSynaptic['PVPL'][nextState] += 1
postSynaptic['RIAL'][nextState] += 5
postSynaptic['RIAR'][nextState] += 1
postSynaptic['RIGL'][nextState] += 1
postSynaptic['URXL'][nextState] += 5
postSynaptic['URXR'][nextState] += 1
def RIS():
postSynaptic['AIBR'][nextState] += 1
postSynaptic['AVEL'][nextState] += 7
postSynaptic['AVER'][nextState] += 7
postSynaptic['AVJL'][nextState] += 1
postSynaptic['AVKL'][nextState] += 1
postSynaptic['AVKR'][nextState] += 4
postSynaptic['AVL'][nextState] += 2
postSynaptic['CEPDR'][nextState] += 1
postSynaptic['CEPVL'][nextState] += 2
postSynaptic['CEPVR'][nextState] += 1
postSynaptic['DB1'][nextState] += 1
postSynaptic['OLLR'][nextState] += 1
postSynaptic['RIBL'][nextState] += 3
postSynaptic['RIBR'][nextState] += 5
postSynaptic['RIML'][nextState] += 2
postSynaptic['RIMR'][nextState] += 5
postSynaptic['RMDDL'][nextState] += 1
postSynaptic['RMDL'][nextState] += 2
postSynaptic['RMDR'][nextState] += 4
postSynaptic['SMDDL'][nextState] += 1
postSynaptic['SMDDR'][nextState] += 3
postSynaptic['SMDVL'][nextState] += 1
postSynaptic['SMDVR'][nextState] += 1
postSynaptic['URYVR'][nextState] += 1
def RIVL():
postSynaptic['AIBL'][nextState] += 1
postSynaptic['MVR05'][nextState] += -2
postSynaptic['MVR06'][nextState] += -2
postSynaptic['MVR08'][nextState] += -3
postSynaptic['RIAL'][nextState] += 1
postSynaptic['RIAR'][nextState] += 1
postSynaptic['RIVR'][nextState] += 2
postSynaptic['RMDL'][nextState] += 2
postSynaptic['SAADR'][nextState] += 3
postSynaptic['SDQR'][nextState] += 2
postSynaptic['SIAVR'][nextState] += 2
postSynaptic['SMDDR'][nextState] += 1
postSynaptic['SMDVL'][nextState] += 1
def RIVR():
postSynaptic['AIBR'][nextState] += 1
postSynaptic['MVL05'][nextState] += -2
postSynaptic['MVL06'][nextState] += -2
postSynaptic['MVL08'][nextState] += -2
postSynaptic['MVR04'][nextState] += -2
postSynaptic['MVR06'][nextState] += -2
postSynaptic['RIAL'][nextState] += 2
postSynaptic['RIAR'][nextState] += 1
postSynaptic['RIVL'][nextState] += 2
postSynaptic['RMDDL'][nextState] += 1
postSynaptic['RMDR'][nextState] += 1
postSynaptic['RMDVR'][nextState] += 1
postSynaptic['RMEV'][nextState] += 1
postSynaptic['SAADL'][nextState] += 2
postSynaptic['SDQR'][nextState] += 2
postSynaptic['SIAVL'][nextState] += 2
postSynaptic['SMDDL'][nextState] += 2
postSynaptic['SMDVR'][nextState] += 4
def RMDDL():
postSynaptic['MDR01'][nextState] += 1
postSynaptic['MDR02'][nextState] += 1
postSynaptic['MDR03'][nextState] += 1
postSynaptic['MDR04'][nextState] += 1
postSynaptic['MDR08'][nextState] += 2
postSynaptic['MVR01'][nextState] += 1
postSynaptic['OLQVL'][nextState] += 1
postSynaptic['RMDL'][nextState] += 1
postSynaptic['RMDVL'][nextState] += 1
postSynaptic['RMDVR'][nextState] += 7
postSynaptic['SMDDL'][nextState] += 1
def RMDDR():
postSynaptic['MDL01'][nextState] += 1
postSynaptic['MDL02'][nextState] += 1
postSynaptic['MDL03'][nextState] += 2
postSynaptic['MDL04'][nextState] += 1
postSynaptic['MDR04'][nextState] += 1
postSynaptic['MVR01'][nextState] += 1
postSynaptic['MVR02'][nextState] += 1
postSynaptic['OLQVR'][nextState] += 1
postSynaptic['RMDVL'][nextState] += 12
postSynaptic['RMDVR'][nextState] += 1
postSynaptic['SAADR'][nextState] += 1
postSynaptic['SMDDR'][nextState] += 1
postSynaptic['URYDL'][nextState] += 1
def RMDL():
postSynaptic['MDL03'][nextState] += 1
postSynaptic['MDL05'][nextState] += 2
postSynaptic['MDR01'][nextState] += 1
postSynaptic['MDR03'][nextState] += 1
postSynaptic['MVL01'][nextState] += 1
postSynaptic['MVR01'][nextState] += 1
postSynaptic['MVR03'][nextState] += 1
postSynaptic['MVR05'][nextState] += 2
postSynaptic['MVR07'][nextState] += 1
postSynaptic['OLLR'][nextState] += 2
postSynaptic['RIAL'][nextState] += 4
postSynaptic['RIAR'][nextState] += 3
postSynaptic['RMDDL'][nextState] += 1
postSynaptic['RMDDR'][nextState] += 1
postSynaptic['RMDR'][nextState] += 3
postSynaptic['RMDVL'][nextState] += 1
postSynaptic['RMER'][nextState] += 1
postSynaptic['RMFL'][nextState] += 1
def RMDR():
postSynaptic['AVKL'][nextState] += 1
postSynaptic['MDL03'][nextState] += 1
postSynaptic['MDL05'][nextState] += 1
postSynaptic['MDR05'][nextState] += 1
postSynaptic['MVL03'][nextState] += 1
postSynaptic['MVL05'][nextState] += 1
postSynaptic['RIAL'][nextState] += 3
postSynaptic['RIAR'][nextState] += 7
postSynaptic['RIMR'][nextState] += 2
postSynaptic['RIS'][nextState] += 1
postSynaptic['RMDDL'][nextState] += 1
postSynaptic['RMDL'][nextState] += 1
postSynaptic['RMDVR'][nextState] += 1
def RMDVL():
postSynaptic['AVER'][nextState] += 1
postSynaptic['MDR01'][nextState] += 1
postSynaptic['MVL04'][nextState] += 1
postSynaptic['MVR01'][nextState] += 1
postSynaptic['MVR02'][nextState] += 1
postSynaptic['MVR03'][nextState] += 1
postSynaptic['MVR04'][nextState] += 1
postSynaptic['MVR05'][nextState] += 1
postSynaptic['MVR06'][nextState] += 1
postSynaptic['MVR08'][nextState] += 1
postSynaptic['OLQDL'][nextState] += 1
postSynaptic['RMDDL'][nextState] += 1
postSynaptic['RMDDR'][nextState] += 6
postSynaptic['RMDL'][nextState] += 1
postSynaptic['RMDVR'][nextState] += 1
postSynaptic['SAAVL'][nextState] += 1
postSynaptic['SMDVL'][nextState] += 1
def RMDVR():
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVER'][nextState] += 1
postSynaptic['MDL01'][nextState] += 1
postSynaptic['MVL01'][nextState] += 1
postSynaptic['MVL02'][nextState] += 1
postSynaptic['MVL03'][nextState] += 1
postSynaptic['MVL04'][nextState] += 1
postSynaptic['MVL05'][nextState] += 1
postSynaptic['MVL06'][nextState] += 1
postSynaptic['MVL08'][nextState] += 1
postSynaptic['MVR04'][nextState] += 1
postSynaptic['MVR06'][nextState] += 1
postSynaptic['MVR08'][nextState] += 1
postSynaptic['OLQDR'][nextState] += 1
postSynaptic['RMDDL'][nextState] += 4
postSynaptic['RMDDR'][nextState] += 1
postSynaptic['RMDR'][nextState] += 1
postSynaptic['RMDVL'][nextState] += 1
postSynaptic['SAAVR'][nextState] += 1
postSynaptic['SIBDR'][nextState] += 1
postSynaptic['SIBVR'][nextState] += 1
postSynaptic['SMDVR'][nextState] += 1
def RMED():
postSynaptic['IL1VL'][nextState] += 1
postSynaptic['MVL02'][nextState] += -4
postSynaptic['MVL04'][nextState] += -4
postSynaptic['MVL06'][nextState] += -4
postSynaptic['MVR02'][nextState] += -4
postSynaptic['MVR04'][nextState] += -4
postSynaptic['RIBL'][nextState] += 1
postSynaptic['RIBR'][nextState] += 1
postSynaptic['RIPL'][nextState] += 1
postSynaptic['RIPR'][nextState] += 1
postSynaptic['RMEV'][nextState] += 2
def RMEL():
postSynaptic['MDR01'][nextState] += -5
postSynaptic['MDR03'][nextState] += -5
postSynaptic['MVR01'][nextState] += -5
postSynaptic['MVR03'][nextState] += -5
postSynaptic['RIGL'][nextState] += 1
postSynaptic['RMEV'][nextState] += 1
def RMER():
postSynaptic['MDL01'][nextState] += -7
postSynaptic['MDL03'][nextState] += -7
postSynaptic['MVL01'][nextState] += -7
postSynaptic['RMEV'][nextState] += 1
def RMEV():
postSynaptic['AVEL'][nextState] += 1
postSynaptic['AVER'][nextState] += 1
postSynaptic['IL1DL'][nextState] += 1
postSynaptic['IL1DR'][nextState] += 1
postSynaptic['MDL02'][nextState] += -3
postSynaptic['MDL04'][nextState] += -3
postSynaptic['MDL06'][nextState] += -3
postSynaptic['MDR02'][nextState] += -3
postSynaptic['MDR04'][nextState] += -3
postSynaptic['RMED'][nextState] += 2
postSynaptic['RMEL'][nextState] += 1
postSynaptic['RMER'][nextState] += 1
postSynaptic['SMDDR'][nextState] += 1
def RMFL():
postSynaptic['AVKL'][nextState] += 4
postSynaptic['AVKR'][nextState] += 4
postSynaptic['MDR03'][nextState] += 1
postSynaptic['MVR01'][nextState] += 1
postSynaptic['MVR03'][nextState] += 1
postSynaptic['PVT'][nextState] += 1
postSynaptic['RIGR'][nextState] += 1
postSynaptic['RMDR'][nextState] += 3
postSynaptic['RMGR'][nextState] += 1
postSynaptic['URBR'][nextState] += 1
def RMFR():
postSynaptic['AVKL'][nextState] += 3
postSynaptic['AVKR'][nextState] += 3
postSynaptic['RMDL'][nextState] += 2
def RMGL():
postSynaptic['ADAL'][nextState] += 1
postSynaptic['ADLL'][nextState] += 1
postSynaptic['AIBR'][nextState] += 1
postSynaptic['ALML'][nextState] += 1
postSynaptic['ALNL'][nextState] += 1
postSynaptic['ASHL'][nextState] += 2
postSynaptic['ASKL'][nextState] += 2
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 2
postSynaptic['AVEL'][nextState] += 2
postSynaptic['AWBL'][nextState] += 1
postSynaptic['CEPDL'][nextState] += 1
postSynaptic['IL2L'][nextState] += 1
postSynaptic['MDL05'][nextState] += 2
postSynaptic['MVL05'][nextState] += 2
postSynaptic['RID'][nextState] += 1
postSynaptic['RMDL'][nextState] += 1
postSynaptic['RMDR'][nextState] += 3
postSynaptic['RMDVL'][nextState] += 3
postSynaptic['RMHL'][nextState] += 3
postSynaptic['RMHR'][nextState] += 1
postSynaptic['SIAVL'][nextState] += 1
postSynaptic['SIBVL'][nextState] += 3
postSynaptic['SIBVR'][nextState] += 1
postSynaptic['SMBVL'][nextState] += 1
postSynaptic['URXL'][nextState] += 2
def RMGR():
postSynaptic['ADAR'][nextState] += 1
postSynaptic['AIMR'][nextState] += 1
postSynaptic['ALNR'][nextState] += 1
postSynaptic['ASHR'][nextState] += 2
postSynaptic['ASKR'][nextState] += 1
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVBR'][nextState] += 1
postSynaptic['AVDL'][nextState] += 1
postSynaptic['AVER'][nextState] += 3
postSynaptic['AVJL'][nextState] += 1
postSynaptic['AWBR'][nextState] += 1
postSynaptic['IL2R'][nextState] += 1
postSynaptic['MDR05'][nextState] += 1
postSynaptic['MVR05'][nextState] += 1
postSynaptic['MVR07'][nextState] += 1
postSynaptic['RIR'][nextState] += 1
postSynaptic['RMDL'][nextState] += 4
postSynaptic['RMDR'][nextState] += 2
postSynaptic['RMDVR'][nextState] += 5
postSynaptic['RMHR'][nextState] += 1
postSynaptic['URXR'][nextState] += 2
def RMHL():
postSynaptic['MDR01'][nextState] += 2
postSynaptic['MDR03'][nextState] += 3
postSynaptic['MVR01'][nextState] += 2
postSynaptic['RMDR'][nextState] += 1
postSynaptic['RMGL'][nextState] += 3
postSynaptic['SIBVR'][nextState] += 1
def RMHR():
postSynaptic['MDL01'][nextState] += 2
postSynaptic['MDL03'][nextState] += 2
postSynaptic['MDL05'][nextState] += 2
postSynaptic['MVL01'][nextState] += 2
postSynaptic['RMER'][nextState] += 1
postSynaptic['RMGL'][nextState] += 1
postSynaptic['RMGR'][nextState] += 1
def SAADL():
postSynaptic['AIBL'][nextState] += 1
postSynaptic['AVAL'][nextState] += 6
postSynaptic['RIML'][nextState] += 3
postSynaptic['RIMR'][nextState] += 6
postSynaptic['RMGR'][nextState] += 1
postSynaptic['SMBDL'][nextState] += 1
def SAADR():
postSynaptic['AIBR'][nextState] += 1
postSynaptic['AVAR'][nextState] += 3
postSynaptic['OLLL'][nextState] += 1
postSynaptic['RIML'][nextState] += 4
postSynaptic['RIMR'][nextState] += 5
postSynaptic['RMDDR'][nextState] += 1
postSynaptic['RMFL'][nextState] += 1
postSynaptic['RMGL'][nextState] += 1
def SAAVL():
postSynaptic['AIBL'][nextState] += 1
postSynaptic['ALNL'][nextState] += 1
postSynaptic['AVAL'][nextState] += 16
postSynaptic['OLLR'][nextState] += 1
postSynaptic['RIML'][nextState] += 2
postSynaptic['RIMR'][nextState] += 12
postSynaptic['RMDVL'][nextState] += 2
postSynaptic['RMFR'][nextState] += 2
postSynaptic['SMBVR'][nextState] += 3
postSynaptic['SMDDR'][nextState] += 8
def SAAVR():
postSynaptic['AVAR'][nextState] += 13
postSynaptic['RIML'][nextState] += 5
postSynaptic['RIMR'][nextState] += 2
postSynaptic['RMDVR'][nextState] += 1
postSynaptic['SMBVL'][nextState] += 2
postSynaptic['SMDDL'][nextState] += 6
def SABD():
postSynaptic['AVAL'][nextState] += 4
postSynaptic['VA2'][nextState] += 4
postSynaptic['VA3'][nextState] += 2
postSynaptic['VA4'][nextState] += 1
def SABVL():
postSynaptic['AVAR'][nextState] += 3
postSynaptic['DA1'][nextState] += 2
postSynaptic['DA2'][nextState] += 1
def SABVR():
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 1
postSynaptic['DA1'][nextState] += 3
def SDQL():
postSynaptic['ALML'][nextState] += 2
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 3
postSynaptic['AVEL'][nextState] += 1
postSynaptic['FLPL'][nextState] += 1
postSynaptic['RICR'][nextState] += 1
postSynaptic['RIS'][nextState] += 3
postSynaptic['RMFL'][nextState] += 1
postSynaptic['SDQR'][nextState] += 1
def SDQR():
postSynaptic['ADLL'][nextState] += 1
postSynaptic['AIBL'][nextState] += 2
postSynaptic['AVAL'][nextState] += 3
postSynaptic['AVBL'][nextState] += 7
postSynaptic['AVBR'][nextState] += 4
postSynaptic['DVA'][nextState] += 3
postSynaptic['RICR'][nextState] += 1
postSynaptic['RIVL'][nextState] += 2
postSynaptic['RIVR'][nextState] += 2
postSynaptic['RMHL'][nextState] += 2
postSynaptic['RMHR'][nextState] += 1
postSynaptic['SDQL'][nextState] += 1
postSynaptic['SIBVL'][nextState] += 1
def SIADL():
postSynaptic['RIBL'][nextState] += 1
def SIADR():
postSynaptic['RIBR'][nextState] += 1
def SIAVL():
postSynaptic['RIBL'][nextState] += 1
def SIAVR():
postSynaptic['RIBR'][nextState] += 1
def SIBDL():
postSynaptic['RIBL'][nextState] += 1
postSynaptic['SIBVL'][nextState] += 1
def SIBDR():
postSynaptic['AIML'][nextState] += 1
postSynaptic['RIBR'][nextState] += 1
postSynaptic['SIBVR'][nextState] += 1
def SIBVL():
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 1
postSynaptic['RIBL'][nextState] += 1
postSynaptic['SDQR'][nextState] += 1
postSynaptic['SIBDL'][nextState] += 1
def SIBVR():
postSynaptic['RIBL'][nextState] += 1
postSynaptic['RIBR'][nextState] += 1
postSynaptic['RMHL'][nextState] += 1
postSynaptic['SIBDR'][nextState] += 1
def SMBDL():
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVKL'][nextState] += 1
postSynaptic['AVKR'][nextState] += 1
postSynaptic['MDR01'][nextState] += 2
postSynaptic['MDR02'][nextState] += 2
postSynaptic['MDR03'][nextState] += 2
postSynaptic['MDR04'][nextState] += 2
postSynaptic['MDR06'][nextState] += 3
postSynaptic['RIBL'][nextState] += 1
postSynaptic['RMED'][nextState] += 3
postSynaptic['SAADL'][nextState] += 1
postSynaptic['SAAVR'][nextState] += 1
def SMBDR():
postSynaptic['ALNL'][nextState] += 1
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVKL'][nextState] += 1
postSynaptic['AVKR'][nextState] += 2
postSynaptic['MDL02'][nextState] += 1
postSynaptic['MDL03'][nextState] += 1
postSynaptic['MDL04'][nextState] += 1
postSynaptic['MDL06'][nextState] += 2
postSynaptic['MDR04'][nextState] += 1
postSynaptic['MDR08'][nextState] += 1
postSynaptic['RIBR'][nextState] += 1
postSynaptic['RMED'][nextState] += 4
postSynaptic['SAAVL'][nextState] += 3
def SMBVL():
postSynaptic['MVL01'][nextState] += 1
postSynaptic['MVL02'][nextState] += 1
postSynaptic['MVL03'][nextState] += 1
postSynaptic['MVL04'][nextState] += 1
postSynaptic['MVL05'][nextState] += 1
postSynaptic['MVL06'][nextState] += 1
postSynaptic['MVL08'][nextState] += 1
postSynaptic['PLNL'][nextState] += 1
postSynaptic['RMEV'][nextState] += 5
postSynaptic['SAADL'][nextState] += 3
postSynaptic['SAAVR'][nextState] += 2
def SMBVR():
postSynaptic['AVKL'][nextState] += 1
postSynaptic['AVKR'][nextState] += 1
postSynaptic['MVR01'][nextState] += 1
postSynaptic['MVR02'][nextState] += 1
postSynaptic['MVR03'][nextState] += 1
postSynaptic['MVR04'][nextState] += 1
postSynaptic['MVR06'][nextState] += 1
postSynaptic['MVR07'][nextState] += 1
postSynaptic['RMEV'][nextState] += 3
postSynaptic['SAADR'][nextState] += 4
postSynaptic['SAAVL'][nextState] += 3
def SMDDL():
postSynaptic['MDL04'][nextState] += 1
postSynaptic['MDL06'][nextState] += 1
postSynaptic['MDL08'][nextState] += 1
postSynaptic['MDR02'][nextState] += 1
postSynaptic['MDR03'][nextState] += 1
postSynaptic['MDR04'][nextState] += 1
postSynaptic['MDR05'][nextState] += 1
postSynaptic['MDR06'][nextState] += 1
postSynaptic['MDR07'][nextState] += 1
postSynaptic['MVL02'][nextState] += 1
postSynaptic['MVL04'][nextState] += 1
postSynaptic['RIAL'][nextState] += 1
postSynaptic['RIAR'][nextState] += 1
postSynaptic['RIBL'][nextState] += 1
postSynaptic['RIBR'][nextState] += 1
postSynaptic['RIS'][nextState] += 1
postSynaptic['RMDDL'][nextState] += 1
postSynaptic['SMDVR'][nextState] += 2
def SMDDR():
postSynaptic['MDL04'][nextState] += 1
postSynaptic['MDL05'][nextState] += 1
postSynaptic['MDL06'][nextState] += 1
postSynaptic['MDL08'][nextState] += 1
postSynaptic['MDR04'][nextState] += 1
postSynaptic['MDR06'][nextState] += 1
postSynaptic['MVR02'][nextState] += 1
postSynaptic['RIAL'][nextState] += 2
postSynaptic['RIAR'][nextState] += 1
postSynaptic['RIBR'][nextState] += 1
postSynaptic['RIS'][nextState] += 1
postSynaptic['RMDDR'][nextState] += 1
postSynaptic['VD1'][nextState] += 1
def SMDVL():
postSynaptic['MVL03'][nextState] += 1
postSynaptic['MVL06'][nextState] += 1
postSynaptic['MVR02'][nextState] += 1
postSynaptic['MVR03'][nextState] += 1
postSynaptic['MVR04'][nextState] += 1
postSynaptic['MVR06'][nextState] += 1
postSynaptic['PVR'][nextState] += 1
postSynaptic['RIAL'][nextState] += 3
postSynaptic['RIAR'][nextState] += 8
postSynaptic['RIBR'][nextState] += 2
postSynaptic['RIS'][nextState] += 1
postSynaptic['RIVL'][nextState] += 2
postSynaptic['RMDDR'][nextState] += 1
postSynaptic['RMDVL'][nextState] += 1
postSynaptic['SMDDR'][nextState] += 4
postSynaptic['SMDVR'][nextState] += 1
def SMDVR():
postSynaptic['MVL02'][nextState] += 1
postSynaptic['MVL03'][nextState] += 1
postSynaptic['MVL04'][nextState] += 1
postSynaptic['MVR07'][nextState] += 1
postSynaptic['RIAL'][nextState] += 7
postSynaptic['RIAR'][nextState] += 5
postSynaptic['RIBL'][nextState] += 2
postSynaptic['RIVR'][nextState] += 3
postSynaptic['RMDDL'][nextState] += 1
postSynaptic['RMDVR'][nextState] += 1
postSynaptic['SMDDL'][nextState] += 2
postSynaptic['SMDVL'][nextState] += 1
postSynaptic['VB1'][nextState] += 1
def URADL():
postSynaptic['IL1DL'][nextState] += 2
postSynaptic['MDL02'][nextState] += 2
postSynaptic['MDL03'][nextState] += 2
postSynaptic['MDL04'][nextState] += 2
postSynaptic['RIPL'][nextState] += 3
postSynaptic['RMEL'][nextState] += 1
def URADR():
postSynaptic['IL1DR'][nextState] += 1
postSynaptic['MDR01'][nextState] += 3
postSynaptic['MDR02'][nextState] += 2
postSynaptic['MDR03'][nextState] += 3
postSynaptic['RIPR'][nextState] += 3
postSynaptic['RMDVR'][nextState] += 1
postSynaptic['RMED'][nextState] += 1
postSynaptic['RMER'][nextState] += 1
postSynaptic['URYDR'][nextState] += 1
def URAVL():
postSynaptic['MVL01'][nextState] += 2
postSynaptic['MVL02'][nextState] += 2
postSynaptic['MVL03'][nextState] += 3
postSynaptic['MVL04'][nextState] += 2
postSynaptic['RIPL'][nextState] += 3
postSynaptic['RMEL'][nextState] += 1
postSynaptic['RMER'][nextState] += 1
postSynaptic['RMEV'][nextState] += 2
def URAVR():
postSynaptic['IL1R'][nextState] += 1
postSynaptic['MVR01'][nextState] += 2
postSynaptic['MVR02'][nextState] += 2
postSynaptic['MVR03'][nextState] += 2
postSynaptic['MVR04'][nextState] += 2
postSynaptic['RIPR'][nextState] += 3
postSynaptic['RMDVL'][nextState] += 1
postSynaptic['RMER'][nextState] += 2
postSynaptic['RMEV'][nextState] += 2
def URBL():
postSynaptic['AVBL'][nextState] += 1
postSynaptic['CEPDL'][nextState] += 1
postSynaptic['IL1L'][nextState] += 1
postSynaptic['OLQDL'][nextState] += 1
postSynaptic['OLQVL'][nextState] += 1
postSynaptic['RICR'][nextState] += 1
postSynaptic['RMDDR'][nextState] += 1
postSynaptic['SIAVL'][nextState] += 1
postSynaptic['SMBDR'][nextState] += 1
postSynaptic['URXL'][nextState] += 2
def URBR():
postSynaptic['ADAR'][nextState] += 1
postSynaptic['AVBR'][nextState] += 1
postSynaptic['CEPDR'][nextState] += 1
postSynaptic['IL1R'][nextState] += 3
postSynaptic['IL2R'][nextState] += 1
postSynaptic['OLQDR'][nextState] += 1
postSynaptic['OLQVR'][nextState] += 1
postSynaptic['RICR'][nextState] += 1
postSynaptic['RMDL'][nextState] += 1
postSynaptic['RMDR'][nextState] += 1
postSynaptic['RMFL'][nextState] += 1
postSynaptic['SIAVR'][nextState] += 2
postSynaptic['SMBDL'][nextState] += 1
postSynaptic['URXR'][nextState] += 4
def URXL():
postSynaptic['ASHL'][nextState] += 1
postSynaptic['AUAL'][nextState] += 5
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVEL'][nextState] += 4
postSynaptic['AVJR'][nextState] += 1
postSynaptic['RIAL'][nextState] += 8
postSynaptic['RICL'][nextState] += 1
postSynaptic['RIGL'][nextState] += 3
postSynaptic['RMGL'][nextState] += 3
def URXR():
postSynaptic['AUAR'][nextState] += 4
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 2
postSynaptic['AVER'][nextState] += 2
postSynaptic['IL2R'][nextState] += 1
postSynaptic['OLQVR'][nextState] += 1
postSynaptic['RIAR'][nextState] += 3
postSynaptic['RIGR'][nextState] += 2
postSynaptic['RIPR'][nextState] += 3
postSynaptic['RMDR'][nextState] += 1
postSynaptic['RMGR'][nextState] += 2
postSynaptic['SIAVR'][nextState] += 1
def URYDL():
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVER'][nextState] += 2
postSynaptic['RIBL'][nextState] += 1
postSynaptic['RIGL'][nextState] += 1
postSynaptic['RMDDR'][nextState] += 4
postSynaptic['RMDVL'][nextState] += 6
postSynaptic['SMDDL'][nextState] += 1
postSynaptic['SMDDR'][nextState] += 1
def URYDR():
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVEL'][nextState] += 2
postSynaptic['AVER'][nextState] += 2
postSynaptic['RIBR'][nextState] += 1
postSynaptic['RIGR'][nextState] += 1
postSynaptic['RMDDL'][nextState] += 3
postSynaptic['RMDVR'][nextState] += 5
postSynaptic['SMDDL'][nextState] += 4
def URYVL():
postSynaptic['AVAR'][nextState] += 1
postSynaptic['AVBR'][nextState] += 1
postSynaptic['AVER'][nextState] += 5
postSynaptic['IL1VL'][nextState] += 1
postSynaptic['RIAL'][nextState] += 1
postSynaptic['RIBL'][nextState] += 2
postSynaptic['RIGL'][nextState] += 1
postSynaptic['RIH'][nextState] += 1
postSynaptic['RIS'][nextState] += 1
postSynaptic['RMDDL'][nextState] += 4
postSynaptic['RMDVR'][nextState] += 2
postSynaptic['SIBVR'][nextState] += 1
postSynaptic['SMDVR'][nextState] += 4
def URYVR():
postSynaptic['AVAL'][nextState] += 2
postSynaptic['AVEL'][nextState] += 6
postSynaptic['IL1VR'][nextState] += 1
postSynaptic['RIAR'][nextState] += 1
postSynaptic['RIBR'][nextState] += 1
postSynaptic['RIGR'][nextState] += 1
postSynaptic['RMDDR'][nextState] += 6
postSynaptic['RMDVL'][nextState] += 4
postSynaptic['SIBDR'][nextState] += 1
postSynaptic['SIBVL'][nextState] += 1
postSynaptic['SMDVL'][nextState] += 3
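# The ventral-cord motor neuron classes (VA, VB, VC, VD) follow. As the tables
# below show, their dominant targets are body-wall muscle rows (MVLnn/MVRnn)
# and the DD/VD inhibitory motor neurons, with the VD class contributing the
# negative (inhibitory) muscle weights.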
def VA1():
postSynaptic['AVAL'][nextState] += 3
postSynaptic['DA2'][nextState] += 2
postSynaptic['DD1'][nextState] += 9
postSynaptic['MVL07'][nextState] += 3
postSynaptic['MVL08'][nextState] += 3
postSynaptic['MVR07'][nextState] += 3
postSynaptic['MVR08'][nextState] += 3
postSynaptic['VD1'][nextState] += 2
def VA2():
postSynaptic['AVAL'][nextState] += 5
postSynaptic['DD1'][nextState] += 13
postSynaptic['MVL07'][nextState] += 5
postSynaptic['MVL10'][nextState] += 5
postSynaptic['MVR07'][nextState] += 5
postSynaptic['MVR10'][nextState] += 5
postSynaptic['SABD'][nextState] += 3
postSynaptic['VA3'][nextState] += 2
postSynaptic['VB1'][nextState] += 2
postSynaptic['VD1'][nextState] += 3
postSynaptic['VD2'][nextState] += 11
def VA3():
postSynaptic['AS1'][nextState] += 1
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 2
postSynaptic['DD1'][nextState] += 18
postSynaptic['DD2'][nextState] += 11
postSynaptic['MVL09'][nextState] += 5
postSynaptic['MVL10'][nextState] += 5
postSynaptic['MVL12'][nextState] += 5
postSynaptic['MVR09'][nextState] += 5
postSynaptic['MVR10'][nextState] += 5
postSynaptic['MVR12'][nextState] += 5
postSynaptic['SABD'][nextState] += 2
postSynaptic['VA4'][nextState] += 1
postSynaptic['VD2'][nextState] += 3
postSynaptic['VD3'][nextState] += 3
def VA4():
postSynaptic['AS2'][nextState] += 2
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 2
postSynaptic['AVDL'][nextState] += 1
postSynaptic['DA5'][nextState] += 1
postSynaptic['DD2'][nextState] += 21
postSynaptic['MVL11'][nextState] += 6
postSynaptic['MVL12'][nextState] += 6
postSynaptic['MVR11'][nextState] += 6
postSynaptic['MVR12'][nextState] += 6
postSynaptic['SABD'][nextState] += 1
postSynaptic['VB3'][nextState] += 2
postSynaptic['VD4'][nextState] += 3
def VA5():
postSynaptic['AS3'][nextState] += 2
postSynaptic['AVAL'][nextState] += 5
postSynaptic['AVAR'][nextState] += 3
postSynaptic['DA5'][nextState] += 2
postSynaptic['DD2'][nextState] += 5
postSynaptic['DD3'][nextState] += 13
postSynaptic['MVL11'][nextState] += 5
postSynaptic['MVL14'][nextState] += 5
postSynaptic['MVR11'][nextState] += 5
postSynaptic['MVR14'][nextState] += 5
postSynaptic['VD5'][nextState] += 2
def VA6():
postSynaptic['AVAL'][nextState] += 6
postSynaptic['AVAR'][nextState] += 2
postSynaptic['DD3'][nextState] += 24
postSynaptic['MVL13'][nextState] += 5
postSynaptic['MVL14'][nextState] += 5
postSynaptic['MVR13'][nextState] += 5
postSynaptic['MVR14'][nextState] += 5
postSynaptic['VB5'][nextState] += 2
postSynaptic['VD5'][nextState] += 1
postSynaptic['VD6'][nextState] += 2
def VA7():
postSynaptic['AS5'][nextState] += 1
postSynaptic['AVAL'][nextState] += 2
postSynaptic['AVAR'][nextState] += 4
postSynaptic['DD3'][nextState] += 3
postSynaptic['DD4'][nextState] += 12
postSynaptic['MVL13'][nextState] += 4
postSynaptic['MVL15'][nextState] += 4
postSynaptic['MVL16'][nextState] += 4
postSynaptic['MVR13'][nextState] += 4
postSynaptic['MVR15'][nextState] += 4
postSynaptic['MVR16'][nextState] += 4
postSynaptic['MVULVA'][nextState] += 4
postSynaptic['VB3'][nextState] += 1
postSynaptic['VD7'][nextState] += 9
def VA8():
postSynaptic['AS6'][nextState] += 1
postSynaptic['AVAL'][nextState] += 10
postSynaptic['AVAR'][nextState] += 4
postSynaptic['AVBR'][nextState] += 1
postSynaptic['DD4'][nextState] += 21
postSynaptic['MVL15'][nextState] += 6
postSynaptic['MVL16'][nextState] += 6
postSynaptic['MVR15'][nextState] += 6
postSynaptic['MVR16'][nextState] += 6
postSynaptic['PDER'][nextState] += 1
postSynaptic['PVCR'][nextState] += 2
postSynaptic['VA8'][nextState] += 1
postSynaptic['VA9'][nextState] += 1
postSynaptic['VB6'][nextState] += 1
postSynaptic['VB8'][nextState] += 4
postSynaptic['VB9'][nextState] += 3
postSynaptic['VD7'][nextState] += 5
postSynaptic['VD8'][nextState] += 6
def VA9():
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 1
postSynaptic['DD4'][nextState] += 3
postSynaptic['DD5'][nextState] += 15
postSynaptic['DVB'][nextState] += 1
postSynaptic['DVC'][nextState] += 1
postSynaptic['MVL15'][nextState] += 5
postSynaptic['MVL18'][nextState] += 5
postSynaptic['MVR15'][nextState] += 5
postSynaptic['MVR18'][nextState] += 5
postSynaptic['PVCR'][nextState] += 1
postSynaptic['PVT'][nextState] += 1
postSynaptic['VB8'][nextState] += 7
postSynaptic['VB9'][nextState] += 4
postSynaptic['VD7'][nextState] += 1
postSynaptic['VD9'][nextState] += 10
def VA10():
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 1
postSynaptic['MVL17'][nextState] += 5
postSynaptic['MVL18'][nextState] += 5
postSynaptic['MVR17'][nextState] += 5
postSynaptic['MVR18'][nextState] += 5
def VA11():
postSynaptic['AVAL'][nextState] += 1
postSynaptic['AVAR'][nextState] += 7
postSynaptic['DD6'][nextState] += 10
postSynaptic['MVL19'][nextState] += 5
postSynaptic['MVL20'][nextState] += 5
postSynaptic['MVR19'][nextState] += 5
postSynaptic['MVR20'][nextState] += 5
postSynaptic['PVNR'][nextState] += 2
postSynaptic['VB10'][nextState] += 1
postSynaptic['VD12'][nextState] += 4
def VA12():
postSynaptic['AS11'][nextState] += 2
postSynaptic['AVAR'][nextState] += 1
postSynaptic['DA8'][nextState] += 3
postSynaptic['DA9'][nextState] += 5
postSynaptic['DB7'][nextState] += 4
postSynaptic['DD6'][nextState] += 2
postSynaptic['LUAL'][nextState] += 2
postSynaptic['MVL21'][nextState] += 5
postSynaptic['MVL22'][nextState] += 5
postSynaptic['MVL23'][nextState] += 5
postSynaptic['MVR21'][nextState] += 5
postSynaptic['MVR22'][nextState] += 5
postSynaptic['MVR23'][nextState] += 5
postSynaptic['MVR24'][nextState] += 5
postSynaptic['PHCL'][nextState] += 1
postSynaptic['PHCR'][nextState] += 1
postSynaptic['PVCL'][nextState] += 2
postSynaptic['PVCR'][nextState] += 3
postSynaptic['VA11'][nextState] += 1
postSynaptic['VB11'][nextState] += 1
postSynaptic['VD12'][nextState] += 3
postSynaptic['VD13'][nextState] += 11
def VB1():
postSynaptic['AIBR'][nextState] += 1
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVKL'][nextState] += 4
postSynaptic['DB2'][nextState] += 2
postSynaptic['DD1'][nextState] += 1
postSynaptic['DVA'][nextState] += 1
postSynaptic['MVL07'][nextState] += 1
postSynaptic['MVL08'][nextState] += 1
postSynaptic['MVR07'][nextState] += 1
postSynaptic['MVR08'][nextState] += 1
postSynaptic['RIML'][nextState] += 2
postSynaptic['RMFL'][nextState] += 2
postSynaptic['SAADL'][nextState] += 9
postSynaptic['SAADR'][nextState] += 2
postSynaptic['SABD'][nextState] += 1
postSynaptic['SMDVR'][nextState] += 1
postSynaptic['VA1'][nextState] += 3
postSynaptic['VA3'][nextState] += 1
postSynaptic['VB2'][nextState] += 4
postSynaptic['VD1'][nextState] += 3
postSynaptic['VD2'][nextState] += 1
def VB2():
postSynaptic['AVBL'][nextState] += 3
postSynaptic['AVBR'][nextState] += 1
postSynaptic['DB4'][nextState] += 1
postSynaptic['DD1'][nextState] += 20
postSynaptic['DD2'][nextState] += 1
postSynaptic['MVL07'][nextState] += 4
postSynaptic['MVL09'][nextState] += 4
postSynaptic['MVL10'][nextState] += 4
postSynaptic['MVL12'][nextState] += 4
postSynaptic['MVR07'][nextState] += 4
postSynaptic['MVR09'][nextState] += 4
postSynaptic['MVR10'][nextState] += 4
postSynaptic['MVR12'][nextState] += 4
postSynaptic['RIGL'][nextState] += 1
postSynaptic['VA2'][nextState] += 1
postSynaptic['VB1'][nextState] += 4
postSynaptic['VB3'][nextState] += 1
postSynaptic['VB5'][nextState] += 1
postSynaptic['VB7'][nextState] += 2
postSynaptic['VC2'][nextState] += 1
postSynaptic['VD2'][nextState] += 9
postSynaptic['VD3'][nextState] += 3
def VB3():
postSynaptic['AVBR'][nextState] += 1
postSynaptic['DB1'][nextState] += 1
postSynaptic['DD2'][nextState] += 37
postSynaptic['MVL11'][nextState] += 6
postSynaptic['MVL12'][nextState] += 6
postSynaptic['MVL14'][nextState] += 6
postSynaptic['MVR11'][nextState] += 6
postSynaptic['MVR12'][nextState] += 6
postSynaptic['MVR14'][nextState] += 6
postSynaptic['VA4'][nextState] += 1
postSynaptic['VA7'][nextState] += 1
postSynaptic['VB2'][nextState] += 1
def VB4():
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 1
postSynaptic['DB1'][nextState] += 1
postSynaptic['DB4'][nextState] += 1
postSynaptic['DD2'][nextState] += 6
postSynaptic['DD3'][nextState] += 16
postSynaptic['MVL11'][nextState] += 5
postSynaptic['MVL14'][nextState] += 5
postSynaptic['MVR11'][nextState] += 5
postSynaptic['MVR14'][nextState] += 5
postSynaptic['VB5'][nextState] += 1
def VB5():
postSynaptic['AVBL'][nextState] += 1
postSynaptic['DD3'][nextState] += 27
postSynaptic['MVL13'][nextState] += 6
postSynaptic['MVL14'][nextState] += 6
postSynaptic['MVR13'][nextState] += 6
postSynaptic['MVR14'][nextState] += 6
postSynaptic['VB2'][nextState] += 1
postSynaptic['VB4'][nextState] += 1
postSynaptic['VB6'][nextState] += 8
def VB6():
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 2
postSynaptic['DA4'][nextState] += 1
postSynaptic['DD4'][nextState] += 30
postSynaptic['MVL15'][nextState] += 6
postSynaptic['MVL16'][nextState] += 6
postSynaptic['MVR15'][nextState] += 6
postSynaptic['MVR16'][nextState] += 6
postSynaptic['MVULVA'][nextState] += 6
postSynaptic['VA8'][nextState] += 1
postSynaptic['VB5'][nextState] += 1
postSynaptic['VB7'][nextState] += 1
postSynaptic['VD6'][nextState] += 1
postSynaptic['VD7'][nextState] += 8
def VB7():
postSynaptic['AVBL'][nextState] += 2
postSynaptic['AVBR'][nextState] += 2
postSynaptic['DD4'][nextState] += 2
postSynaptic['MVL15'][nextState] += 5
postSynaptic['MVR15'][nextState] += 5
postSynaptic['VB2'][nextState] += 2
def VB8():
postSynaptic['AVBL'][nextState] += 7
postSynaptic['AVBR'][nextState] += 3
postSynaptic['DD5'][nextState] += 30
postSynaptic['MVL17'][nextState] += 5
postSynaptic['MVL18'][nextState] += 5
postSynaptic['MVL20'][nextState] += 5
postSynaptic['MVR17'][nextState] += 5
postSynaptic['MVR18'][nextState] += 5
postSynaptic['MVR20'][nextState] += 5
postSynaptic['VA8'][nextState] += 3
postSynaptic['VA9'][nextState] += 10
postSynaptic['VB9'][nextState] += 6
postSynaptic['VD10'][nextState] += 1
postSynaptic['VD9'][nextState] += 10
def VB9():
postSynaptic['AVAL'][nextState] += 5
postSynaptic['AVAR'][nextState] += 4
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVBR'][nextState] += 6
postSynaptic['DD5'][nextState] += 8
postSynaptic['DVB'][nextState] += 1
postSynaptic['MVL17'][nextState] += 6
postSynaptic['MVL20'][nextState] += 6
postSynaptic['MVR17'][nextState] += 6
postSynaptic['MVR20'][nextState] += 6
postSynaptic['PVCL'][nextState] += 2
postSynaptic['VA8'][nextState] += 3
postSynaptic['VA9'][nextState] += 4
postSynaptic['VB8'][nextState] += 4
postSynaptic['VD10'][nextState] += 5
def VB10():
postSynaptic['AVBL'][nextState] += 2
postSynaptic['AVBR'][nextState] += 1
postSynaptic['AVKL'][nextState] += 1
postSynaptic['DD6'][nextState] += 9
postSynaptic['MVL19'][nextState] += 5
postSynaptic['MVL20'][nextState] += 5
postSynaptic['MVR19'][nextState] += 5
postSynaptic['MVR20'][nextState] += 5
postSynaptic['PVCL'][nextState] += 1
postSynaptic['PVT'][nextState] += 1
postSynaptic['VD11'][nextState] += 1
postSynaptic['VD12'][nextState] += 2
def VB11():
postSynaptic['AVBL'][nextState] += 2
postSynaptic['AVBR'][nextState] += 1
postSynaptic['DD6'][nextState] += 7
postSynaptic['MVL21'][nextState] += 5
postSynaptic['MVL22'][nextState] += 5
postSynaptic['MVL23'][nextState] += 5
postSynaptic['MVR21'][nextState] += 5
postSynaptic['MVR22'][nextState] += 5
postSynaptic['MVR23'][nextState] += 5
postSynaptic['MVR24'][nextState] += 5
postSynaptic['PVCR'][nextState] += 1
postSynaptic['VA12'][nextState] += 2
def VC1():
postSynaptic['AVL'][nextState] += 2
postSynaptic['DD1'][nextState] += 7
postSynaptic['DD2'][nextState] += 6
postSynaptic['DD3'][nextState] += 6
postSynaptic['DVC'][nextState] += 1
postSynaptic['MVULVA'][nextState] += 6
postSynaptic['PVT'][nextState] += 2
postSynaptic['VC2'][nextState] += 9
postSynaptic['VC3'][nextState] += 3
postSynaptic['VD1'][nextState] += 5
postSynaptic['VD2'][nextState] += 1
postSynaptic['VD3'][nextState] += 1
postSynaptic['VD4'][nextState] += 2
postSynaptic['VD5'][nextState] += 5
postSynaptic['VD6'][nextState] += 1
def VC2():
postSynaptic['DB4'][nextState] += 1
postSynaptic['DD1'][nextState] += 6
postSynaptic['DD2'][nextState] += 4
postSynaptic['DD3'][nextState] += 9
postSynaptic['DVC'][nextState] += 1
postSynaptic['MVULVA'][nextState] += 10
postSynaptic['PVCR'][nextState] += 1
postSynaptic['PVQR'][nextState] += 1
postSynaptic['PVT'][nextState] += 2
postSynaptic['VC1'][nextState] += 10
postSynaptic['VC3'][nextState] += 6
postSynaptic['VD1'][nextState] += 2
postSynaptic['VD2'][nextState] += 2
postSynaptic['VD4'][nextState] += 5
postSynaptic['VD5'][nextState] += 5
postSynaptic['VD6'][nextState] += 1
def VC3():
postSynaptic['AVL'][nextState] += 1
postSynaptic['DD1'][nextState] += 2
postSynaptic['DD2'][nextState] += 4
postSynaptic['DD3'][nextState] += 5
postSynaptic['DD4'][nextState] += 13
postSynaptic['DVC'][nextState] += 1
postSynaptic['HSNR'][nextState] += 1
postSynaptic['MVULVA'][nextState] += 11
postSynaptic['PVNR'][nextState] += 1
postSynaptic['PVPR'][nextState] += 1
postSynaptic['PVQR'][nextState] += 4
postSynaptic['VC1'][nextState] += 4
postSynaptic['VC2'][nextState] += 3
postSynaptic['VC4'][nextState] += 1
postSynaptic['VC5'][nextState] += 2
postSynaptic['VD1'][nextState] += 1
postSynaptic['VD2'][nextState] += 1
postSynaptic['VD3'][nextState] += 1
postSynaptic['VD4'][nextState] += 2
postSynaptic['VD5'][nextState] += 4
postSynaptic['VD6'][nextState] += 4
postSynaptic['VD7'][nextState] += 5
def VC4():
postSynaptic['AVBL'][nextState] += 1
postSynaptic['AVFR'][nextState] += 1
postSynaptic['AVHR'][nextState] += 1
postSynaptic['MVULVA'][nextState] += 7
postSynaptic['VC1'][nextState] += 1
postSynaptic['VC3'][nextState] += 5
postSynaptic['VC5'][nextState] += 2
def VC5():
postSynaptic['AVFL'][nextState] += 1
postSynaptic['AVFR'][nextState] += 1
postSynaptic['DVC'][nextState] += 2
postSynaptic['HSNL'][nextState] += 1
postSynaptic['MVULVA'][nextState] += 2
postSynaptic['OLLR'][nextState] += 1
postSynaptic['PVT'][nextState] += 1
postSynaptic['URBL'][nextState] += 3
postSynaptic['VC3'][nextState] += 3
postSynaptic['VC4'][nextState] += 2
def VC6():
postSynaptic['MVULVA'][nextState] += 1
def VD1():
postSynaptic['DD1'][nextState] += 5
postSynaptic['DVC'][nextState] += 5
postSynaptic['MVL05'][nextState] += -5
postSynaptic['MVL08'][nextState] += -5
postSynaptic['MVR05'][nextState] += -5
postSynaptic['MVR08'][nextState] += -5
postSynaptic['RIFL'][nextState] += 1
postSynaptic['RIGL'][nextState] += 2
postSynaptic['SMDDR'][nextState] += 1
postSynaptic['VA1'][nextState] += 2
postSynaptic['VA2'][nextState] += 1
postSynaptic['VC1'][nextState] += 1
postSynaptic['VD2'][nextState] += 7
def VD2():
postSynaptic['AS1'][nextState] += 1
postSynaptic['DD1'][nextState] += 3
postSynaptic['MVL07'][nextState] += -7
postSynaptic['MVL10'][nextState] += -7
postSynaptic['MVR07'][nextState] += -7
postSynaptic['MVR10'][nextState] += -7
postSynaptic['VA2'][nextState] += 9
postSynaptic['VB2'][nextState] += 3
postSynaptic['VD1'][nextState] += 7
postSynaptic['VD3'][nextState] += 2
def VD3():
postSynaptic['MVL09'][nextState] += -7
postSynaptic['MVL12'][nextState] += -9
postSynaptic['MVR09'][nextState] += -7
postSynaptic['MVR12'][nextState] += -7
postSynaptic['PVPL'][nextState] += 1
postSynaptic['VA3'][nextState] += 2
postSynaptic['VB2'][nextState] += 2
postSynaptic['VD2'][nextState] += 2
postSynaptic['VD4'][nextState] += 1
def VD4():
postSynaptic['DD2'][nextState] += 2
postSynaptic['MVL11'][nextState] += -9
postSynaptic['MVL12'][nextState] += -9
postSynaptic['MVR11'][nextState] += -9
postSynaptic['MVR12'][nextState] += -9
postSynaptic['PVPR'][nextState] += 1
postSynaptic['VD3'][nextState] += 1
postSynaptic['VD5'][nextState] += 1
def VD5():
postSynaptic['AVAR'][nextState] += 1
postSynaptic['MVL14'][nextState] += -17
postSynaptic['MVR14'][nextState] += -17
postSynaptic['PVPR'][nextState] += 1
postSynaptic['VA5'][nextState] += 2
postSynaptic['VB4'][nextState] += 2
postSynaptic['VD4'][nextState] += 1
postSynaptic['VD6'][nextState] += 2
def VD6():
postSynaptic['AVAL'][nextState] += 1
postSynaptic['MVL13'][nextState] += -7
postSynaptic['MVL14'][nextState] += -7
postSynaptic['MVL16'][nextState] += -7
postSynaptic['MVR13'][nextState] += -7
postSynaptic['MVR14'][nextState] += -7
postSynaptic['MVR16'][nextState] += -7
postSynaptic['VA6'][nextState] += 1
postSynaptic['VB5'][nextState] += 2
postSynaptic['VD5'][nextState] += 2
postSynaptic['VD7'][nextState] += 1
def VD7():
postSynaptic['MVL15'][nextState] += -7
postSynaptic['MVL16'][nextState] += -7
postSynaptic['MVR15'][nextState] += -7
postSynaptic['MVR16'][nextState] += -7
postSynaptic['MVULVA'][nextState] += -15
postSynaptic['VA9'][nextState] += 1
postSynaptic['VD6'][nextState] += 1
def VD8():
postSynaptic['DD4'][nextState] += 2
postSynaptic['MVL15'][nextState] += -18
postSynaptic['MVR15'][nextState] += -18
postSynaptic['VA8'][nextState] += 5
def VD9():
postSynaptic['MVL17'][nextState] += -10
postSynaptic['MVL18'][nextState] += -10
postSynaptic['MVR17'][nextState] += -10
postSynaptic['MVR18'][nextState] += -10
postSynaptic['PDER'][nextState] += 1
postSynaptic['VD10'][nextState] += 5
def VD10():
postSynaptic['AVBR'][nextState] += 1
postSynaptic['DD5'][nextState] += 2
postSynaptic['DVC'][nextState] += 4
postSynaptic['MVL17'][nextState] += -9
postSynaptic['MVL20'][nextState] += -9
postSynaptic['MVR17'][nextState] += -9
postSynaptic['MVR20'][nextState] += -9
postSynaptic['VB9'][nextState] += 2
postSynaptic['VD9'][nextState] += 5
def VD11():
postSynaptic['AVAR'][nextState] += 2
postSynaptic['MVL19'][nextState] += -9
postSynaptic['MVL20'][nextState] += -9
postSynaptic['MVR19'][nextState] += -9
postSynaptic['MVR20'][nextState] += -9
postSynaptic['VA11'][nextState] += 1
postSynaptic['VB10'][nextState] += 1
def VD12():
postSynaptic['MVL19'][nextState] += -5
postSynaptic['MVL21'][nextState] += -5
postSynaptic['MVR19'][nextState] += -5
postSynaptic['MVR22'][nextState] += -5
postSynaptic['VA11'][nextState] += 3
postSynaptic['VA12'][nextState] += 2
postSynaptic['VB10'][nextState] += 1
postSynaptic['VB11'][nextState] += 1
def VD13():
postSynaptic['AVAR'][nextState] += 2
postSynaptic['MVL21'][nextState] += -9
postSynaptic['MVL22'][nextState] += -9
postSynaptic['MVL23'][nextState] += -9
postSynaptic['MVR21'][nextState] += -9
postSynaptic['MVR22'][nextState] += -9
postSynaptic['MVR23'][nextState] += -9
postSynaptic['MVR24'][nextState] += -9
postSynaptic['PVCL'][nextState] += 1
postSynaptic['PVCR'][nextState] += 1
postSynaptic['PVPL'][nextState] += 2
postSynaptic['VA12'][nextState] += 1
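# ---------------------------------------------------------------------------
# The per-neuron functions above are hand-written transcriptions of the
# connectome table: firing a neuron adds each of its outgoing synaptic weights
# to the target's next-state accumulator. The sketch below shows the same
# update expressed data-driven instead of one-function-per-neuron. It is a
# minimal illustration, not part of the original program: CONNECTIONS holds
# only a small excerpt (VD13's neuron-to-neuron targets), and
# fireNeuronFromTable/advanceState are hypothetical helpers that assume the
# module-level postSynaptic dictionary and nextState index used above.
CONNECTIONS = {
    'VD13': [('AVAR', 2), ('PVCL', 1), ('PVCR', 1), ('PVPL', 2), ('VA12', 1)],
}

def fireNeuronFromTable(name):
    # Accumulate each outgoing weight into the target's next-state slot,
    # exactly as the hand-written functions do.
    for target, weight in CONNECTIONS.get(name, []):
        postSynaptic[target][nextState] += weight

def advanceState(thisState, nextState):
    # Assumed tick boundary: copy each next-state accumulator into the
    # current-state slot so the following cycle starts from the freshly
    # computed activations.
    for key in postSynaptic:
        postSynaptic[key][thisState] = postSynaptic[key][nextState]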
def createpostSynaptic():
# The postSynaptic dictionary maintains the accumulated values for
# each neuron and muscle. The accumulated values are initialized to zero.
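# Each entry holds two accumulator slots; judging from the nextState usage in
# the neuron functions above (an assumption, since the index bookkeeping lives
# elsewhere in the file), one slot is read as the current state while the
# other collects input for the next simulation step.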
postSynaptic['ADAL'] = [0,0]
postSynaptic['ADAR'] = [0,0]
postSynaptic['ADEL'] = [0,0]
postSynaptic['ADER'] = [0,0]
postSynaptic['ADFL'] = [0,0]
postSynaptic['ADFR'] = [0,0]
postSynaptic['ADLL'] = [0,0]
postSynaptic['ADLR'] = [0,0]
postSynaptic['AFDL'] = [0,0]
postSynaptic['AFDR'] = [0,0]
postSynaptic['AIAL'] = [0,0]
postSynaptic['AIAR'] = [0,0]
postSynaptic['AIBL'] = [0,0]
postSynaptic['AIBR'] = [0,0]
postSynaptic['AIML'] = [0,0]
postSynaptic['AIMR'] = [0,0]
postSynaptic['AINL'] = [0,0]
postSynaptic['AINR'] = [0,0]
postSynaptic['AIYL'] = [0,0]
postSynaptic['AIYR'] = [0,0]
postSynaptic['AIZL'] = [0,0]
postSynaptic['AIZR'] = [0,0]
postSynaptic['ALA'] = [0,0]
postSynaptic['ALML'] = [0,0]
postSynaptic['ALMR'] = [0,0]
postSynaptic['ALNL'] = [0,0]
postSynaptic['ALNR'] = [0,0]
postSynaptic['AQR'] = [0,0]
postSynaptic['AS1'] = [0,0]
postSynaptic['AS10'] = [0,0]
postSynaptic['AS11'] = [0,0]
postSynaptic['AS2'] = [0,0]
postSynaptic['AS3'] = [0,0]
postSynaptic['AS4'] = [0,0]
postSynaptic['AS5'] = [0,0]
postSynaptic['AS6'] = [0,0]
postSynaptic['AS7'] = [0,0]
postSynaptic['AS8'] = [0,0]
postSynaptic['AS9'] = [0,0]
postSynaptic['ASEL'] = [0,0]
postSynaptic['ASER'] = [0,0]
postSynaptic['ASGL'] = [0,0]
postSynaptic['ASGR'] = [0,0]
postSynaptic['ASHL'] = [0,0]
postSynaptic['ASHR'] = [0,0]
postSynaptic['ASIL'] = [0,0]
postSynaptic['ASIR'] = [0,0]
postSynaptic['ASJL'] = [0,0]
postSynaptic['ASJR'] = [0,0]
postSynaptic['ASKL'] = [0,0]
postSynaptic['ASKR'] = [0,0]
postSynaptic['AUAL'] = [0,0]
postSynaptic['AUAR'] = [0,0]
postSynaptic['AVAL'] = [0,0]
postSynaptic['AVAR'] = [0,0]
postSynaptic['AVBL'] = [0,0]
postSynaptic['AVBR'] = [0,0]
postSynaptic['AVDL'] = [0,0]
postSynaptic['AVDR'] = [0,0]
postSynaptic['AVEL'] = [0,0]
postSynaptic['AVER'] = [0,0]
postSynaptic['AVFL'] = [0,0]
postSynaptic['AVFR'] = [0,0]
postSynaptic['AVG'] = [0,0]
postSynaptic['AVHL'] = [0,0]
postSynaptic['AVHR'] = [0,0]
postSynaptic['AVJL'] = [0,0]
postSynaptic['AVJR'] = [0,0]
postSynaptic['AVKL'] = [0,0]
postSynaptic['AVKR'] = [0,0]
postSynaptic['AVL'] = [0,0]
postSynaptic['AVM'] = [0,0]
postSynaptic['AWAL'] = [0,0]
postSynaptic['AWAR'] = [0,0]
postSynaptic['AWBL'] = [0,0]
postSynaptic['AWBR'] = [0,0]
postSynaptic['AWCL'] = [0,0]
postSynaptic['AWCR'] = [0,0]
postSynaptic['BAGL'] = [0,0]
postSynaptic['BAGR'] = [0,0]
postSynaptic['BDUL'] = [0,0]
postSynaptic['BDUR'] = [0,0]
postSynaptic['CEPDL'] = [0,0]
postSynaptic['CEPDR'] = [0,0]
postSynaptic['CEPVL'] = [0,0]
postSynaptic['CEPVR'] = [0,0]
postSynaptic['DA1'] = [0,0]
postSynaptic['DA2'] = [0,0]
postSynaptic['DA3'] = [0,0]
postSynaptic['DA4'] = [0,0]
postSynaptic['DA5'] = [0,0]
postSynaptic['DA6'] = [0,0]
postSynaptic['DA7'] = [0,0]
postSynaptic['DA8'] = [0,0]
postSynaptic['DA9'] = [0,0]
postSynaptic['DB1'] = [0,0]
postSynaptic['DB2'] = [0,0]
postSynaptic['DB3'] = [0,0]
postSynaptic['DB4'] = [0,0]
postSynaptic['DB5'] = [0,0]
postSynaptic['DB6'] = [0,0]
postSynaptic['DB7'] = [0,0]
postSynaptic['DD1'] = [0,0]
postSynaptic['DD2'] = [0,0]
postSynaptic['DD3'] = [0,0]
postSynaptic['DD4'] = [0,0]
postSynaptic['DD5'] = [0,0]
postSynaptic['DD6'] = [0,0]
postSynaptic['DVA'] = [0,0]
postSynaptic['DVB'] = [0,0]
postSynaptic['DVC'] = [0,0]
postSynaptic['FLPL'] = [0,0]
postSynaptic['FLPR'] = [0,0]
postSynaptic['HSNL'] = [0,0]
postSynaptic['HSNR'] = [0,0]
postSynaptic['I1L'] = [0,0]
postSynaptic['I1R'] = [0,0]
postSynaptic['I2L'] = [0,0]
postSynaptic['I2R'] = [0,0]
postSynaptic['I3'] = [0,0]
postSynaptic['I4'] = [0,0]
postSynaptic['I5'] = [0,0]
postSynaptic['I6'] = [0,0]
postSynaptic['IL1DL'] = [0,0]
postSynaptic['IL1DR'] = [0,0]
postSynaptic['IL1L'] = [0,0]
postSynaptic['IL1R'] = [0,0]
postSynaptic['IL1VL'] = [0,0]
postSynaptic['IL1VR'] = [0,0]
postSynaptic['IL2L'] = [0,0]
postSynaptic['IL2R'] = [0,0]
postSynaptic['IL2DL'] = [0,0]
postSynaptic['IL2DR'] = [0,0]
postSynaptic['IL2VL'] = [0,0]
postSynaptic['IL2VR'] = [0,0]
postSynaptic['LUAL'] = [0,0]
postSynaptic['LUAR'] = [0,0]
postSynaptic['M1'] = [0,0]
postSynaptic['M2L'] = [0,0]
postSynaptic['M2R'] = [0,0]
postSynaptic['M3L'] = [0,0]
postSynaptic['M3R'] = [0,0]
postSynaptic['M4'] = [0,0]
postSynaptic['M5'] = [0,0]
postSynaptic['MANAL'] = [0,0]
postSynaptic['MCL'] = [0,0]
postSynaptic['MCR'] = [0,0]
postSynaptic['MDL01'] = [0,0]
postSynaptic['MDL02'] = [0,0]
postSynaptic['MDL03'] = [0,0]
postSynaptic['MDL04'] = [0,0]
postSynaptic['MDL05'] = [0,0]
postSynaptic['MDL06'] = [0,0]
postSynaptic['MDL07'] = [0,0]
postSynaptic['MDL08'] = [0,0]
postSynaptic['MDL09'] = [0,0]
postSynaptic['MDL10'] = [0,0]
postSynaptic['MDL11'] = [0,0]
postSynaptic['MDL12'] = [0,0]
postSynaptic['MDL13'] = [0,0]
postSynaptic['MDL14'] = [0,0]
postSynaptic['MDL15'] = [0,0]
postSynaptic['MDL16'] = [0,0]
postSynaptic['MDL17'] = [0,0]
postSynaptic['MDL18'] = [0,0]
postSynaptic['MDL19'] = [0,0]
postSynaptic['MDL20'] = [0,0]
postSynaptic['MDL21'] = [0,0]
postSynaptic['MDL22'] = [0,0]
postSynaptic['MDL23'] = [0,0]
postSynaptic['MDL24'] = [0,0]
postSynaptic['MDR01'] = [0,0]
postSynaptic['MDR02'] = [0,0]
postSynaptic['MDR03'] = [0,0]
postSynaptic['MDR04'] = [0,0]
postSynaptic['MDR05'] = [0,0]
postSynaptic['MDR06'] = [0,0]
postSynaptic['MDR07'] = [0,0]
postSynaptic['MDR08'] = [0,0]
postSynaptic['MDR09'] = [0,0]
postSynaptic['MDR10'] = [0,0]
postSynaptic['MDR11'] = [0,0]
postSynaptic['MDR12'] = [0,0]
postSynaptic['MDR13'] = [0,0]
postSynaptic['MDR14'] = [0,0]
postSynaptic['MDR15'] = [0,0]
postSynaptic['MDR16'] = [0,0]
postSynaptic['MDR17'] = [0,0]
postSynaptic['MDR18'] = [0,0]
postSynaptic['MDR19'] = [0,0]
postSynaptic['MDR20'] = [0,0]
postSynaptic['MDR21'] = [0,0]
postSynaptic['MDR22'] = [0,0]
postSynaptic['MDR23'] = [0,0]
postSynaptic['MDR24'] = [0,0]
postSynaptic['MI'] = [0,0]
postSynaptic['MVL01'] = [0,0]
postSynaptic['MVL02'] = [0,0]
postSynaptic['MVL03'] = [0,0]
postSynaptic['MVL04'] = [0,0]
postSynaptic['MVL05'] = [0,0]
postSynaptic['MVL06'] = [0,0]
postSynaptic['MVL07'] = [0,0]
postSynaptic['MVL08'] = [0,0]
postSynaptic['MVL09'] = [0,0]
postSynaptic['MVL10'] = [0,0]
postSynaptic['MVL11'] = [0,0]
postSynaptic['MVL12'] = [0,0]
postSynaptic['MVL13'] = [0,0]
postSynaptic['MVL14'] = [0,0]
postSynaptic['MVL15'] = [0,0]
postSynaptic['MVL16'] = [0,0]
postSynaptic['MVL17'] = [0,0]
postSynaptic['MVL18'] = [0,0]
postSynaptic['MVL19'] = [0,0]
postSynaptic['MVL20'] = [0,0]
postSynaptic['MVL21'] = [0,0]
postSynaptic['MVL22'] = [0,0]
postSynaptic['MVL23'] = [0,0]
postSynaptic['MVR01'] = [0,0]
postSynaptic['MVR02'] = [0,0]
postSynaptic['MVR03'] = [0,0]
postSynaptic['MVR04'] = [0,0]
postSynaptic['MVR05'] = [0,0]
postSynaptic['MVR06'] = [0,0]
postSynaptic['MVR07'] = [0,0]
postSynaptic['MVR08'] = [0,0]
postSynaptic['MVR09'] = [0,0]
postSynaptic['MVR10'] = [0,0]
postSynaptic['MVR11'] = [0,0]
postSynaptic['MVR12'] = [0,0]
postSynaptic['MVR13'] = [0,0]
postSynaptic['MVR14'] = [0,0]
postSynaptic['MVR15'] = [0,0]
postSynaptic['MVR16'] = [0,0]
postSynaptic['MVR17'] = [0,0]
postSynaptic['MVR18'] = [0,0]
postSynaptic['MVR19'] = [0,0]
postSynaptic['MVR20'] = [0,0]
postSynaptic['MVR21'] = [0,0]
postSynaptic['MVR22'] = [0,0]
postSynaptic['MVR23'] = [0,0]
postSynaptic['MVR24'] = [0,0]
postSynaptic['MVULVA'] = [0,0]
postSynaptic['NSML'] = [0,0]
postSynaptic['NSMR'] = [0,0]
postSynaptic['OLLL'] = [0,0]
postSynaptic['OLLR'] = [0,0]
postSynaptic['OLQDL'] = [0,0]
postSynaptic['OLQDR'] = [0,0]
postSynaptic['OLQVL'] = [0,0]
postSynaptic['OLQVR'] = [0,0]
postSynaptic['PDA'] = [0,0]
postSynaptic['PDB'] = [0,0]
postSynaptic['PDEL'] = [0,0]
postSynaptic['PDER'] = [0,0]
postSynaptic['PHAL'] = [0,0]
postSynaptic['PHAR'] = [0,0]
postSynaptic['PHBL'] = [0,0]
postSynaptic['PHBR'] = [0,0]
postSynaptic['PHCL'] = [0,0]
postSynaptic['PHCR'] = [0,0]
postSynaptic['PLML'] = [0,0]
postSynaptic['PLMR'] = [0,0]
postSynaptic['PLNL'] = [0,0]
postSynaptic['PLNR'] = [0,0]
postSynaptic['PQR'] = [0,0]
postSynaptic['PVCL'] = [0,0]
postSynaptic['PVCR'] = [0,0]
postSynaptic['PVDL'] = [0,0]
postSynaptic['PVDR'] = [0,0]
postSynaptic['PVM'] = [0,0]
postSynaptic['PVNL'] = [0,0]
postSynaptic['PVNR'] = [0,0]
postSynaptic['PVPL'] = [0,0]
postSynaptic['PVPR'] = [0,0]
postSynaptic['PVQL'] = [0,0]
postSynaptic['PVQR'] = [0,0]
postSynaptic['PVR'] = [0,0]
postSynaptic['PVT'] = [0,0]
postSynaptic['PVWL'] = [0,0]
postSynaptic['PVWR'] = [0,0]
postSynaptic['RIAL'] = [0,0]
postSynaptic['RIAR'] = [0,0]
postSynaptic['RIBL'] = [0,0]
postSynaptic['RIBR'] = [0,0]
postSynaptic['RICL'] = [0,0]
postSynaptic['RICR'] = [0,0]
postSynaptic['RID'] = [0,0]
postSynaptic['RIFL'] = [0,0]
postSynaptic['RIFR'] = [0,0]
postSynaptic['RIGL'] = [0,0]
postSynaptic['RIGR'] = [0,0]
postSynaptic['RIH'] = [0,0]
postSynaptic['RIML'] = [0,0]
postSynaptic['RIMR'] = [0,0]
postSynaptic['RIPL'] = [0,0]
postSynaptic['RIPR'] = [0,0]
postSynaptic['RIR'] = [0,0]
postSynaptic['RIS'] = [0,0]
postSynaptic['RIVL'] = [0,0]
postSynaptic['RIVR'] = [0,0]
postSynaptic['RMDDL'] = [0,0]
postSynaptic['RMDDR'] = [0,0]
postSynaptic['RMDL'] = [0,0]
postSynaptic['RMDR'] = [0,0]
postSynaptic['RMDVL'] = [0,0]
postSynaptic['RMDVR'] = [0,0]
postSynaptic['RMED'] = [0,0]
postSynaptic['RMEL'] = [0,0]
postSynaptic['RMER'] = [0,0]
postSynaptic['RMEV'] = [0,0]
postSynaptic['RMFL'] = [0,0]
postSynaptic['RMFR'] = [0,0]
postSynaptic['RMGL'] = [0,0]
postSynaptic['RMGR'] = [0,0]
postSynaptic['RMHL'] = [0,0]
postSynaptic['RMHR'] = [0,0]
postSynaptic['SAADL'] = [0,0]
postSynaptic['SAADR'] = [0,0]
postSynaptic['SAAVL'] = [0,0]
postSynaptic['SAAVR'] = [0,0]
postSynaptic['SABD'] = [0,0]
postSynaptic['SABVL'] = [0,0]
postSynaptic['SABVR'] = [0,0]
postSynaptic['SDQL'] = [0,0]
postSynaptic['SDQR'] = [0,0]
postSynaptic['SIADL'] = [0,0]
postSynaptic['SIADR'] = [0,0]
postSynaptic['SIAVL'] = [0,0]
postSynaptic['SIAVR'] = [0,0]
postSynaptic['SIBDL'] = [0,0]
postSynaptic['SIBDR'] = [0,0]
postSynaptic['SIBVL'] = [0,0]
postSynaptic['SIBVR'] = [0,0]
postSynaptic['SMBDL'] = [0,0]
postSynaptic['SMBDR'] = [0,0]
postSynaptic['SMBVL'] = [0,0]
postSynaptic['SMBVR'] = [0,0]
postSynaptic['SMDDL'] = [0,0]
postSynaptic['SMDDR'] = [0,0]
postSynaptic['SMDVL'] = [0,0]
postSynaptic['SMDVR'] = [0,0]
postSynaptic['URADL'] = [0,0]
postSynaptic['URADR'] = [0,0]
postSynaptic['URAVL'] = [0,0]
postSynaptic['URAVR'] = [0,0]
postSynaptic['URBL'] = [0,0]
postSynaptic['URBR'] = [0,0]
postSynaptic['URXL'] = [0,0]
postSynaptic['URXR'] = [0,0]
postSynaptic['URYDL'] = [0,0]
postSynaptic['URYDR'] = [0,0]
postSynaptic['URYVL'] = [0,0]
postSynaptic['URYVR'] = [0,0]
postSynaptic['VA1'] = [0,0]
postSynaptic['VA10'] = [0,0]
postSynaptic['VA11'] = [0,0]
postSynaptic['VA12'] = [0,0]
postSynaptic['VA2'] = [0,0]
postSynaptic['VA3'] = [0,0]
postSynaptic['VA4'] = [0,0]
postSynaptic['VA5'] = [0,0]
postSynaptic['VA6'] = [0,0]
postSynaptic['VA7'] = [0,0]
postSynaptic['VA8'] = [0,0]
postSynaptic['VA9'] = [0,0]
postSynaptic['VB1'] = [0,0]
postSynaptic['VB10'] = [0,0]
postSynaptic['VB11'] = [0,0]
postSynaptic['VB2'] = [0,0]
postSynaptic['VB3'] = [0,0]
postSynaptic['VB4'] = [0,0]
postSynaptic['VB5'] = [0,0]
postSynaptic['VB6'] = [0,0]
postSynaptic['VB7'] = [0,0]
postSynaptic['VB8'] = [0,0]
postSynaptic['VB9'] = [0,0]
postSynaptic['VC1'] = [0,0]
postSynaptic['VC2'] = [0,0]
postSynaptic['VC3'] = [0,0]
postSynaptic['VC4'] = [0,0]
postSynaptic['VC5'] = [0,0]
postSynaptic['VC6'] = [0,0]
postSynaptic['VD1'] = [0,0]
postSynaptic['VD10'] = [0,0]
postSynaptic['VD11'] = [0,0]
postSynaptic['VD12'] = [0,0]
postSynaptic['VD13'] = [0,0]
postSynaptic['VD2'] = [0,0]
postSynaptic['VD3'] = [0,0]
postSynaptic['VD4'] = [0,0]
postSynaptic['VD5'] = [0,0]
postSynaptic['VD6'] = [0,0]
postSynaptic['VD7'] = [0,0]
postSynaptic['VD8'] = [0,0]
postSynaptic['VD9'] = [0,0]
#global postSynapticNext = copy.deepcopy(postSynaptic)
def motorcontrol():
global accumright
global accumleft
    # Accumulate the left and right muscle values; the accumulated totals
    # are used to drive the left and right motors of the robot.
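    # mLeft and mRight are assumed to partition muscleList into the left- and
    # right-side body-wall muscles (e.g. the MVL../MDL.. vs MVR../MDR.. names
    # initialized above), so each side's total activation drives one wheel.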
for muscle in muscleList:
if muscle in mLeft:
accumleft += postSynaptic[muscle][nextState]
            #accumleft += postSynaptic[muscle][thisState]  # unused: the thisState weight is always 0 here
            #postSynaptic[muscle][thisState] = 0
            # The value must be cleared once the muscle has fired, or it would
            # keep exceeding the threshold within a single iteration.
            print muscle, "Before", postSynaptic[muscle][thisState], accumleft
            postSynaptic[muscle][nextState] = 0
            print muscle, "After", postSynaptic[muscle][thisState], accumleft
elif muscle in mRight:
accumright += postSynaptic[muscle][nextState]
            #accumright += postSynaptic[muscle][thisState]  # unused: see the note above
            #postSynaptic[muscle][thisState] = 0
postSynaptic[muscle][nextState] = 0
# We turn the wheels according to the motor weight accumulation
new_speed = abs(accumleft) + abs(accumright)
if new_speed > 150:
new_speed = 150
elif new_speed < 75:
new_speed = 75
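    # Worked example with illustrative values: accumleft = -40 and
    # accumright = 60 give new_speed = abs(-40) + abs(60) = 100, which
    # already lies inside the clamped range [75, 150].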
print "Left: ", accumleft, "Right:", accumright, "Speed: ", new_speed
    # accumleft/accumright are reset after the commented control block below,
    # so the block could still read them if it were re-enabled.
## Start Commented section
# set_speed(new_speed)
# if accumleft == 0 and accumright == 0:
# stop()
# elif accumright <= 0 and accumleft < 0:
# set_speed(150)
# turnratio = float(accumright) / float(accumleft)
# # print "Turn Ratio: ", turnratio
# if turnratio <= 0.6:
# left_rot()
# time.sleep(0.8)
# elif turnratio >= 2:
# right_rot()
# time.sleep(0.8)
# bwd()
# time.sleep(0.5)
# elif accumright <= 0 and accumleft >= 0:
# right_rot()
# time.sleep(.8)
# elif accumright >= 0 and accumleft <= 0:
# left_rot()
# time.sleep(.8)
# elif accumright >= 0 and accumleft > 0:
# turnratio = float(accumright) / float(accumleft)
# # print "Turn Ratio: ", turnratio
# if turnratio <= 0.6:
# left_rot()
# time.sleep(0.8)
# elif turnratio >= 2:
# right_rot()
# time.sleep(0.8)
# fwd()
# time.sleep(0.5)
# else:
# stop()
## End Commented section
accumleft = 0
accumright = 0
time.sleep(0.5)
def dendriteAccumulate(dneuron):
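    # Resolve the neuron name string to its same-named function via eval and
    # call it; the call adds that neuron's weighted outputs into the
    # postSynaptic accumulators (the nextState slots).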
f = eval(dneuron)
f()
def fireNeuron(fneuron):
    # The threshold has been exceeded and we fire the neurite
    if fneuron != "MVULVA":  # MVULVA is excluded, presumably because no matching function is defined
f = eval(fneuron)
f()
#postSynaptic[fneuron][nextState] = 0
#postSynaptic[fneuron][thisState] = 0
postSynaptic[fneuron][nextState] = 0
def runconnectome():
    # Each time a set of neurons is stimulated, this method executes.
    # The weighted values are accumulated in the postSynaptic array.
    # Once the accumulation is read, we check which neurons exceed the
    # threshold and fire each neuron or muscle that has done so.
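    # In effect this is a double-buffered update: reads during a pass use
    # postSynaptic[ps][thisState], writes target postSynaptic[ps][nextState],
    # and the two indices are swapped at the end of the pass.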
global thisState
global nextState
for ps in postSynaptic:
if ps[:3] not in muscles and abs(postSynaptic[ps][thisState]) > threshold:
fireNeuron(ps)
#print ps
#print (ps)
#postSynaptic[ps][nextState] = 0
motorcontrol()
for ps in postSynaptic:
#if postSynaptic[ps][thisState] != 0:
#print ps
#print "Before Clone: ", postSynaptic[ps][thisState]
        postSynaptic[ps][thisState] = copy.deepcopy(postSynaptic[ps][nextState])  # otherwise fired neurons keep getting reset to their previous weight
#print "After Clone: ", postSynaptic[ps][thisState]
    thisState, nextState = nextState, thisState
# Create the postSynaptic dictionary
createpostSynaptic()
dist = 0
#set_speed(120)
print "Voltage: "  #, volt()
tfood = 0
try:
### Here is where you would put in a method to stimulate the neurons ###
### We stimulate chemosensory neurons constantly unless nose touch ###
### (sonar) is stimulated and then we fire nose touch neurites ###
    ### Use CTRL-C to stop the program
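    ### A minimal manual-stimulation sketch (hypothetical usage, reusing the
    ### sensory neuron names defined above) would be, e.g.:
    ###   dendriteAccumulate("ASHL"); dendriteAccumulate("ASHR")
    ###   runconnectome()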
while True:
        ## Start comment - use a fixed value such as "dist = 27" to stimulate
        ## nose touch; leave dist at 0 (or use a value >= 30) to skip nose stimulation
#dist = us_dist(15)
## End Comment
        # Do we need to switch states at the end of each loop? No, this is
        # done inside runconnectome(), which is called in each iteration.
        if dist > 0 and dist < 30:
print "OBSTACLE (Nose Touch)", dist
dendriteAccumulate("FLPR")
dendriteAccumulate("FLPL")
dendriteAccumulate("ASHL")
dendriteAccumulate("ASHR")
dendriteAccumulate("IL1VL")
dendriteAccumulate("IL1VR")
dendriteAccumulate("OLQDL")
dendriteAccumulate("OLQDR")
dendriteAccumulate("OLQVR")
dendriteAccumulate("OLQVL")
runconnectome()
else:
if tfood < 2:
print "FOOD"
dendriteAccumulate("ADFL")
dendriteAccumulate("ADFR")
dendriteAccumulate("ASGR")
dendriteAccumulate("ASGL")
dendriteAccumulate("ASIL")
dendriteAccumulate("ASIR")
dendriteAccumulate("ASJR")
dendriteAccumulate("ASJL")
runconnectome()
time.sleep(0.5)
tfood += 0.5
if (tfood > 20):
tfood = 0
except KeyboardInterrupt:
## Start Comment
#stop()
## End Comment
print "Ctrl+C detected. Program Stopped!"
for pscheck in postSynaptic:
print (pscheck,' ',postSynaptic[pscheck][0],' ',postSynaptic[pscheck][1])
|
mattbaker/GoPiGo
|
disembodiedConnectome.py
|
Python
|
gpl-2.0
| 197,938
|
[
"NEURON"
] |
184d6fe0cff9f0b8cfa59a63b0a6491f13c3d311eb595789ab5c8329fb0c7b46
|
"""
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
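# A rough modern equivalent, assuming scikit-learn >= 0.18, where mixture.GMM
# was replaced by mixture.GaussianMixture and score_samples returns the
# per-sample log-likelihood array directly (no tuple, so no [0] indexing):
#
#   from sklearn.mixture import GaussianMixture
#   clf = GaussianMixture(n_components=2, covariance_type='full')
#   clf.fit(X_train)
#   Z = -clf.score_samples(XX).reshape(X.shape)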
|
RPGOne/Skynet
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/mixture/plot_gmm_pdf.py
|
Python
|
bsd-3-clause
| 1,528
|
[
"Gaussian"
] |
dd40ad1dc9ae7ac35da5e9717115ee8e27ef78bd8fabae7237294881d77db295
|
import unittest as ut
from .. import tabular as ta
from ....common import RTOL, ATOL, pandas, requires as _requires
from ....examples import get_path
from ...shapes import Polygon
from ....io import geotable as pdio
from ... import ops as GIS
import numpy as np
PANDAS_EXTINCT = pandas is None
@ut.skipIf(PANDAS_EXTINCT, 'missing pandas')
class Test_Tabular(ut.TestCase):
def setUp(self):
import pandas as pd
self.columbus = pdio.read_files(get_path('columbus.shp'))
grid = [Polygon([(0,0),(0,1),(1,1),(1,0)]),
Polygon([(0,1),(0,2),(1,2),(1,1)]),
Polygon([(1,2),(2,2),(2,1),(1,1)]),
Polygon([(1,1),(2,1),(2,0),(1,0)])]
regime = [0,0,1,1]
ids = range(4)
data = np.array((regime, ids)).T
self.exdf = pd.DataFrame(data, columns=['regime', 'ids'])
self.exdf['geometry'] = grid
@_requires('geopandas')
def test_round_trip(self):
import geopandas as gpd
import pandas as pd
geodf = GIS.tabular.to_gdf(self.columbus)
self.assertIsInstance(geodf, gpd.GeoDataFrame)
new_df = GIS.tabular.to_df(geodf)
self.assertIsInstance(new_df, pd.DataFrame)
for new, old in zip(new_df.geometry, self.columbus.geometry):
            self.assertEqual(new, old)
def test_spatial_join(self):
pass
def test_spatial_overlay(self):
pass
def test_dissolve(self):
out = GIS.tabular.dissolve(self.exdf, by='regime')
self.assertEqual(out[0].area, 2.0)
self.assertEqual(out[1].area, 2.0)
answer_vertices0 = [(0,0), (0,1), (0,2), (1,2), (1,1), (1,0), (0,0)]
answer_vertices1 = [(2,1), (2,0), (1,0), (1,1), (1,2), (2,2), (2,1)]
np.testing.assert_allclose(out[0].vertices, answer_vertices0)
np.testing.assert_allclose(out[1].vertices, answer_vertices1)
def test_clip(self):
pass
def test_erase(self):
pass
def test_union(self):
new_geom = GIS.tabular.union(self.exdf)
self.assertEqual(new_geom.area, 4)
def test_intersection(self):
pass
def test_symmetric_difference(self):
pass
def test_difference(self):
pass
|
sjsrey/pysal_core
|
pysal_core/cg/ops/tests/test_tabular.py
|
Python
|
bsd-3-clause
| 2,231
|
[
"COLUMBUS"
] |
f9036421726682ad938f00bde364461a12d61c53eabeae86bae07ec92a0cdfbd
|