Schema: text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64-64)
#! /usr/bin/python
#pylint: disable=invalid-name
# Convert an acor file into VTK format (specifically a .vtu unstructured-grid file)
from xml.dom import minidom
def convertToVTU(infile, outpath):
    #first need to find some things from the file
    datafile = open(infile,'r')
    datalist=[]
    planelist=[]
    npoints = 0
    for line in datafile:
        numbers = line.split()
        if len(numbers) != 4 :
            continue
        if npoints == 0 :
            curz = numbers[2]
        if numbers[2] != curz :
            # New z value: start a new plane of points
            datalist.append(planelist)
            curz = numbers[2]
            planelist=[]
        planelist.append(numbers)
        npoints += 1
    # Append last set
    datalist.append(planelist)
    datafile.close()
    ncells = len(datalist)

    doc = minidom.Document()
    vtkfile = doc.createElement("VTKFile")
    doc.appendChild(vtkfile)
    vtkfile.setAttribute("type","UnstructuredGrid")
    vtkfile.setAttribute("version","0.1")
    vtkfile.setAttribute("byte_order", "LittleEndian")
    ugrid = doc.createElement("UnstructuredGrid")
    vtkfile.appendChild(ugrid)
    piece = doc.createElement("Piece")
    ugrid.appendChild(piece)
    piece.setAttribute( "NumberOfPoints", str(npoints))
    piece.setAttribute( "NumberOfCells", str(ncells))

    # First the PointData element
    point_data = doc.createElement("PointData")
    piece.appendChild(point_data)
    point_data.setAttribute("Scalars", "Intensity")
    data_array = doc.createElement("DataArray")
    point_data.appendChild(data_array)
    data_array.setAttribute("type", "Float32")
    data_array.setAttribute("Name", "Intensity")
    data_array.setAttribute("format","ascii")
    for plane in datalist:
        for point in plane:
            txt = doc.createTextNode(str(point[3]))
            data_array.appendChild(txt)

    # Now the Points element
    points = doc.createElement("Points")
    piece.appendChild(points)
    data_array = doc.createElement("DataArray")
    points.appendChild(data_array)
    data_array.setAttribute("type", "Float32")
    data_array.setAttribute("NumberOfComponents", "3")
    data_array.setAttribute("format","ascii")
    for plane in datalist:
        for point in plane:
            txt = doc.createTextNode(str(point[0]) + " " + str(point[1]) + " " + str(point[2]))
            data_array.appendChild(txt)

    cells = doc.createElement("Cells")
    piece.appendChild(cells)
    data_array = doc.createElement("DataArray")
    cells.appendChild(data_array)
    data_array.setAttribute("type", "Int32")
    data_array.setAttribute("Name", "connectivity")
    data_array.setAttribute("format","ascii")
    i = 0
    for plane in datalist:
        for point in plane:
            txt = doc.createTextNode(str(i))
            data_array.appendChild(txt)
            i += 1

    data_array = doc.createElement("DataArray")
    cells.appendChild(data_array)
    data_array.setAttribute("type", "Int32")
    data_array.setAttribute("Name", "offsets")
    data_array.setAttribute("format","ascii")
    i = 0
    for plane in datalist:
        i += len(plane)
        txt = doc.createTextNode(str(i))
        data_array.appendChild(txt)

    data_array = doc.createElement("DataArray")
    cells.appendChild(data_array)
    data_array.setAttribute("type", "Int32")
    data_array.setAttribute("Name", "types")
    data_array.setAttribute("format","ascii")
    for plane in datalist:
        txt = doc.createTextNode("4")
        data_array.appendChild(txt)

    #print doc.toprettyxml(newl="\n")
    shortname = infile.split('/')
    name = outpath + shortname[len(shortname)-1] + ".vtu"
    handle = open(name,'w')
    doc.writexml(handle, newl="\n")
    handle.close()
    del datalist
    del planelist
    del doc

def writeParallelVTU(files, prefix):
    doc = minidom.Document()
    vtkfile = doc.createElement("VTKFile")
    doc.appendChild(vtkfile)
    vtkfile.setAttribute("type","PUnstructuredGrid")
    vtkfile.setAttribute("version","0.1")
    vtkfile.setAttribute("byte_order", "LittleEndian")
    pugrid = doc.createElement("PUnstructuredGrid")
    vtkfile.appendChild(pugrid)
    pugrid.setAttribute("GhostLevel", "0")
    ppointdata = doc.createElement("PPointData")
    pugrid.appendChild(ppointdata)
    ppointdata.setAttribute("Scalars","Intensity")
    data_array = doc.createElement("PDataArray")
    ppointdata.appendChild(data_array)
    data_array.setAttribute("type","Float32")
    data_array.setAttribute("Name","Intensity")
    ppoints = doc.createElement("PPoints")
    pugrid.appendChild(ppoints)
    data_array = doc.createElement("PDataArray")
    ppoints.appendChild(data_array)
    data_array.setAttribute("type","Float32")
    data_array.setAttribute("NumberOfComponents","3")
    for name in files:
        piece = doc.createElement("Piece")
        pugrid.appendChild(piece)
        piece.setAttribute("Source", name + ".vtu")
    # print doc.toprettyxml(newl="\n")
    filename = prefix + files[0].split('.')[0] + ".pvtu"
    # print filename
    handle = open(filename,'w')
    doc.writexml(handle, newl="\n")
    handle.close()
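For context, a minimal driver sketch showing how the two helpers above might be chained; the acor file names and output directory are hypothetical, and the import assumes the module is on the path as VTKConvert:

from VTKConvert import convertToVTU, writeParallelVTU

acor_files = ["run.acor.0", "run.acor.1"]
for f in acor_files:
    convertToVTU(f, "out/")             # writes out/run.acor.0.vtu, out/run.acor.1.vtu
writeParallelVTU(acor_files, "out/")    # writes out/run.pvtu referencing the .vtu pieces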
| repo_name: dymkowsk/mantid | path: tools/VTKConverter/VTKConvert.py | language: Python | license: gpl-3.0 | size: 5,106 | keyword: ["VTK"] | text_hash: 72b25a3a8279f68da4a2ed8217bdf60e027f48b4122c7218527de95fcc291cc7 |
from clang.cindex import Cursor, CursorKind
from ast.namespaces import Namespace
from ast.attributes.annotation import Annotation
class TranslationUnit(object):
    """ Represents a processed translation unit

    Unlike libclang does, a translation unit is not part of the AST itself
    but its container. The root level of the processed AST is the global
    namespace.
    """

    def __init__(self, cursor, file):
        self.cursor = cursor
        self.file = str(file)
        self.annotations = []

        # Maybe the trickiest part of the parser:
        #
        # libclang doesn't add preprocessor data to the AST by default
        # (makes sense since, as the name suggests, pre-processing doesn't
        # form part of compilation itself).
        #
        # You can however explicitly ask libclang to leave translation unit
        # preprocessing info in the AST, so you can see include directives, macros,
        # etc. But since preprocessing is done before generating the AST itself,
        # libclang has no contextual information for preprocessor entities. In other
        # words, it doesn't know in what scope (what class, function, etc.) a macro
        # was instantiated, so it puts all the preprocessor entities at the top
        # level of the AST.
        #
        # How do you find the context where a macro was instantiated? Here's a trick:
        #
        # Store all macro instantiations, and while walking down the AST try to
        # match the currently visited node against the list of "still not matched"
        # macros. The criterion is that a macro is applied to a node (i.e. "matches")
        # if it was instantiated on the line directly before the node.
        #
        # It was done this way since the primary goal of processing macros was to
        # find the annotations used to implement attributes:
        #
        #     $(an attribute)
        #     void function();
        #
        for macro in self.cursor.get_children():
            if macro.kind == CursorKind.MACRO_INSTANTIATION and \
               macro.spelling in ('$', 'SIPLASPLAS_ANNOTATION'):
                text = ' '.join([t.spelling for t in macro.get_tokens()][2:-2])
                self.annotations.append(Annotation(macro, text))

        self.mismatched_annotations = list(self.annotations)
        self.mismatched_annotations.sort(key=lambda a: a.cursor.location.line)
        self.root = Namespace.global_namespace(self)

    def match_annotations(self, node):
        """ Returns the list of annotations associated with a node """
        if self.mismatched_annotations:
            for macro in self.mismatched_annotations:
                # Note we match extent.end.line, not location.line:
                # beware of multiline annotations.
                if str(macro.cursor.location.file) == str(node.cursor.location.file) and \
                   macro.cursor.extent.end.line == node.cursor.location.line - 1:
                    self.mismatched_annotations.remove(macro)
                    macro.annotated_node = node
                    return [macro]  # Only one annotation per node is supported (yet)
            return []
        else:
            return []

    def nodes(self):
        """ Gives an iterable to visit all AST nodes recursively """
        class nodeiter:
            def __init__(self, node):
                self.node = node

            def __iter__(self):
                childs = [nodeiter(c) for c in self.node.get_children()]
                for child in childs:
                    for c in child:
                        yield c
                yield self.node

        return nodeiter(self.root)
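A standalone sketch of the matching criterion described in the comments above, using plain stand-in values instead of libclang cursors (all data here is hypothetical):

def annotation_matches(macro_file, macro_end_line, node_file, node_line):
    # A macro annotates a node iff it is in the same file and its extent
    # ends on the line directly above the node.
    return macro_file == node_file and macro_end_line == node_line - 1

# $(an attribute) ending on line 10 annotates the declaration on line 11:
assert annotation_matches("a.hpp", 10, "a.hpp", 11)
assert not annotation_matches("a.hpp", 10, "a.hpp", 12)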
| repo_name: Manu343726/siplasplas | path: src/reflection/parser/ast/translationunit.py | language: Python | license: mit | size: 3,730 | keyword: ["VisIt"] | text_hash: 771a7c2f8d726dec4b7416aad170ffb4addbd67640149f229f93226e801bf378 |
import numpy as np
import mdtraj as md
import mdtraj.testing
from msmbuilder3 import (AngleVectorizer, DihedralVectorizer, PositionVectorizer,
                         DistanceVectorizer)

t = None

def setup():
    global t
    t = md.load(mdtraj.testing.get_fn('frame0.xtc'), top=mdtraj.testing.get_fn('native.pdb'))

def test_distance_vectorizer():
    bi = [[0, 1], [3, 4]]
    reference = md.geometry.compute_distances(t, bi, periodic=False)
    result = DistanceVectorizer(bi).transform(t)
    np.testing.assert_array_equal(result, reference)

def test_angle_vectorizer():
    ai = [[0, 1, 2], [3, 4, 5]]
    reference = md.geometry.compute_angles(t, ai)
    result = AngleVectorizer(ai).transform(t)
    np.testing.assert_array_equal(result, reference)

def test_dihedral_vectorizer():
    di = [[0, 1, 2, 3], [3, 4, 5, 6]]
    reference = md.geometry.compute_dihedrals(t, di)
    result = DihedralVectorizer(di).transform(t)
    np.testing.assert_array_equal(result, reference)

def test_position_vectorizer():
    reference = np.array(map(lambda xyz: md.geometry.alignment.transform(xyz, t.xyz[0]), t.xyz))
    result = PositionVectorizer(t).transform(t)
    np.testing.assert_array_equal(result, reference.reshape((t.n_frames, t.n_atoms*3)))
    t2 = PositionVectorizer(t).inverse_transform(result)
    assert isinstance(t2, md.Trajectory)
    for i in range(t.n_frames):
        assert md.geometry.alignment.rmsd_qcp(t.xyz[i], t2.xyz[i]) < 1e-3
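The pattern these tests exercise, in one line: a vectorizer is constructed from index groups, and its transform maps a trajectory to a 2-D feature array with one row per frame (the index pairs below are hypothetical):

pairs = [[0, 1], [3, 4]]
X = DistanceVectorizer(pairs).transform(t)   # expected shape: (t.n_frames, len(pairs))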
| repo_name: rmcgibbo/msmbuilder3 | path: tests/test_vectorizers.py | language: Python | license: gpl-3.0 | size: 1,457 | keyword: ["MDTraj"] | text_hash: 93b3e754e11b254579fbf348aff99eadd0cd772b45f1a1f0a6ef8eef0b577206 |
#! /usr/freeware/bin/python
#
# This is dump2trj, a program written by Keir E. Novik to convert
# Lammps position dump files to Amber trajectory files.
#
# Copyright 2000, 2001 Keir E. Novik; all rights reserved.
#
# Modified by Vikas Varshney, U Akron, 5 July 2005, as described in README
#
#============================================================
def Convert_files():
    'Handle the whole conversion process'
    print
    print 'Welcome to dump2trj, a program to convert Lammps position dump files to\nAmber trajectory format!'
    print
    Basename_list = Find_dump_files()
    for Basename in Basename_list:
        t = Trajectory()
        if t.Read_dump(Basename):
            t.Write_trj(Basename)
        del t
    print

#============================================================

def Find_dump_files():
    'Look for sets of Lammps position dump files to process'
    '''If passed something on the command line, treat it as a list of
    files to process.  Otherwise, look for *.dump in the current
    directory.
    '''
    import os, sys

    Basename_list = []
    # Extract basenames from command line
    for Name in sys.argv[1:]:
        if Name[-5:] == '.dump':
            Basename_list.append(Name[:-5])
        else:
            Basename_list.append(Name)

    if Basename_list == []:
        print 'Looking for Lammps dump files...',
        Dir_list = os.listdir('.')
        for Filename in Dir_list:
            if Filename[-5:] == '.dump':
                Basename_list.append(Filename[:-5])
        Basename_list.sort()
        if Basename_list != []:
            print 'found',
            for i in range(len(Basename_list)-1):
                print Basename_list[i] + ',',
            print Basename_list[-1] + '\n'
    if Basename_list == []:
        print 'none.\n'
    return Basename_list

#============================================================

class Snapshot:
    def __init__(self, The_trajectory):
        'Initialise the Snapshot class'
        self.timestep = The_trajectory.timestep
        self.atoms = The_trajectory.atoms
        self.xlo = The_trajectory.xlo
        self.xhi = The_trajectory.xhi
        self.ylo = The_trajectory.ylo
        self.yhi = The_trajectory.yhi
        self.zlo = The_trajectory.zlo
        self.zhi = The_trajectory.zhi

    #--------------------------------------------------------

    def Read_dump(self, Lines):
        'Read a snapshot (timestep) from a Lammps position dump file'
        '''Trajectory.Read_dump() will pass us only the lines we need.
        '''
        self.Atom_list = Lines

    #--------------------------------------------------------

    def Write_trj(self, F):
        'Write a snapshot (timestep) to an Amber trajectory file'
        '''The Atom_list must be sorted, as it may not be in order
        (for example, in a parallel Lammps simulation).
        '''
        import string

        xBOX = (self.xhi - self.xlo)
        yBOX = (self.yhi - self.ylo)
        zBOX = (self.zhi - self.zlo)

        Min = min(self.xlo, self.ylo, self.zlo)
        Max = max(self.xhi, self.yhi, self.zhi, xBOX, yBOX, zBOX)
        if Min <= -1000 or Max >= 10000:
            print '(error: coordinates too large!)'
            return

        Print_list = []
        for Line in NumericalSort(self.Atom_list):
            Item_list = string.split(Line)
            x = xBOX * (Float(Item_list[2])+Float(Item_list[5])) # Unwrap scaled main-box x-coordinate to the actual x-coordinate
            y = yBOX * (Float(Item_list[3])+Float(Item_list[6])) # Unwrap scaled main-box y-coordinate to the actual y-coordinate
            z = zBOX * (Float(Item_list[4])+Float(Item_list[7])) # Unwrap scaled main-box z-coordinate to the actual z-coordinate
            Print_list.append('%(x)8.3f' % vars())
            Print_list.append('%(y)8.3f' % vars())
            Print_list.append('%(z)8.3f' % vars())
            if len(Print_list) > 9:
                Line = ''
                for j in range(10):
                    Line = Line + Print_list[j]
                Line = Line + '\n'
                Print_list = Print_list[10:]
                try:
                    F.write(Line)
                except IOError, Detail:
                    print '(error:', Detail[1] + '!)'
                    F.close()
                    return

        if len(Print_list) > 0:
            Line = ''
            for j in range(len(Print_list)):
                Line = Line + Print_list[j]
            Line = Line + '\n'
            try:
                F.write(Line)
            except IOError, Detail:
                print '(error:', Detail[1] + '!)'
                F.close()
                return

        Line = '%(xBOX)8.3f%(yBOX)8.3f%(zBOX)8.3f\n' % vars()
        try:
            F.write(Line)
        except IOError, Detail:
            print '(error:', Detail[1] + '!)'
            F.close()
            return

#============================================================

class Trajectory:
    def Read_dump(self, Basename):
        'Read a Lammps position dump file'
        import string, sys

        Filename = Basename + '.dump'
        print 'Reading', Filename + '...',
        sys.stdout.flush()

        try:
            F = open(Filename)
        except IOError, Detail:
            print '(error:', Detail[1] + '!)'
            return 0
        try:
            Lines = F.readlines()
        except IOError, Detail:
            print '(error:', Detail[1] + '!)'
            F.close()
            return 0
        F.close()

        i = 0
        self.Snapshot_list = []

        # Parse the dump
        while i < len(Lines):
            if string.find(Lines[i], 'ITEM: TIMESTEP') != -1:
                # Read the timestep
                self.timestep = int(Lines[i+1])
                i = i + 2
            elif string.find(Lines[i], 'ITEM: NUMBER OF ATOMS') != -1:
                # Read the number of atoms
                self.atoms = int(Lines[i+1])
                i = i + 2
            elif string.find(Lines[i], 'ITEM: BOX BOUNDS') != -1:
                # Read the periodic box boundaries
                Item_list = string.split(Lines[i+1])
                self.xlo = Float(Item_list[0])
                self.xhi = Float(Item_list[1])
                Item_list = string.split(Lines[i+2])
                self.ylo = Float(Item_list[0])
                self.yhi = Float(Item_list[1])
                Item_list = string.split(Lines[i+3])
                self.zlo = Float(Item_list[0])
                self.zhi = Float(Item_list[1])
                i = i + 4
            elif string.find(Lines[i], 'ITEM: ATOMS') != -1:
                # Read atom positions
                self.Snapshot_list.append(Snapshot(self))
                Start = i + 1
                End = Start + self.atoms
                self.Snapshot_list[-1].Read_dump(Lines[Start:End])
                i = i + self.atoms + 1
            else:
                print '(error: unknown line in file!)'
                return
        print 'done.'
        return 1

    #--------------------------------------------------------

    def Write_trj(self, Basename):
        'Write an Amber trajectory file'
        import os, sys

        Filename = Basename + '.mdcrd'
        Dir_list = os.listdir('.')
        i = 1
        while Filename in Dir_list:
            Filename = Basename + `i` + '.mdcrd'
            i = i + 1
        del i

        print 'Writing', Filename + '...',
        sys.stdout.flush()

        try:
            F = open(Filename, 'w')
        except IOError, Detail:
            print '(error:', Detail[1] + '!)'
            return
        try:
            F.write(Basename + '\n')
        except IOError, Detail:
            print '(error:', Detail[1] + '!)'
            F.close()
            return
        for S in self.Snapshot_list:
            S.Write_trj(F)
        F.close()
        print 'done.'

#============================================================

def Float(s):
    'Return the string s as a float, if possible'
    try:
        x = float(s)
    except ValueError:
        if s[-1] == ',':
            s = s[:-1]
        x = float(s)
    return x

#============================================================

def NumericalSort(String_list):
    'Sort a list of strings by the integer value of the first element'
    import string
    Working_list = []
    for s in String_list:
        Working_list.append((int(string.split(s)[0]), s))
    Working_list.sort()
    Return_list = []
    for Tuple in Working_list:
        Return_list.append(Tuple[1])
    return Return_list

#============================================================

Convert_files()
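A standalone sketch (separate from dump2trj itself) of the Amber mdcrd layout that Snapshot.Write_trj produces: each coordinate is an 8-character %8.3f field, ten fields per line, with any remainder spilling onto a final short line:

coords = [1.0, 2.5, -0.75, 3.125, 4.0, 5.5, 6.25, 7.0, 8.5, 9.0, 10.0]
fields = ["%8.3f" % c for c in coords]
lines = ["".join(fields[i:i+10]) for i in range(0, len(fields), 10)]
print("\n".join(lines))   # 10 fields on the first line, 1 on the second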
| repo_name: ganzenmg/lammps_current | path: tools/amber2lmp/dump2trj.py | language: Python | license: gpl-2.0 | size: 8,767 | keyword: ["Amber", "LAMMPS"] | text_hash: 06e0f66101a1c8e33dd81e08255f7a6547632243b527f679d57abb76745269a7 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import subprocess
from bigdl.dllib.utils.utils import get_node_ip
# Assumptions:
# 1. All hosts have oneCCL installed.
# 2. The driver can ssh to all hosts without a password.
# 3. All hosts have the same working directory.
# 4. All hosts have the same Python environment in the same location.

class MPIRunner:
    def __init__(self,
                 hosts=None,
                 processes_per_node=1,
                 env=None):
        driver_ip = get_node_ip()
        if hosts is None:  # Single node
            self.hosts = [driver_ip]
        elif hosts == "all":  # All executor nodes in the cluster
            def get_ip(iter):
                yield get_node_ip()
            from bigdl.dllib.utils.common import get_node_and_core_number
            from bigdl.orca import OrcaContext
            sc = OrcaContext.get_spark_context()
            node_num, core_num = get_node_and_core_number()
            total_cores = node_num * core_num
            self.hosts = list(set(sc.range(0, total_cores, numSlices=total_cores).barrier()
                                  .mapPartitions(get_ip).collect()))
        else:  # User specified hosts, assumed to be non-duplicate
            assert isinstance(hosts, list)
            self.hosts = hosts
        self.master = self.hosts[0]
        print("Master: ", self.master)
        self.remote_hosts = []
        for host in self.hosts:
            if host != driver_ip:
                self.remote_hosts.append(host)
        print("Remote hosts: ", self.remote_hosts)
        print("Hosts: ", self.hosts)
        self.processes_per_node = processes_per_node
        self.env = env if env else {}

    def run(self, file, **kwargs):
        file_path = os.path.abspath(file)
        assert os.path.exists(file_path)
        file_dir = "/".join(file_path.split("/")[:-1])
        self.scp_file(file_path, file_dir)
        # cmd = ["mpiexec.openmpi"]
        cmd = ["mpiexec.hydra"]
        # -l would label the output with process rank. -l/-ppn not available for openmpi.
        # mpi_config = "-np {} ".format(
        mpi_config = "-np {} -ppn {} -l ".format(
            self.processes_per_node * len(self.hosts),
            self.processes_per_node)
        mpi_env = os.environ.copy()
        mpi_env.update(self.env)
        if "I_MPI_PIN_DOMAIN" in mpi_env:
            mpi_config += "-genv I_MPI_PIN_DOMAIN={} ".format(mpi_env["I_MPI_PIN_DOMAIN"])
        if "OMP_NUM_THREADS" in mpi_env:
            mpi_config += "-genv OMP_NUM_THREADS={} ".format(mpi_env["OMP_NUM_THREADS"])
        if len(self.remote_hosts) > 0:
            mpi_config += "-hosts {}".format(",".join(self.hosts))
        cmd.extend(mpi_config.split())
        # cmd.append("ls")
        cmd.append(sys.executable)
        cmd.append("-u")  # This can print as the program runs
        cmd.append(file_path)
        for k, v in kwargs.items():
            cmd.append("--{}={}".format(str(k), str(v)))
        print(cmd)
        if len(self.remote_hosts) > 0:
            mpi_env["MASTER_ADDR"] = str(self.master)
        else:  # Single node
            mpi_env["MASTER_ADDR"] = "127.0.0.1"
        # print(mpi_env)
        process = subprocess.Popen(cmd, env=mpi_env)
        process.wait()

    def scp_file(self, file, remote_dir):
        for host in self.remote_hosts:
            p = subprocess.Popen(["scp", file,
                                  "root@{}:{}/".format(host, remote_dir)])
            os.waitpid(p.pid, 0)

    def launch_plasma(self, object_store_memory="2g"):
        import atexit
        atexit.register(self.shutdown_plasma)
        # TODO: Or can use spark to launch plasma
        from bigdl.orca.ray.utils import resource_to_bytes
        self.plasma_path = "/".join(sys.executable.split("/")[:-1] + ["plasma_store"])
        self.object_store_memory = resource_to_bytes(object_store_memory)
        self.object_store_address = "/tmp/analytics_zoo_plasma"
        command = "{} -m {} -s {}".format(
            self.plasma_path, self.object_store_memory, self.object_store_address)
        for host in self.hosts:
            if host != get_node_ip():
                p = subprocess.Popen(["ssh", "root@{}".format(host), command])
            else:
                p = subprocess.Popen(command.split())
            print("Plasma launched on {}".format(host))
        return self.object_store_address

    def shutdown_plasma(self):
        for host in self.hosts:
            if host != get_node_ip():
                p = subprocess.Popen(["ssh", "root@{}".format(host), "pkill plasma"])
            else:
                p = subprocess.Popen(["pkill", "plasma"])
            os.waitpid(p.pid, 0)
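A hedged usage sketch based only on the class above; the host addresses and train.py script are hypothetical, and the call assumes passwordless ssh/scp as root per the stated assumptions:

runner = MPIRunner(hosts=["10.0.0.1", "10.0.0.2"], processes_per_node=2,
                   env={"OMP_NUM_THREADS": "4"})
runner.run("train.py", epochs=10, batch_size=256)
# builds roughly: mpiexec.hydra -np 4 -ppn 2 -l -genv OMP_NUM_THREADS=4 \
#                 -hosts 10.0.0.1,10.0.0.2 <python> -u /abs/path/train.py --epochs=10 --batch_size=256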
| repo_name: intel-analytics/BigDL | path: python/orca/src/bigdl/orca/learn/mpi/mpi_runner.py | language: Python | license: apache-2.0 | size: 5,265 | keyword: ["ORCA"] | text_hash: d3d283de49d36ae90b8f7ebf249cb74fe98ad5eab45484d42d61698b335baef1 |
import logging
import os
import re
import socket
import sys
import time
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django_extensions.management.technical_response import \
    null_technical_500_response
from django_extensions.management.utils import (
    RedirectHandler, setup_logger, signalcommand,
)

try:
    if 'django.contrib.staticfiles' in settings.INSTALLED_APPS:
        from django.contrib.staticfiles.handlers import StaticFilesHandler
        USE_STATICFILES = True
    elif 'staticfiles' in settings.INSTALLED_APPS:
        from staticfiles.handlers import StaticFilesHandler  # noqa
        USE_STATICFILES = True
    else:
        USE_STATICFILES = False
except ImportError:
    USE_STATICFILES = False

naiveip_re = re.compile(r"""^(?:
(?P<addr>
    (?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) |         # IPv4 address
    (?P<ipv6>\[[a-fA-F0-9:]+\]) |               # IPv6 address
    (?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
DEFAULT_PORT = "8000"
DEFAULT_POLLER_RELOADER_INTERVAL = getattr(settings, 'RUNSERVERPLUS_POLLER_RELOADER_INTERVAL', 1)

logger = logging.getLogger(__name__)


class Command(BaseCommand):
    option_list = BaseCommand.option_list + (
        make_option('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
                    help='Tells Django to use an IPv6 address.'),
        make_option('--noreload', action='store_false', dest='use_reloader', default=True,
                    help='Tells Django to NOT use the auto-reloader.'),
        make_option('--browser', action='store_true', dest='open_browser',
                    help='Tells Django to open a browser.'),
        make_option('--adminmedia', dest='admin_media_path', default='',
                    help='Specifies the directory from which to serve admin media.'),
        make_option('--nothreading', action='store_false', dest='threaded',
                    help='Do not run in multithreaded mode.'),
        make_option('--threaded', action='store_true', dest='threaded',
                    help='Run in multithreaded mode.'),
        make_option('--output', dest='output_file', default=None,
                    help='Specifies an output file to send a copy of all messages (not flushed immediately).'),
        make_option('--print-sql', action='store_true', default=False,
                    help="Print SQL queries as they're executed"),
        make_option('--cert', dest='cert_path', action="store", type="string",
                    help='To use SSL, specify certificate path.'),
        make_option('--extra-file', dest='extra_files', action="append", type="string",
                    help='auto-reload whenever the given file changes too (can be specified multiple times)'),
        make_option('--reloader-interval', dest='reloader_interval', action="store", type="int", default=DEFAULT_POLLER_RELOADER_INTERVAL,
                    help='After how many seconds auto-reload should scan for updates in poller-mode [default=%s]' % DEFAULT_POLLER_RELOADER_INTERVAL),
    )
    if USE_STATICFILES:
        option_list += (
            make_option('--nostatic', action="store_false", dest='use_static_handler', default=True,
                        help='Tells Django to NOT automatically serve static files at STATIC_URL.'),
            make_option('--insecure', action="store_true", dest='insecure_serving', default=False,
                        help='Allows serving static files even if DEBUG is False.'),
        )
    help = "Starts a lightweight Web server for development."
    args = '[optional port number, or ipaddr:port]'

    # Validation is called explicitly each time the server is reloaded.
    requires_system_checks = False

    @signalcommand
    def handle(self, addrport='', *args, **options):
        import django

        # Do not use default ending='\n', because StreamHandler() takes care of it
        if hasattr(self.stderr, 'ending'):
            self.stderr.ending = None
        setup_logger(logger, self.stderr, filename=options.get('output_file', None))  # , fmt="[%(name)s] %(message)s"
        logredirect = RedirectHandler(__name__)

        # Redirect werkzeug log items
        werklogger = logging.getLogger('werkzeug')
        werklogger.setLevel(logging.INFO)
        werklogger.addHandler(logredirect)
        werklogger.propagate = False

        if options.get("print_sql", False):
            try:
                # Django 1.7 onwards
                from django.db.backends import utils
            except ImportError:
                # Django 1.6 and below
                from django.db.backends import util as utils
            try:
                import sqlparse
            except ImportError:
                sqlparse = None  # noqa

            class PrintQueryWrapper(utils.CursorDebugWrapper):
                def execute(self, sql, params=()):
                    starttime = time.time()
                    try:
                        return self.cursor.execute(sql, params)
                    finally:
                        raw_sql = self.db.ops.last_executed_query(self.cursor, sql, params)
                        execution_time = time.time() - starttime
                        therest = ' -- [Execution time: %.6fs] [Database: %s]' % (execution_time, self.db.alias)
                        if sqlparse:
                            logger.info(sqlparse.format(raw_sql, reindent=True) + therest)
                        else:
                            logger.info(raw_sql + therest)

            utils.CursorDebugWrapper = PrintQueryWrapper

        try:
            from django.core.servers.basehttp import AdminMediaHandler
            USE_ADMINMEDIAHANDLER = True
        except ImportError:
            USE_ADMINMEDIAHANDLER = False

        try:
            from django.core.servers.basehttp import get_internal_wsgi_application as WSGIHandler
        except ImportError:
            from django.core.handlers.wsgi import WSGIHandler  # noqa

        try:
            from werkzeug import run_simple, DebuggedApplication

            # Set colored output
            if settings.DEBUG:
                try:
                    set_werkzeug_log_color()
                except:  # We are dealing with some internals, anything could go wrong
                    print("Wrapping internal werkzeug logger for color highlighting has failed!")
                    pass
        except ImportError:
            raise CommandError("Werkzeug is required to use runserver_plus. Please visit http://werkzeug.pocoo.org/ or install via pip. (pip install Werkzeug)")

        # usurp django's handler
        from django.views import debug
        debug.technical_500_response = null_technical_500_response

        self.use_ipv6 = options.get('use_ipv6')
        if self.use_ipv6 and not socket.has_ipv6:
            raise CommandError('Your Python does not support IPv6.')
        self._raw_ipv6 = False
        if not addrport:
            try:
                addrport = settings.RUNSERVERPLUS_SERVER_ADDRESS_PORT
            except AttributeError:
                pass
        if not addrport:
            self.addr = ''
            self.port = DEFAULT_PORT
        else:
            m = re.match(naiveip_re, addrport)
            if m is None:
                raise CommandError('"%s" is not a valid port number '
                                   'or address:port pair.' % addrport)
            self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
            if not self.port.isdigit():
                raise CommandError("%r is not a valid port number." %
                                   self.port)
            if self.addr:
                if _ipv6:
                    self.addr = self.addr[1:-1]
                    self.use_ipv6 = True
                    self._raw_ipv6 = True
                elif self.use_ipv6 and not _fqdn:
                    raise CommandError('"%s" is not a valid IPv6 address.'
                                       % self.addr)
        if not self.addr:
            self.addr = '::1' if self.use_ipv6 else '127.0.0.1'

        threaded = options.get('threaded', True)
        use_reloader = options.get('use_reloader', True)
        open_browser = options.get('open_browser', False)
        cert_path = options.get("cert_path")
        quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
        bind_url = "http://%s:%s/" % (
            self.addr if not self._raw_ipv6 else '[%s]' % self.addr, self.port)
        extra_files = options.get('extra_files', None) or []
        reloader_interval = options.get('reloader_interval', 1)

        def inner_run():
            print("Validating models...")
            self.validate(display_num_errors=True)
            print("\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE))
            print("Development server is running at %s" % (bind_url,))
            print("Using the Werkzeug debugger (http://werkzeug.pocoo.org/)")
            print("Quit the server with %s." % quit_command)
            path = options.get('admin_media_path', '')
            if not path:
                admin_media_path = os.path.join(django.__path__[0], 'contrib/admin/static/admin')
                if os.path.isdir(admin_media_path):
                    path = admin_media_path
                else:
                    path = os.path.join(django.__path__[0], 'contrib/admin/media')
            handler = WSGIHandler()
            if USE_ADMINMEDIAHANDLER:
                handler = AdminMediaHandler(handler, path)
            if USE_STATICFILES:
                use_static_handler = options.get('use_static_handler', True)
                insecure_serving = options.get('insecure_serving', False)
                if use_static_handler and (settings.DEBUG or insecure_serving):
                    handler = StaticFilesHandler(handler)
            if open_browser:
                import webbrowser
                webbrowser.open(bind_url)
            if cert_path:
                """
                OpenSSL is needed for SSL support.

                This will make flake8 throw a warning since OpenSSL is not used
                directly; alas, this is the only way to show meaningful error
                messages. See:
                http://lucumr.pocoo.org/2011/9/21/python-import-blackbox/
                for more information on python imports.
                """
                try:
                    import OpenSSL  # NOQA
                except ImportError:
                    raise CommandError("Python OpenSSL Library is "
                                       "required to use runserver_plus with ssl support. "
                                       "Install via pip (pip install pyOpenSSL).")
                dir_path, cert_file = os.path.split(cert_path)
                if not dir_path:
                    dir_path = os.getcwd()
                root, ext = os.path.splitext(cert_file)
                certfile = os.path.join(dir_path, root + ".crt")
                keyfile = os.path.join(dir_path, root + ".key")
                try:
                    from werkzeug.serving import make_ssl_devcert
                    if os.path.exists(certfile) and \
                            os.path.exists(keyfile):
                        ssl_context = (certfile, keyfile)
                    else:  # Create cert, key files ourselves.
                        ssl_context = make_ssl_devcert(
                            os.path.join(dir_path, root), host='localhost')
                except ImportError:
                    print("Werkzeug version is less than 0.9, trying adhoc certificate.")
                    ssl_context = "adhoc"
            else:
                ssl_context = None

            if use_reloader and settings.USE_I18N:
                try:
                    from django.utils.autoreload import gen_filenames
                except ImportError:
                    pass
                else:
                    extra_files.extend(filter(lambda filename: filename.endswith('.mo'), gen_filenames()))

            run_simple(
                self.addr,
                int(self.port),
                DebuggedApplication(handler, True),
                use_reloader=use_reloader,
                use_debugger=True,
                extra_files=extra_files,
                reloader_interval=reloader_interval,
                threaded=threaded,
                ssl_context=ssl_context,
            )
        inner_run()


def set_werkzeug_log_color():
    """Try to set color to the werkzeug log.
    """
    from django.core.management.color import color_style
    from werkzeug.serving import WSGIRequestHandler
    from werkzeug._internal import _log

    _style = color_style()
    _orig_log = WSGIRequestHandler.log

    def werk_log(self, type, message, *args):
        try:
            msg = '%s - - [%s] %s' % (
                self.address_string(),
                self.log_date_time_string(),
                message % args,
            )
            http_code = str(args[1])
        except:
            return _orig_log(type, message, *args)

        # Utilize terminal colors, if available
        if http_code[0] == '2':
            # Put 2XX first, since it should be the common case
            msg = _style.HTTP_SUCCESS(msg)
        elif http_code[0] == '1':
            msg = _style.HTTP_INFO(msg)
        elif http_code == '304':
            msg = _style.HTTP_NOT_MODIFIED(msg)
        elif http_code[0] == '3':
            msg = _style.HTTP_REDIRECT(msg)
        elif http_code == '404':
            msg = _style.HTTP_NOT_FOUND(msg)
        elif http_code[0] == '4':
            msg = _style.HTTP_BAD_REQUEST(msg)
        else:
            # Any 5XX, or any other response
            msg = _style.HTTP_SERVER_ERROR(msg)
        _log(type, msg)

    WSGIRequestHandler.log = werk_log
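A minimal standalone sketch of the core pattern this command wraps: serving a WSGI app through Werkzeug with the interactive debugger. It uses the modern werkzeug.serving/werkzeug.debug import paths and a trivial hello app of my own, not the Django handler above:

from werkzeug.serving import run_simple
from werkzeug.debug import DebuggedApplication

def hello(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello\n']

if __name__ == '__main__':
    # evalex=True enables the in-browser interactive console, matching the
    # DebuggedApplication(handler, True) call in the command above.
    run_simple('127.0.0.1', 8000, DebuggedApplication(hello, evalex=True),
               use_reloader=True, use_debugger=True)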
| repo_name: GbalsaC/bitnamiP | path: venv/lib/python2.7/site-packages/django_extensions/management/commands/runserver_plus.py | language: Python | license: agpl-3.0 | size: 14,020 | keyword: ["VisIt"] | text_hash: abcebb9916a5ce9f454b38a0430b592db86c040702e34b72ddb3ed4b90db0af0 |
import pytest
import os
import glob
import unittest
import tempfile
import filecmp
import numpy.testing.utils as nptu
import numpy as np
from fireworks.core.rocket_launcher import rapidfire
from abipy.dynamics.hist import HistFile
from abipy.flowtk.events import DilatmxError
from abiflows.fireworks.workflows.abinit_workflows import RelaxFWWorkflow
from abiflows.fireworks.tasks.abinit_tasks import RelaxFWTask
from abiflows.fireworks.utils.fw_utils import get_fw_by_task_index, load_abitask, get_last_completed_launch
from abiflows.core.testing import AbiflowsIntegrationTest
#ABINIT_VERSION = "8.6.1"
# pytestmark = [pytest.mark.skipif(not has_abinit(ABINIT_VERSION), reason="Abinit version {} is not in PATH".format(ABINIT_VERSION)),
# pytest.mark.skipif(not has_fireworks(), reason="fireworks package is missing"),
# pytest.mark.skipif(not has_mongodb(), reason="no connection to mongodb")]
pytestmark = pytest.mark.usefixtures("cleandb")
class ItestRelax(AbiflowsIntegrationTest):

    def itest_relax_wf(self, lp, fworker, tmpdir, inputs_relax_si_low, use_autoparal, db_data):
        """
        Tests the basic functionality of a RelaxFWWorkflow with autoparal True and False.
        """
        wf = RelaxFWWorkflow(*inputs_relax_si_low, autoparal=use_autoparal,
                             initialization_info={"kppa": 100})
        wf.add_mongoengine_db_insertion(db_data)
        wf.add_final_cleanup(["WFK"])
        initial_ion_structure = inputs_relax_si_low[0].structure

        ion_fw_id = wf.ion_fw.fw_id
        ioncell_fw_id = wf.ioncell_fw.fw_id
        old_new = wf.add_to_db(lpad=lp)
        ion_fw_id = old_new[ion_fw_id]
        ioncell_fw_id = old_new[ioncell_fw_id]

        rapidfire(lp, fworker, m_dir=str(tmpdir))

        wf = lp.get_wf_by_fw_id(ion_fw_id)
        assert wf.state == "COMPLETED"

        ioncell_fw = get_fw_by_task_index(wf, "ioncell", index=-1)
        ioncell_task = load_abitask(ioncell_fw)
        ioncell_hist_path = ioncell_task.outdir.has_abiext("HIST")
        with HistFile(ioncell_hist_path) as hist:
            initial_ioncell_structure = hist.structures[0]
        assert initial_ion_structure != initial_ioncell_structure

        # check the effect of the final cleanup
        assert len(glob.glob(os.path.join(ioncell_task.outdir.path, "*_WFK"))) == 0
        assert len(glob.glob(os.path.join(ioncell_task.outdir.path, "*_DEN"))) > 0
        assert len(glob.glob(os.path.join(ioncell_task.tmpdir.path, "*"))) == 0
        assert len(glob.glob(os.path.join(ioncell_task.indir.path, "*"))) == 0

        # check the result in the DB
        from abiflows.database.mongoengine.abinit_results import RelaxResult
        with db_data.switch_collection(RelaxResult) as RelaxResult:
            results = RelaxResult.objects()
            assert len(results) == 1
            r = results[0]

            # test input structure
            assert r.abinit_input.structure.to_mgobj() == initial_ion_structure

            # test output structure
            # remove site properties, otherwise the "cartesian_forces" won't match due to the presence of a
            # list instead of an array in the deserialization
            db_structure = r.abinit_output.structure.to_mgobj()
            for s in db_structure:
                s.properties = {}
            hist_structure = hist.structures[-1]
            for s in hist_structure:
                s.properties = {}
            assert db_structure == hist_structure

            assert r.abinit_input.ecut == inputs_relax_si_low[0]['ecut']
            assert r.abinit_input.kppa == 100
            nptu.assert_array_equal(r.abinit_input.last_input.to_mgobj()['ngkpt'], inputs_relax_si_low[0]['ngkpt'])

            with tempfile.NamedTemporaryFile(mode="wb") as db_file:
                db_file.write(r.abinit_output.gsr.read())
                db_file.seek(0)
                assert filecmp.cmp(ioncell_task.gsr_path, db_file.name)

        if self.check_numerical_values:
            with ioncell_task.open_gsr() as gsr:
                assert gsr.energy == pytest.approx(-240.28203726305696, rel=0.01)
                assert np.allclose((3.8101419256822333, 3.8101444012342616, 3.8101434297177068),
                                   gsr.structure.lattice.abc, rtol=0.05)

    def itest_uncoverged(self, lp, fworker, tmpdir, inputs_relax_si_low):
        """
        Tests the restart when the ionic convergence is not reached.
        """
        inputs_relax_si_low[0]['ntime'] = 3
        wf = RelaxFWWorkflow(*inputs_relax_si_low, autoparal=False)
        initial_ion_structure = inputs_relax_si_low[0].structure

        ion_fw_id = wf.ion_fw.fw_id
        ioncell_fw_id = wf.ioncell_fw.fw_id
        old_new = wf.add_to_db(lpad=lp)
        ion_fw_id = old_new[ion_fw_id]
        ioncell_fw_id = old_new[ioncell_fw_id]

        rapidfire(lp, fworker, m_dir=str(tmpdir), nlaunches=1)

        ion_fw = lp.get_fw_by_id(ion_fw_id)
        ioncell_fw = lp.get_fw_by_id(ioncell_fw_id)

        assert ion_fw.state == "COMPLETED"
        assert ioncell_fw.state == "WAITING"

        launch = ion_fw.launches[-1]
        assert any(event.yaml_tag == RelaxFWTask.CRITICAL_EVENTS[0].yaml_tag for event in launch.action.stored_data['report'])

        links_ion = lp.get_wf_by_fw_id(ion_fw_id).links[ion_fw_id]

        # there should be an additional child (the detour)
        assert len(links_ion) == 2

        links_ion.remove(ioncell_fw_id)
        fw_detour_id = links_ion[0]

        # run the detour
        rapidfire(lp, fworker, m_dir=str(tmpdir))

        fw_detour = lp.get_fw_by_id(fw_detour_id)
        assert fw_detour.state == "COMPLETED"
        restart_structure = fw_detour.spec['_tasks'][0].abiinput.structure

        wf = lp.get_wf_by_fw_id(ion_fw_id)
        assert wf.state == "COMPLETED"

        # check that the structure has been updated when restarting
        assert initial_ion_structure != restart_structure

        if self.check_numerical_values:
            last_ioncell_task = load_abitask(get_fw_by_task_index(wf, "ioncell", index=-1))
            with last_ioncell_task.open_gsr() as gsr:
                assert gsr.energy == pytest.approx(-240.28203726305696, rel=0.01)
                assert gsr.structure.lattice.abc == pytest.approx(
                    np.array((3.8101428225862084, 3.810143911539674, 3.8101432797789698)), rel=0.05)

    def itest_dilatmx(self, lp, fworker, tmpdir, inputs_relax_si_low):
        """
        Tests the workflow with a target dilatmx.
        """
        # set the dilatmx to 1.05 to keep the change independent of the generation of the input
        inputs_relax_si_low[1]['dilatmx'] = 1.05
        wf = RelaxFWWorkflow(*inputs_relax_si_low, autoparal=False, target_dilatmx=1.03)
        initial_ion_structure = inputs_relax_si_low[0].structure

        ion_fw_id = wf.ion_fw.fw_id
        ioncell_fw_id = wf.ioncell_fw.fw_id
        old_new = wf.add_to_db(lpad=lp)
        ion_fw_id = old_new[ion_fw_id]
        ioncell_fw_id = old_new[ioncell_fw_id]

        rapidfire(lp, fworker, m_dir=str(tmpdir), nlaunches=2)

        ion_fw = lp.get_fw_by_id(ion_fw_id)
        ioncell_fw = lp.get_fw_by_id(ioncell_fw_id)

        assert ion_fw.state == "COMPLETED"
        assert ioncell_fw.state == "COMPLETED"

        launch = ioncell_fw.launches[-1]

        links_ioncell = lp.get_wf_by_fw_id(ioncell_fw_id).links[ioncell_fw_id]

        # there should be an additional child (the detour)
        assert len(links_ioncell) == 1
        fw_detour_id = links_ioncell[0]

        # run the detour with lowered dilatmx
        rapidfire(lp, fworker, m_dir=str(tmpdir))

        fw_detour = lp.get_fw_by_id(fw_detour_id)
        assert fw_detour.state == "COMPLETED"
        detour_abiinput = fw_detour.spec['_tasks'][0].abiinput
        assert detour_abiinput['dilatmx'] == 1.03
        restart_structure = detour_abiinput.structure

        # check that the structure has been updated when restarting
        assert initial_ion_structure != restart_structure

        wf = lp.get_wf_by_fw_id(ion_fw_id)
        assert wf.state == "COMPLETED"

        # check that the structure has been updated when restarting
        assert initial_ion_structure != restart_structure

        if self.check_numerical_values:
            last_ioncell_task = load_abitask(get_fw_by_task_index(wf, "ioncell", index=-1))
            with last_ioncell_task.open_gsr() as gsr:
                assert gsr.structure.lattice.abc == pytest.approx(
                    np.array((3.8101419255677951, 3.8101444011173897, 3.8101434296150889)), rel=0.05)

    def itest_dilatmx_error(self, lp, fworker, tmpdir, inputs_relax_si_low, db_data):
        """
        Tests the workflow when a dilatmx error shows up.
        Also tests the skip_ion option of RelaxFWWorkflow.
        """
        # set the dilatmx to a small value, so that the dilatmx error will show up
        initial_dilatmx = 1.001
        inputs_relax_si_low[1]['dilatmx'] = initial_dilatmx
        # also test the skip_ion option
        wf = RelaxFWWorkflow(*inputs_relax_si_low, autoparal=False, skip_ion=True)
        wf.add_mongoengine_db_insertion(db_data)
        initial_ion_structure = inputs_relax_si_low[0].structure

        ioncell_fw_id = wf.ioncell_fw.fw_id
        old_new = wf.add_to_db(lpad=lp)
        ioncell_fw_id = old_new[ioncell_fw_id]

        rapidfire(lp, fworker, m_dir=str(tmpdir), nlaunches=1)

        ioncell_fw = lp.get_fw_by_id(ioncell_fw_id)
        assert ioncell_fw.state == "COMPLETED"

        launch = ioncell_fw.launches[-1]
        assert any(event.yaml_tag == DilatmxError.yaml_tag for event in launch.action.stored_data['report'])

        links_ioncell = lp.get_wf_by_fw_id(ioncell_fw_id).links[ioncell_fw_id]

        # there should be an additional child (the detour)
        assert len(links_ioncell) == 2

        # run the detour, restarting from the previous structure
        rapidfire(lp, fworker, m_dir=str(tmpdir), nlaunches=1)

        wf = lp.get_wf_by_fw_id(ioncell_fw_id)
        fw_detour = get_fw_by_task_index(wf, "ioncell", index=2)

        assert fw_detour.state == "COMPLETED"
        detour_abiinput = fw_detour.spec['_tasks'][0].abiinput
        assert detour_abiinput['dilatmx'] == initial_dilatmx
        restart_structure = detour_abiinput.structure

        # check that the structure has been updated when restarting
        assert initial_ion_structure != restart_structure

        # complete the wf. Just check that the saving without the ion tasks completes without error
        rapidfire(lp, fworker, m_dir=str(tmpdir))

        wf = lp.get_wf_by_fw_id(ioncell_fw_id)
        assert wf.state == "COMPLETED"
| repo_name: gmatteo/abiflows | path: abiflows/fireworks/integration_tests/itest_relax.py | language: Python | license: gpl-2.0 | size: 10,747 | keyword: ["ABINIT"] | text_hash: 4035f512323b1af3861964cfab91249de69a14faed82076c22491d4ad5d0dd25 |
"""
Utilities for ComponentMonitoring features
"""
import datetime
import socket
from DIRAC import S_OK
from DIRAC.FrameworkSystem.Client.ComponentMonitoringClient import ComponentMonitoringClient
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
def monitorInstallation(componentType, system, component, module=None, cpu=None, hostname=None):
    """
    Register the installation of a component in the ComponentMonitoringDB
    """
    monitoringClient = ComponentMonitoringClient()
    if not module:
        module = component

    # Retrieve user installing the component
    user = None
    result = getProxyInfo()
    if result["OK"]:
        proxyInfo = result["Value"]
        if "username" in proxyInfo:
            user = proxyInfo["username"]
    else:
        return result
    if not user:
        user = "unknown"

    if not cpu:
        cpu = "Not available"
        for line in open("/proc/cpuinfo"):
            if line.startswith("model name"):
                cpu = line.split(":")[1][0:64]
                cpu = cpu.replace("\n", "").lstrip().rstrip()

    if not hostname:
        hostname = socket.getfqdn()
    instance = component[0:32]

    result = monitoringClient.installationExists(
        {"Instance": instance, "UnInstallationTime": None},
        {"Type": componentType, "DIRACSystem": system, "DIRACModule": module},
        {"HostName": hostname, "CPU": cpu},
    )
    if not result["OK"]:
        return result
    if result["Value"]:
        return S_OK("Monitoring of %s is already enabled" % component)

    result = monitoringClient.addInstallation(
        {"InstallationTime": datetime.datetime.utcnow(), "InstalledBy": user, "Instance": instance},
        {"Type": componentType, "DIRACSystem": system, "DIRACModule": module},
        {"HostName": hostname, "CPU": cpu},
        True,
    )
    return result


def monitorUninstallation(system, component, cpu=None, hostname=None):
    """
    Register the uninstallation of a component in the ComponentMonitoringDB
    """
    monitoringClient = ComponentMonitoringClient()

    # Retrieve user uninstalling the component
    user = None
    result = getProxyInfo()
    if result["OK"]:
        proxyInfo = result["Value"]
        if "username" in proxyInfo:
            user = proxyInfo["username"]
    else:
        return result
    if not user:
        user = "unknown"

    if not cpu:
        cpu = "Not available"
        for line in open("/proc/cpuinfo"):
            if line.startswith("model name"):
                cpu = line.split(":")[1][0:64]
                cpu = cpu.replace("\n", "").lstrip().rstrip()

    if not hostname:
        hostname = socket.getfqdn()
    instance = component[0:32]

    result = monitoringClient.updateInstallations(
        {"Instance": instance, "UnInstallationTime": None},
        {"DIRACSystem": system},
        {"HostName": hostname, "CPU": cpu},
        {"UnInstallationTime": datetime.datetime.utcnow(), "UnInstalledBy": user},
    )
    return result
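A hedged usage sketch (the component names are hypothetical; it requires a configured DIRAC client and a valid proxy so that getProxyInfo() succeeds):

result = monitorInstallation("service", "WorkloadManagement", "JobMonitoring")
if not result["OK"]:
    print("Could not register installation: %s" % result.get("Message"))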
| repo_name: DIRACGrid/DIRAC | path: src/DIRAC/FrameworkSystem/Utilities/MonitoringUtilities.py | language: Python | license: gpl-3.0 | size: 3,011 | keyword: ["DIRAC"] | text_hash: 155d2d39cd9c7c3185378f54c7e958c41b6dfcf7cffbb22b4b8ffed31ffb0f9f |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
    import builtins as builtin
except ImportError:
    import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from urllib import quote
from json import dumps
from Plugins.Extensions.OpenWebif.local import tstrings
import datetime
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.375729
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/mobile/timerlist.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
        'This template was compiled with Cheetah version'
        ' %s. Templates compiled before version %s must be recompiled.' % (
            __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class timerlist(Template):

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        super(timerlist, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter

        ########################################
        ## START - generated method body

        write(u'''<html>\r
<head>\r
\t<title>OpenWebif</title>\r
\t<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\r
\t<meta name="viewport" content="user-scalable=no, width=device-width"/>\r
\t<meta name="apple-mobile-web-app-capable" content="yes" />\r
\t<link rel="stylesheet" type="text/css" href="/css/jquery.mobile-1.0.min.css" media="screen"/>\r
\t<link rel="stylesheet" type="text/css" href="/css/iphone.css" media="screen"/>\r
\t<script src="/js/jquery-1.6.2.min.js"></script>\r
\t<script src="/js/jquery.mobile-1.0.min.js"></script>\r
\t<script type="text/javascript" src="/js/openwebif.js"></script>\r
\t<script type="text/javascript">initJsTranslation(''')
        _v = VFFSL(SL,"dumps",False)(VFFSL(SL,"tstrings",True)) # u'$dumps($tstrings)' on line 15, col 51
        if _v is not None: write(_filter(_v, rawExpr=u'$dumps($tstrings)')) # from line 15, col 51.
        write(u''')</script>\r
</head>\r
<body> \r
\t<div data-role="page">\r
\r
\t\t<div id="header">\r
\t\t\t<div class="button" onClick="history.back()">''')
        _v = VFFSL(SL,"tstrings",True)['back'] # u"$tstrings['back']" on line 22, col 49
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['back']")) # from line 22, col 49.
        write(u'''</div>\r
\t\t\t<!-- <div class="button-bold">+</div> -->\r
\t\t\t<h1><a style="color:#FFF;text-decoration:none;" href=\'/mobile\'>OpenWebif</a></h1>
\t\t</div>\r
\r
\t\t<div id="contentContainer">\r
\t\t\t<ul data-role="listview" data-inset="true" data-theme="d">\r
\t\t\t\t<li data-role="list-divider" role="heading" data-theme="b">''')
        _v = VFFSL(SL,"tstrings",True)['timer_list'] # u"$tstrings['timer_list']" on line 29, col 64
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['timer_list']")) # from line 29, col 64.
        write(u'''</li>\r
''')
        for timer in VFFSL(SL,"timers",True): # generated from line 30, col 5
            duration = VFFSL(SL,"timer.duration",True)/60
            starttime = datetime.datetime.fromtimestamp(VFFSL(SL,"timer.begin",True)).strftime("%d.%m.%Y")
            endtime = datetime.datetime.fromtimestamp(VFFSL(SL,"timer.end",True)).strftime("%d.%m.%Y")
            write(u'''\t\t\t\t<li>\r
''')
            sref = quote(VFFSL(SL,"timer.serviceref",True), safe=' ~@#$&()*!+=:;,.?/\'')
            name = quote(VFFSL(SL,"timer.name",True), safe=' ~@#$&()*!+=:;,.?/\'').replace("'","\\'")
            write(u'''\t\t\t\t\t<a href="javascript:history.go(0)" onClick="deleteTimer(\'''')
            _v = VFFSL(SL,"sref",True) # u'$sref' on line 37, col 63
            if _v is not None: write(_filter(_v, rawExpr=u'$sref')) # from line 37, col 63.
            write(u"""', '""")
            _v = VFFSL(SL,"timer.begin",True) # u'$timer.begin' on line 37, col 72
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.begin')) # from line 37, col 72.
            write(u"""', '""")
            _v = VFFSL(SL,"timer.end",True) # u'$timer.end' on line 37, col 88
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.end')) # from line 37, col 88.
            write(u"""', '""")
            _v = VFFSL(SL,"name",True) # u'$name' on line 37, col 102
            if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 37, col 102.
            write(u'''\');">\r
\t\t\t\t\t\t<span class="ui-li-heading" style="margin-top: 3px; margin-bottom: 3px;">''')
            _v = VFFSL(SL,"timer.name",True) # u'$timer.name' on line 38, col 80
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.name')) # from line 38, col 80.
            write(u''' (''')
            _v = VFFSL(SL,"timer.servicename",True) # u'$timer.servicename' on line 38, col 93
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.servicename')) # from line 38, col 93.
            write(u''')</span>\r
\t\t\t\t\t\t<span class="ui-li-desc" style="margin-top: 3px; margin-bottom: 3px;">''')
            _v = VFFSL(SL,"starttime",True) # u'$starttime' on line 39, col 77
            if _v is not None: write(_filter(_v, rawExpr=u'$starttime')) # from line 39, col 77.
            write(u''' - ''')
            _v = VFFSL(SL,"endtime",True) # u'$endtime' on line 39, col 90
            if _v is not None: write(_filter(_v, rawExpr=u'$endtime')) # from line 39, col 90.
            write(u''' (''')
            _v = VFFSL(SL,"duration",True) # u'$duration' on line 39, col 100
            if _v is not None: write(_filter(_v, rawExpr=u'$duration')) # from line 39, col 100.
            write(u''' min)</span>\r
\t\t\t\t\t</a>\r
\t\t\t\t</li>\r
''')
        write(u'''\t\t\t</ul>\r
\t\t\t<button onClick="document.location.reload(true)">''')
        _v = VFFSL(SL,"tstrings",True)['refresh'] # u"$tstrings['refresh']" on line 44, col 53
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['refresh']")) # from line 44, col 53.
        write(u'''</button>\r
\t\t</div>\r
\r
\t\t<div id="footer">\r
\t\t\t<p>OpenWebif Mobile</p>\r
\t\t\t<a onclick="document.location.href=\'/index?mode=fullpage\';return false;" href="#">''')
        _v = VFFSL(SL,"tstrings",True)['show_full_openwebif'] # u"$tstrings['show_full_openwebif']" on line 49, col 86
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['show_full_openwebif']")) # from line 49, col 86.
        write(u'''</a>\r
\t\t</div>\r
\t\t\r
\t</div>\r
</body>\r
</html>\r
''')

        ########################################
        ## END - generated method body

        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_timerlist = 'respond'

## END CLASS DEFINITION

if not hasattr(timerlist, '_initCheetahAttributes'):
    templateAPIClass = getattr(timerlist, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(timerlist)

# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=timerlist()).run()
| repo_name: MOA-2011/enigma2-plugin-extensions-openwebif | path: plugin/controllers/views/mobile/timerlist.py | language: Python | license: gpl-2.0 | size: 9,771 | keyword: ["VisIt"] | text_hash: 4d0b28bc7f379c642ec4ecb6cfda0339ae6e3c8b9c62392e71eaf3667532801e |
# modified mexican hat wavelet test.py
# spectral analysis for RADAR and WRF patterns
# NO plotting - just saving the results: LOG-response spectra for each sigma and max-LOG response numerical spectra
import os, shutil
import time, datetime
import pickle
import numpy as np
from scipy import signal, ndimage
import matplotlib.pyplot as plt
from armor import defaultParameters as dp
from armor import pattern
from armor import objects4 as ob
#from armor import misc as ms
dbz = pattern.DBZ
testScriptsFolder = dp.root + 'python/armor/tests/'
testName = "modifiedMexicanHatTest12_march2014"
timeString = str(int(time.time()))
outputFolder = dp.root + 'labLogs/%d-%d-%d-%s/' % \
(time.localtime().tm_year, time.localtime().tm_mon, time.localtime().tm_mday, testName)
if not os.path.exists(outputFolder):
os.makedirs(outputFolder)
shutil.copyfile(testScriptsFolder+testName+".py", outputFolder+ timeString + testName+".py")
kongreywrf = ob.kongreywrf
kongreywrf.fix()
kongrey = ob.kongrey
monsoon = ob.monsoon
monsoon.list= [v for v in monsoon.list if '20120612' in v.dataTime]
march2014 = ob.march2014
march2014wrf11 = ob.march2014wrf11
march2014wrf12 = ob.march2014wrf12
march2014wrf = ob.march2014wrf
################################################################################
# hack
#kongrey.list = [v for v in kongrey.list if v.dataTime>="20130828.2320"]
################################################################################
# parameters
#sigmas = [1, 2, 4, 5, 8 ,10 ,16, 20, 32, 40, 64, 80, 128, 160, 256,]
#dbzstreams = [kongrey]
#sigmas = [1, 2, 4, 5, 8 ,10 ,16, 20, 32, 40, 64]
#dbzstreams = [kongreywrf]
sigmas = [1, 2, 4, 5, 8 ,10 ,16, 20, 32, 40, 64, 80, 128, 160, 256,]
dbzstreams = [march2014]
sigmaPower=0
scaleSpaceExponent = 0 #switch on scalespace analysis: scaleSpaceExponent=2
# end parameters
################################################################################
summaryFile = open(outputFolder + timeString + "summary.txt", 'a')
for ds in dbzstreams:
summaryFile.write("\n===============================================================\n\n\n")
streamMean = 0.
dbzCount = 0
#hack
#streamMean = np.array([135992.57472004235, 47133.59049120619, 16685.039217734946, 11814.043851969862, 5621.567482638702, 3943.2774923729303, 1920.246102887001, 1399.7855335686243, 760.055614122099, 575.3654495432361, 322.26668666562375, 243.49842951291757, 120.54647935045809, 79.05741086463254, 26.38971066782135])
#dbzCount = 140
for a in ds:
print "-------------------------------------------------"
print testName
print
print a.name
a.load()
a.setThreshold(0)
a.saveImage(imagePath=outputFolder+a.name+".png")
L = []
a.responseImages = [] #2014-05-02
#for sigma in [1, 2, 4, 8 ,16, 32, 64, 128, 256, 512]:
for sigma in sigmas:
print "sigma:", sigma
a.load()
a.setThreshold(0)
arr0 = a.matrix
#arr1 = signal.convolve2d(arr0, mask_i, mode='same', boundary='fill')
#arr1 = ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) * sigma**2 #2014-04-29
#arr1 = ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) #2014-05-07
arr1 = ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) *sigma**scaleSpaceExponent #2014-05-09
a1 = dbz(matrix=arr1.real, name=a.name + "_" + testName + "_sigma" + str(sigma))
L.append({ 'sigma' : sigma,
'a1' : a1,
'abssum1': abs(a1.matrix).sum(),
'sum1' : a1.matrix.sum(),
})
print "abs sum", abs(a1.matrix.sum())
#a1.show()
#a2.show()
plt.close()
#a1.histogram(display=False, outputPath=outputFolder+a1.name+"_histogram.png")
###############################################################################
# computing the spectrum, i.e. sigma for which the LOG has max response
# 2014-05-02
a.responseImages.append({'sigma' : sigma,
'matrix' : arr1 * sigma**2,
})
pickle.dump(a.responseImages, open(outputFolder+a.name+"responseImagesList.pydump",'w'))
a_LOGspec = dbz(name= a.name + "Laplacian-of-Gaussian_numerical_spectrum",
imagePath=outputFolder+a1.name+"_LOGspec.png",
outputPath = outputFolder+a1.name+"_LOGspec.dat",
cmap = 'jet',
)
a.responseImages = np.dstack([v['matrix'] for v in a.responseImages])
#print 'shape:', a.responseImages.shape #debug
a.responseMax = a.responseImages.max(axis=2) # the deepest dimension
a_LOGspec.matrix = np.zeros(a.matrix.shape)
for count, sigma in enumerate(sigmas):
a_LOGspec.matrix += sigma * (a.responseMax == a.responseImages[:,:,count])
a_LOGspec.vmin = a_LOGspec.matrix.min()
a_LOGspec.vmax = a_LOGspec.matrix.max()
print "saving to:", a_LOGspec.imagePath
#a_LOGspec.saveImage()
print a_LOGspec.outputPath
#a_LOGspec.saveMatrix()
#a_LOGspec.histogram(display=False, outputPath=outputFolder+a1.name+"_LOGspec_histogram.png")
pickle.dump(a_LOGspec, open(outputFolder+ a_LOGspec.name + ".pydump","w"))
# end computing the sigma for which the LOG has max response
# 2014-05-02
##############################################################################
#pickle.dump(L, open(outputFolder+ a.name +'_test_results.pydump','w')) # no need to dump if test is easy
sigmas = np.array([v['sigma'] for v in L])
y1 = [v['abssum1'] for v in L]
plt.close()
plt.plot(sigmas,y1)
plt.title(a1.name+ '\n absolute values against sigma')
plt.savefig(outputFolder+a1.name+"-spectrum-histogram.png")
plt.close()
# now update the mean
streamMeanUpdate = np.array([v['abssum1'] for v in L])
dbzCount += 1
streamMean = 1.* ((streamMean*(dbzCount -1)) + streamMeanUpdate ) / dbzCount
print "Stream Count and Mean so far:", dbzCount, streamMean
# now save the mean and the plot
summaryText = '\n---------------------------------------\n'
summaryText += str(int(time.time())) + '\n'
summaryText += "dbzStream Name: " + ds.name + '\n'
summaryText += "dbzCount:\t" + str(dbzCount) + '\n'
summaryText +="sigma=\t\t" + str(sigmas.tolist()) + '\n'
summaryText += "streamMean=\t" + str(streamMean.tolist()) +'\n'
print summaryText
print "saving..."
# release the memory
a.matrix = np.array([0])
summaryFile.write(summaryText)
plt.close()
plt.plot(sigmas, streamMean* (sigmas**sigmaPower))
plt.title(ds.name + '- average laplacian-of-gaussian numerical spectrum\n' +\
'for ' +str(dbzCount) + ' DBZ patterns\n' +\
'suppressed by a factor of sigma^' + str(sigmaPower) )
plt.savefig(outputFolder + ds.name + "_average_LoG_numerical_spectrum.png")
plt.close()
summaryFile.close()
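################################################################################
# A minimal, self-contained sketch (illustrative array sizes and sigma values,
# not part of the test run above) of the per-pixel "best sigma" map built in
# the loop: for every pixel, pick the sigma whose Laplacian-of-Gaussian
# response is largest. Only numpy/scipy are assumed. As in the loop above,
# ties add several sigmas for the same pixel.
def _log_scale_map_demo():
    import numpy as np
    from scipy import ndimage
    rng = np.random.RandomState(0)
    field = rng.rand(64, 64) # stand-in for a DBZ matrix
    demo_sigmas = [1, 2, 4, 8]
    # one LoG response image per sigma, normalised by sigma**2
    responses = np.dstack([ndimage.filters.gaussian_laplace(field, sigma=s, mode="constant", cval=0.0) * s**2
                           for s in demo_sigmas])
    best = responses.max(axis=2) # strongest response per pixel
    scale_map = np.zeros(field.shape)
    for count, s in enumerate(demo_sigmas):
        scale_map += s * (best == responses[:, :, count])
    return scale_map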
|
yaukwankiu/armor
|
tests/modifiedMexicanHatTest12_march2014.py
|
Python
|
cc0-1.0
| 7,587
|
[
"Gaussian"
] |
38912a2ba8c8f81c6c1d4e936e6cd7da78f6e4add24deba7c16c12d11e22b573
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2006-2008 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
__tests__ = 'stoqlib/domain/payment/group.py'
from decimal import Decimal
from nose.exc import SkipTest
from stoqlib.database.runtime import get_current_branch
from stoqlib.domain.commission import CommissionSource, Commission
from stoqlib.domain.payment.method import PaymentMethod
from stoqlib.domain.payment.payment import Payment
from stoqlib.domain.sale import Sale
from stoqlib.domain.stockdecrease import StockDecrease
from stoqlib.domain.test.domaintest import DomainTest
from stoqlib.lib.parameters import sysparam
StockDecrease # pylint: disable=W0104
class TestPaymentGroup(DomainTest):
def setUp(self):
# FIXME: On some tests where PaymentGroup._renegotiation is accessed,
# a traceback occurs because PaymentRenegotiation was not imported.
# We can't import it in PaymentGroup since that would create an import
# loop. This is a potential problem in Stoq and should be fixed there.
from stoqlib.domain.payment.renegotiation import PaymentRenegotiation
PaymentRenegotiation # pylint: disable=W0104
super(TestPaymentGroup, self).setUp()
def _payComissionWhenConfirmed(self):
sysparam.set_bool(
self.store,
"SALE_PAY_COMMISSION_WHEN_CONFIRMED",
True)
self.failUnless(
sysparam.get_bool('SALE_PAY_COMMISSION_WHEN_CONFIRMED'))
def test_remove_item(self):
payment = self.create_payment()
with self.assertRaises(AttributeError):
payment.group.remove_item(payment=None)
self.assertIsNone(payment.group.remove_item(payment=payment))
def test_installments_number(self):
payment = self.create_payment()
self.assertEquals(payment.group.installments_number, 1)
def test_get_payments_sum(self):
payment = self.create_payment()
payments = payment.group.get_valid_payments()
result = payment.group._get_payments_sum(payments=payments,
attr=Payment.value)
self.assertEquals(result, 10)
def test_clear_unused(self):
payment = self.create_payment()
payment2 = self.create_payment(group=payment.group)
payment2.status = Payment.STATUS_PREVIEW
self.assertEquals(payment.group._get_preview_payments().count(), 2)
payment.group.clear_unused()
with self.assertRaises(AttributeError):
payment.group._get_preview_payments()
def test_confirm(self):
branch = self.create_branch()
group = self.create_payment_group()
method = PaymentMethod.get_by_name(self.store, u'bill')
payment1 = method.create_payment(Payment.TYPE_IN, group, branch, Decimal(10))
payment2 = method.create_payment(Payment.TYPE_IN, group, branch, Decimal(10))
payment2.set_pending()
self.assertEqual(payment1.status, Payment.STATUS_PREVIEW)
self.assertEqual(payment2.status, Payment.STATUS_PENDING)
group.confirm()
self.assertEqual(payment1.status, Payment.STATUS_PENDING)
self.assertEqual(payment2.status, Payment.STATUS_PENDING)
def test_pay(self):
branch = self.create_branch()
group = self.create_payment_group()
method = PaymentMethod.get_by_name(self.store, u'bill')
payment1 = method.create_payment(Payment.TYPE_IN, group, branch, Decimal(10))
payment2 = method.create_payment(Payment.TYPE_IN, group, branch, Decimal(10))
group.confirm()
self.assertEqual(payment1.status, Payment.STATUS_PENDING)
self.assertEqual(payment2.status, Payment.STATUS_PENDING)
payment2.pay()
self.assertEqual(payment2.status, Payment.STATUS_PAID)
group.pay()
self.assertEqual(payment1.status, Payment.STATUS_PAID)
self.assertEqual(payment2.status, Payment.STATUS_PAID)
def test_pay_money_payments(self):
branch = self.create_branch()
group = self.create_payment_group()
method = PaymentMethod.get_by_name(self.store, u'bill')
payment1 = method.create_payment(Payment.TYPE_IN, group, branch, Decimal(10))
payment2 = method.create_payment(Payment.TYPE_IN, group, branch, Decimal(10))
method = PaymentMethod.get_by_name(self.store, u'money')
method.max_installments = 2
payment3 = method.create_payment(Payment.TYPE_IN, group, branch, Decimal(10))
payment4 = method.create_payment(Payment.TYPE_IN, group, branch, Decimal(10))
group.confirm()
self.assertEqual(payment1.status, Payment.STATUS_PENDING)
self.assertEqual(payment2.status, Payment.STATUS_PENDING)
self.assertEqual(payment3.status, Payment.STATUS_PENDING)
self.assertEqual(payment4.status, Payment.STATUS_PENDING)
payment3.pay()
self.assertEqual(payment3.status, Payment.STATUS_PAID)
group.pay_method_payments(u'money')
self.assertEqual(payment1.status, Payment.STATUS_PENDING)
self.assertEqual(payment2.status, Payment.STATUS_PENDING)
self.assertEqual(payment3.status, Payment.STATUS_PAID)
self.assertEqual(payment4.status, Payment.STATUS_PAID)
def test_cancel(self):
branch = self.create_branch()
group = self.create_payment_group()
method = PaymentMethod.get_by_name(self.store, u'bill')
payment1 = method.create_payment(Payment.TYPE_IN, group, branch, Decimal(10))
payment2 = method.create_payment(Payment.TYPE_IN, group, branch, Decimal(10))
payment3 = method.create_payment(Payment.TYPE_IN, group, branch, Decimal(10))
group.confirm()
payment3.pay()
self.assertEqual(payment1.status, Payment.STATUS_PENDING)
self.assertEqual(payment2.status, Payment.STATUS_PENDING)
self.assertEqual(payment3.status, Payment.STATUS_PAID)
group.cancel()
self.assertEqual(payment1.status, Payment.STATUS_CANCELLED)
self.assertEqual(payment2.status, Payment.STATUS_CANCELLED)
self.assertEqual(payment3.status, Payment.STATUS_PAID)
def test_installments_commission_amount(self):
self._payComissionWhenConfirmed()
sale = self.create_sale()
sellable = self.add_product(sale, price=300)
sale.order()
CommissionSource(sellable=sellable,
direct_value=12,
installments_value=5,
store=self.store)
method = PaymentMethod.get_by_name(self.store, u'check')
method.create_payment(Payment.TYPE_IN, sale.group, sale.branch, Decimal(100))
method.create_payment(Payment.TYPE_IN, sale.group, sale.branch, Decimal(200))
self.assertTrue(self.store.find(Commission, sale=sale).is_empty())
sale.confirm()
self.assertFalse(self.store.find(Commission, sale=sale).is_empty())
commissions = self.store.find(Commission,
sale=sale).order_by(Commission.value)
self.assertEquals(commissions.count(), 2)
for c in commissions:
self.failUnless(c.commission_type == Commission.INSTALLMENTS)
# the first payment represents 1/3 of the total amount
# 5% of 300: 15.00 * 1/3 => 5.00
self.assertEquals(commissions[0].value, Decimal("5.00"))
# the second payment represents 2/3 of the total amount
# 15.00 * 2/3 => 10.00
self.assertEquals(commissions[1].value, Decimal("10.00"))
def test_installments_commission_amount_with_multiple_items(self):
self._payComissionWhenConfirmed()
sale = self.create_sale()
sellable = self.add_product(sale, price=300, quantity=3)
sale.order()
CommissionSource(sellable=sellable,
direct_value=12,
installments_value=5,
store=self.store)
method = PaymentMethod.get_by_name(self.store, u'check')
method.create_payment(Payment.TYPE_IN, sale.group, sale.branch, Decimal(300))
method.create_payment(Payment.TYPE_IN, sale.group, sale.branch, Decimal(450))
method.create_payment(Payment.TYPE_IN, sale.group, sale.branch, Decimal(150))
self.assertTrue(self.store.find(Commission, sale=sale).is_empty())
sale.confirm()
commissions = self.store.find(Commission,
sale=sale).order_by(Commission.value)
self.assertEquals(commissions.count(), 3)
for c in commissions:
self.failUnless(c.commission_type == Commission.INSTALLMENTS)
# the smallest payment (150) represents 1/6 of the total amount
# 5% of 900: 45.00 * 1/6 => 7.50
self.assertEquals(commissions[0].value, Decimal("7.50"))
# the middle payment (300) represents 1/3 of the total amount
# 45.00 * 1/3 => 15.00
self.assertEquals(commissions[1].value, Decimal("15.00"))
# the largest payment (450) represents 1/2 of the total amount
# 45.00 * 1/2 => 22.50
self.assertEquals(commissions[2].value, Decimal("22.50"))
def test_installments_commission_amount_when_sale_return(self):
if True:
raise SkipTest(u"See stoqlib.domain.returnedsale.ReturnedSale.return_ "
u"and bug 5215.")
self._payComissionWhenConfirmed()
sale = self.create_sale()
sellable = self.create_sellable()
CommissionSource(sellable=sellable,
direct_value=12,
installments_value=5,
store=self.store)
sale.add_sellable(sellable, quantity=3, price=300)
product = sellable.product
branch = get_current_branch(self.store)
self.create_storable(product, branch, 100)
sale.order()
method = PaymentMethod.get_by_name(self.store, u'check')
payment1 = method.create_payment(Payment.TYPE_IN, sale.group, sale.branch, Decimal(300))
payment2 = method.create_payment(Payment.TYPE_IN, sale.group, sale.branch, Decimal(450))
payment3 = method.create_payment(Payment.TYPE_IN, sale.group, sale.branch, Decimal(150))
sale.confirm()
# the commissions are only created once the payments are paid
payment1.pay()
payment2.pay()
payment3.pay()
returned_sale = sale.create_sale_return_adapter()
returned_sale.return_()
self.assertEqual(sale.status, Sale.STATUS_RETURNED)
commissions = self.store.find(Commission, sale=sale)
value = sum([c.value for c in commissions])
self.assertEqual(value, Decimal(0))
self.assertEqual(commissions.count(), 4)
self.failIf(commissions[-1].value >= 0)
def test_get_total_value(self):
method = PaymentMethod.get_by_name(self.store, u'check')
# Test for a group in a sale
# On a sale's group, total value should return
# sum(inpayments.value) - sum(outpayments.value)
sale = self.create_sale()
group = sale.group
self.assertEqual(group.get_total_value(), 0)
method.create_payment(Payment.TYPE_IN, group, sale.branch, Decimal(100))
self.assertEqual(group.get_total_value(), Decimal(100))
method.create_payment(Payment.TYPE_IN, group, sale.branch, Decimal(200))
self.assertEqual(group.get_total_value(), Decimal(300))
method.create_payment(Payment.TYPE_OUT, group, sale.branch, Decimal(50))
self.assertEqual(group.get_total_value(), Decimal(250))
# Test for a group in a purchase
# On a purchase's group, total value should return
# sum(outpayments.value) - sum(inpayments.value)
purchase = self.create_purchase_order()
group = purchase.group
self.assertEqual(group.get_total_value(), 0)
method.create_payment(Payment.TYPE_OUT, group, purchase.branch, Decimal(100))
self.assertEqual(group.get_total_value(), Decimal(100))
method.create_payment(Payment.TYPE_OUT, group, purchase.branch, Decimal(200))
self.assertEqual(group.get_total_value(), Decimal(300))
method.create_payment(Payment.TYPE_IN, group, purchase.branch, Decimal(50))
self.assertEqual(group.get_total_value(), Decimal(250))
def test_get_total_to_pay(self):
method = PaymentMethod.get_by_name(self.store, u'check')
# Test for a group in a sale
sale = self.create_sale()
group = sale.group
self.assertEqual(group.get_total_to_pay(), 0)
payment1 = method.create_payment(Payment.TYPE_IN, group, sale.branch,
Decimal(100))
payment1.set_pending()
self.assertEqual(group.get_total_to_pay(), Decimal(100))
payment2 = method.create_payment(Payment.TYPE_IN, group, sale.branch,
Decimal(200))
payment2.set_pending()
self.assertEqual(group.get_total_to_pay(), Decimal(300))
payment1.pay()
self.assertEqual(group.get_total_to_pay(), Decimal(200))
payment2.pay()
self.assertEqual(group.get_total_to_pay(), Decimal(0))
def test_get_total_confirmed_value(self):
method = PaymentMethod.get_by_name(self.store, u'check')
# Test for a group in a sale
# On a sale's group, the total confirmed value should return
# sum(inpayments.value) - sum(outpayments.value), counting only
# payments that have left the preview status
sale = self.create_sale()
group = sale.group
self.assertEqual(group.get_total_confirmed_value(), 0)
p = method.create_payment(
Payment.TYPE_IN, group, sale.branch, Decimal(100))
self.assertEqual(group.get_total_confirmed_value(), 0)
p.set_pending()
self.assertEqual(group.get_total_confirmed_value(), 100)
p = method.create_payment(
Payment.TYPE_IN, group, sale.branch, Decimal(200))
self.assertEqual(group.get_total_confirmed_value(), 100)
p.set_pending()
self.assertEqual(group.get_total_confirmed_value(), 300)
p = method.create_payment(
Payment.TYPE_OUT, group, sale.branch, Decimal(50))
self.assertEqual(group.get_total_confirmed_value(), 300)
p.set_pending()
self.assertEqual(group.get_total_confirmed_value(), 250)
# Test for a group in a purchase
# On a purchase's group, the total confirmed value should return
# sum(outpayments.value) - sum(inpayments.value), counting only
# payments that have left the preview status
purchase = self.create_purchase_order()
group = purchase.group
self.assertEqual(group.get_total_confirmed_value(), 0)
p = method.create_payment(
Payment.TYPE_OUT, group, purchase.branch, Decimal(100))
self.assertEqual(group.get_total_confirmed_value(), 0)
p.set_pending()
self.assertEqual(group.get_total_confirmed_value(), 100)
p = method.create_payment(
Payment.TYPE_OUT, group, purchase.branch, Decimal(200))
self.assertEqual(group.get_total_confirmed_value(), 100)
p.set_pending()
self.assertEqual(group.get_total_confirmed_value(), 300)
p = method.create_payment(
Payment.TYPE_IN, group, purchase.branch, Decimal(50))
self.assertEqual(group.get_total_confirmed_value(), 300)
p.set_pending()
self.assertEqual(group.get_total_confirmed_value(), 250)
def test_get_total_discount(self):
method = PaymentMethod.get_by_name(self.store, u'check')
# Test for a group in a sale
# On a sale's group, the total discount should return
# sum(inpayments.discount) - sum(outpayments.discount)
sale = self.create_sale()
group = sale.group
self.assertEqual(group.get_total_value(), 0)
p = method.create_payment(Payment.TYPE_IN, group, sale.branch, Decimal(10))
p.discount = Decimal(10)
self.assertEqual(group.get_total_discount(), Decimal(10))
p = method.create_payment(Payment.TYPE_IN, group, sale.branch, Decimal(10))
p.discount = Decimal(20)
self.assertEqual(group.get_total_discount(), Decimal(30))
p = method.create_payment(Payment.TYPE_OUT, group, sale.branch, Decimal(10))
p.discount = Decimal(10)
self.assertEqual(group.get_total_discount(), Decimal(20))
# Test for a group in a purchase
# On a purchase's group, the total discount should return
# sum(outpayments.discount) - sum(inpayments.discount)
purchase = self.create_purchase_order()
group = purchase.group
self.assertEqual(group.get_total_value(), 0)
p = method.create_payment(Payment.TYPE_OUT, group, purchase.branch, Decimal(10))
p.discount = Decimal(10)
self.assertEqual(group.get_total_discount(), Decimal(10))
p = method.create_payment(Payment.TYPE_OUT, group, purchase.branch, Decimal(10))
p.discount = Decimal(20)
self.assertEqual(group.get_total_discount(), Decimal(30))
p = method.create_payment(Payment.TYPE_IN, group, purchase.branch, Decimal(10))
p.discount = Decimal(10)
self.assertEqual(group.get_total_discount(), Decimal(20))
def test_get_total_interest(self):
method = PaymentMethod.get_by_name(self.store, u'check')
# Test for a group in a sale
# On a sale's group, the total interest should return
# sum(inpayments.interest) - sum(outpayments.interest)
sale = self.create_sale()
group = sale.group
self.assertEqual(group.get_total_value(), 0)
p = method.create_payment(Payment.TYPE_IN, group, sale.branch, Decimal(10))
p.interest = Decimal(10)
self.assertEqual(group.get_total_interest(), Decimal(10))
p = method.create_payment(Payment.TYPE_IN, group, sale.branch, Decimal(10))
p.interest = Decimal(20)
self.assertEqual(group.get_total_interest(), Decimal(30))
p = method.create_payment(Payment.TYPE_OUT, group, sale.branch, Decimal(10))
p.interest = Decimal(10)
self.assertEqual(group.get_total_interest(), Decimal(20))
# Test for a group in a purchase
# On a purchase's group, the total interest should return
# sum(outpayments.interest) - sum(inpayments.interest)
purchase = self.create_purchase_order()
group = purchase.group
self.assertEqual(group.get_total_value(), 0)
p = method.create_payment(Payment.TYPE_OUT, group, purchase.branch, Decimal(10))
p.interest = Decimal(10)
self.assertEqual(group.get_total_interest(), Decimal(10))
p = method.create_payment(Payment.TYPE_OUT, group, purchase.branch, Decimal(10))
p.interest = Decimal(20)
self.assertEqual(group.get_total_interest(), Decimal(30))
p = method.create_payment(Payment.TYPE_IN, group, purchase.branch, Decimal(10))
p.interest = Decimal(10)
self.assertEqual(group.get_total_interest(), Decimal(20))
def test_get_total_penalty(self):
method = PaymentMethod.get_by_name(self.store, u'check')
# Test for a group in a sale
# On a sale's group, the total penalty should return
# sum(inpayments.penalty) - sum(outpayments.penalty)
sale = self.create_sale()
group = sale.group
self.assertEqual(group.get_total_value(), 0)
p = method.create_payment(Payment.TYPE_IN, group, sale.branch, Decimal(10))
p.penalty = Decimal(10)
self.assertEqual(group.get_total_penalty(), Decimal(10))
p = method.create_payment(Payment.TYPE_IN, group, sale.branch, Decimal(10))
p.penalty = Decimal(20)
self.assertEqual(group.get_total_penalty(), Decimal(30))
p = method.create_payment(Payment.TYPE_OUT, group, sale.branch, Decimal(10))
p.penalty = Decimal(10)
self.assertEqual(group.get_total_penalty(), Decimal(20))
# Test for a group in a purchase
# On a purchase's group, the total penalty should return
# sum(outpayments.penalty) - sum(inpayments.penalty)
purchase = self.create_purchase_order()
group = purchase.group
self.assertEqual(group.get_total_value(), 0)
p = method.create_payment(Payment.TYPE_OUT, group, purchase.branch, Decimal(10))
p.penalty = Decimal(10)
self.assertEqual(group.get_total_penalty(), Decimal(10))
p = method.create_payment(Payment.TYPE_OUT, group, purchase.branch, Decimal(10))
p.penalty = Decimal(20)
self.assertEqual(group.get_total_penalty(), Decimal(30))
p = method.create_payment(Payment.TYPE_IN, group, purchase.branch, Decimal(10))
p.penalty = Decimal(10)
self.assertEqual(group.get_total_penalty(), Decimal(20))
def test_get_payment_by_method_name(self):
group = self.create_payment_group()
method = PaymentMethod.get_by_name(self.store, u'money')
money_payment1 = self.create_payment(method=method)
group.add_item(money_payment1)
money_payment2 = self.create_payment(method=method)
group.add_item(money_payment2)
method = PaymentMethod.get_by_name(self.store, u'check')
check_payment1 = self.create_payment(method=method)
group.add_item(check_payment1)
check_payment2 = self.create_payment(method=method)
group.add_item(check_payment2)
money_payments = group.get_payments_by_method_name(u'money')
for payment in [money_payment1, money_payment2]:
self.assertTrue(payment in money_payments)
for payment in [check_payment1, check_payment2]:
self.assertFalse(payment in money_payments)
check_payments = group.get_payments_by_method_name(u'check')
for payment in [check_payment1, check_payment2]:
self.assertTrue(payment in check_payments)
for payment in [money_payment1, money_payment2]:
self.assertFalse(payment in check_payments)
def test_get_parent(self):
sale = self.create_sale()
purchase = self.create_purchase_order()
renegotiation = self.create_payment_renegotiation()
group = self.create_payment_group()
decrease = self.create_stock_decrease(group=group)
payment_group = self.create_payment_group()
self.assertEquals(sale, sale.group.get_parent())
self.assertEquals(purchase, purchase.group.get_parent())
self.assertEquals(renegotiation, renegotiation.group.get_parent())
self.assertEquals(decrease, decrease.group.get_parent())
self.assertEquals(None, payment_group.get_parent())
def test_get_description(self):
sale = self.create_sale()
purchase = self.create_purchase_order()
renegotiation = self.create_payment_renegotiation()
group = self.create_payment_group()
decrease = self.create_stock_decrease(group=group)
sale.identifier = 77777
purchase.identifier = 88888
renegotiation.identifier = 99999
decrease.identifier = 12345
self.assertEquals(sale.group.get_description(), u'sale 77777')
self.assertEquals(purchase.group.get_description(), u'order 88888')
self.assertEquals(renegotiation.group.get_description(),
u'renegotiation 99999')
self.assertEquals(decrease.group.get_description(),
u'stock decrease 12345')
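# A minimal, hypothetical sketch (not Stoq's actual implementation) of the
# in/out summation semantics exercised above: totals add payments flowing in
# the group's natural direction and subtract the opposite direction, so a
# sale group adds TYPE_IN and subtracts TYPE_OUT, while a purchase group
# does the reverse.
def _group_total_demo(payments, natural_type):
    """payments: iterable of (payment_type, value); natural_type: u'in' or u'out'."""
    total = 0
    for payment_type, value in payments:
        total += value if payment_type == natural_type else -value
    return total
# e.g. _group_total_demo([(u'in', 100), (u'in', 200), (u'out', 50)], u'in') == 250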
|
andrebellafronte/stoq
|
stoqlib/domain/test/test_payment_group.py
|
Python
|
gpl-2.0
| 24,283
|
[
"VisIt"
] |
decebac2a3ce5c47a47539c9b8077fb15781f766ae5a62c1c09aeddd54beca81
|
# proxy module
from __future__ import absolute_import
from mayavi.modules.hyper_streamline import *
|
enthought/etsproxy
|
enthought/mayavi/modules/hyper_streamline.py
|
Python
|
bsd-3-clause
| 100
|
[
"Mayavi"
] |
3040f00ec85d29af0b84c8b4a41d257b48d9aae16bb67a0565b43c4f9a5879f7
|
##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing netCDF, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.modules import get_software_root, get_software_version
class EB_netCDF(CMakeMake):
"""Support for building/installing netCDF"""
def configure_step(self):
"""Configure build: set config options and configure"""
if LooseVersion(self.version) < LooseVersion("4.3"):
self.cfg.update('configopts', "--enable-shared")
if self.toolchain.options['pic']:
self.cfg.update('configopts', '--with-pic')
tup = (os.getenv('FFLAGS'), os.getenv('MPICC'), os.getenv('F90'))
self.cfg.update('configopts', 'FCFLAGS="%s" CC="%s" FC="%s"' % tup)
# add -DgFortran to CPPFLAGS when building with GCC
if self.toolchain.comp_family() == toolchain.GCC: #@UndefinedVariable
self.cfg.update('configopts', 'CPPFLAGS="%s -DgFortran"' % os.getenv('CPPFLAGS'))
ConfigureMake.configure_step(self)
else:
hdf5 = get_software_root('HDF5')
if hdf5:
env.setvar('HDF5_ROOT', hdf5)
CMakeMake.configure_step(self)
def sanity_check_step(self):
"""
Custom sanity check for netCDF
"""
incs = ["netcdf.h"]
libs = ["libnetcdf.so", "libnetcdf.a"]
# since v4.2, the non-C libraries have been split off into separate extensions
# see netCDF-Fortran and netCDF-C++
if LooseVersion(self.version) < LooseVersion("4.2"):
incs += ["netcdf%s" % x for x in ["cpp.h", ".hh", ".inc", ".mod"]] + \
["ncvalues.h", "typesizes.mod"]
libs += ["libnetcdf_c++.so", "libnetcdff.so",
"libnetcdf_c++.a", "libnetcdff.a"]
custom_paths = {
'files': ["bin/nc%s" % x for x in ["-config", "copy", "dump",
"gen", "gen3"]] +
[("lib/%s" % x,"lib64/%s" % x) for x in libs] +
["include/%s" % x for x in incs],
'dirs': []
}
super(EB_netCDF, self).sanity_check_step(custom_paths=custom_paths)
def set_netcdf_env_vars(log):
"""Set netCDF environment variables used by other software."""
netcdf = get_software_root('netCDF')
if not netcdf:
log.error("netCDF module not loaded?")
else:
env.setvar('NETCDF', netcdf)
log.debug("Set NETCDF to %s" % netcdf)
netcdff = get_software_root('netCDF-Fortran')
netcdf_ver = get_software_version('netCDF')
if not netcdff:
if LooseVersion(netcdf_ver) >= LooseVersion("4.2"):
log.error("netCDF v4.2 no longer supplies Fortran library, also need netCDF-Fortran")
else:
env.setvar('NETCDFF', netcdff)
log.debug("Set NETCDFF to %s" % netcdff)
def get_netcdf_module_set_cmds(log):
"""Get module setenv commands for netCDF."""
netcdf = os.getenv('NETCDF')
if netcdf:
txt = "setenv NETCDF %s\n" % netcdf
# netCDF-Fortran is optional (only for netCDF v4.2 and later)
netcdff = os.getenv('NETCDFF')
if netcdff:
txt += "setenv NETCDFF %s\n" % netcdff
return txt
else:
log.error("NETCDF environment variable not set?")
|
geimer/easybuild-easyblocks
|
easybuild/easyblocks/n/netcdf.py
|
Python
|
gpl-2.0
| 4,985
|
[
"NetCDF"
] |
010324fb2b4d7187f877a352649904524bb57afb393961407b14025a61fd0e90
|
# -*- coding: utf-8 -*-
"""
Usage:
search_google [q] [--optional]
search_google [-positional] ...
A command line tool for Google web and image search.
Positional arguments:
q keyword query
-h show this help message and exit
-i show documentation in browser
-a show optional arguments in browser
-s <arg>=<value> set default optional arguments
-r <arg> remove default arguments
-v view default arguments
-d reset default arguments
Optional arguments:
--num num of results (default: 3)
--searchType 'image' or unassigned for web search
--dateRestrict time period of search
--start index of first result
--fileType format for image search (default: png)
--save_links path for text file of links
--save_metadata path for metadata JSON file
--save_downloads path for directory of link downloads
--option_silent 'True' to disable preview
--option_preview num of results to preview
For more arguments use: search_google -a
Examples:
Set developer and search engine key arguments
> search_google -s build_developerKey="dev_key"
> search_google -s cx="cse_key"
Web search for keyword "cat"
> search_google cat
Search for "cat" images
> search_google cat --searchType=image
Download links to directory
> search_google cat --save_downloads=downloads
For more information visit use: search_google -i
"""
from os.path import isfile
from pkg_resources import resource_filename, Requirement
from pprint import pprint
from sys import argv
from webbrowser import open_new_tab
import json
import kwconfig
import search_google.api
_doc_link = 'https://github.com/rrwen/search_google'
_cse_link = 'https://developers.google.com/resources/api-libraries/documentation/customsearch/v1/python/latest/customsearch_v1.cse.html'
def run(argv=argv):
"""Runs the search_google command line tool.
This function runs the search_google command line tool
in a terminal. It is intended to be called from a Python
(.py) file executed with the python interpreter.
Notes:
* ``[q]`` reflects key ``q`` in the ``cseargs`` parameter for :class:`api.results`
* Optional arguments with ``build_`` are keys in the ``buildargs`` parameter for :class:`api.results`
For distribution, this function must be defined in the following files::
# In 'search_google/search_google/__main__.py'
from .cli import run
run()
# In 'search_google/search_google.py'
from search_google.cli import run
if __name__ == '__main__':
run()
# In 'search_google/__init__.py'
__entry_points__ = {'console_scripts': ['search_google=search_google.cli:run']}
Examples::
# Import the search_google cli module
import search_google.cli
# Create command line arguments
argv = [
'cli.py',
'google',
'--searchType=image',
'--build_developerKey=your_dev_key',
'--cx=your_cx_id',
'--num=1'
]
# Run command line
search_google.cli.run(argv)
"""
config_file = kwconfig.manage(
file_path=resource_filename(Requirement.parse('search_google'), 'search_google/config.json'),
defaults={
'build_serviceName': 'customsearch',
'build_version': 'v1',
'num': 3,
'fileType': 'png',
'option_silent': 'False',
'option_preview' : 10})
# (commands) Main command calls
if len(argv) > 1:
if argv[1] == '-i': # browse docs
open_new_tab(_doc_link)
exit()
elif argv[1] == '-a': # browse arguments
open_new_tab(_cse_link)
exit()
config_file.command(argv, i=1, doc=__doc__, quit=True, silent=False)
# (parse_args) Parse command arguments into dict
kwargs = kwconfig.parse(argv[2:])
kwargs['q'] = argv[1]
kwargs = config_file.add(kwargs)
# (split_args) Split args into build, cse, and save arguments
buildargs = {}
cseargs = {}
saveargs = {}
optionargs = {}
for k, v in kwargs.items():
if 'build_' == k[0:6]:
buildargs[k[6:]] = v
elif 'save_' == k[0:5]:
saveargs[k[5:]] = v
elif 'option_' == k[0:7]:
optionargs[k[7:]] = v
else:
cseargs[k] = v
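# For illustration (hypothetical values): 'build_developerKey' ends up in
# buildargs['developerKey'], 'save_links' in saveargs['links'],
# 'option_silent' in optionargs['silent'], and a plain 'num' stays in
# cseargs['num'].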
# (cse_results) Get google api results
results = search_google.api.results(buildargs, cseargs)
# (cse_print) Print a preview of results
if 'silent' in optionargs:
if optionargs['silent'].lower() != 'true':
results.preview(n=int(optionargs['preview']))
# (cse_save) Save links and metadata
if 'links' in saveargs:
results.save_links(saveargs['links'])
if 'metadata' in saveargs:
results.save_metadata(saveargs['metadata'])
# (cse_download) Download links
if 'downloads' in saveargs:
results.download_links(saveargs['downloads'])
|
rrwen/search_google
|
search_google/cli.py
|
Python
|
mit
| 5,007
|
[
"VisIt"
] |
f0a8e74944b9f0161b50da923793fbf2b62771f56c59b69c56ff770e7087b439
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import List
from typing import Optional
import logging
import sys
import os
import math
from kivy.app import App
from kivy.clock import Clock
from kivy.config import Config as kivyConfig
from kivy.config import ConfigParser as OrcaConfigParser
# noinspection PyProtectedMember
from kivy.logger import FileHandler
from kivy.logger import Logger
from kivy.metrics import Metrics
from kivy.uix.settings import SettingsWithSpinner
from kivy.uix.widget import Widget
from kivy.core.window import Window
import ORCA.Globals as Globals
from ORCA.Actions import cActions
from ORCA.utils.Atlas import CreateAtlas
from ORCA.utils.Atlas import ClearAtlas
from ORCA.settings.AppSettings import Build_Settings
from ORCA.definition.DefinitionPathes import cDefinitionPathes
from ORCA.definition.Definitions import cDefinitions
from ORCA.definition.Definitions import GetDefinitionFileNameByName
from ORCA.download.DownLoadSettings import cDownLoad_Settings
from ORCA.download.InstalledReps import cInstalledReps
from ORCA.Events import cEvents
from ORCA.interfaces.Interfaces import cInterFaces
from ORCA.International import cLanguage
from ORCA.Notifications import cNotifications
from ORCA.Screen_Init import cTheScreenWithInit
from ORCA.scripts.Scripts import cScripts
from ORCA.Sound import cSound
from ORCA.Parameter import cParameter
from ORCA.vars.Replace import ReplaceVars
from ORCA.vars.Links import DelAllVarLinks
from ORCA.vars.Access import SetVar
from ORCA.ui.ShowErrorPopUp import ShowErrorPopUp
from ORCA.utils.ConfigHelpers import Config_GetDefault_Bool
from ORCA.utils.ConfigHelpers import Config_GetDefault_Float
from ORCA.utils.ConfigHelpers import Config_GetDefault_Str
from ORCA.utils.ConfigHelpers import Config_GetDefault_Int
from ORCA.utils.ConfigHelpers import Config_GetDefault_Path
from ORCA.utils.ModuleLoader import cModuleLoader
from ORCA.utils.FileName import cFileName
from ORCA.utils.Path import cPath
from ORCA.utils.LogError import LogError
from ORCA.utils.Network import cWaitForConnectivity
from ORCA.utils.CheckPermissions import cCheckPermissions
from ORCA.utils.Platform import OS_GetDefaultNetworkCheckMode
from ORCA.utils.Platform import OS_GetDefaultStretchMode
from ORCA.utils.Platform import OS_GetLocale
from ORCA.utils.Platform import OS_GetWindowSize
from ORCA.utils.Platform import OS_GetInstallationDataPath
from ORCA.utils.Platform import OS_GetUserDownloadsDataPath
from ORCA.utils.Platform import OS_Platform
from ORCA.utils.Platform import OS_GetUserDataPath
from ORCA.utils.Platform import OS_GetIPAddressV4
from ORCA.utils.Platform import OS_GetIPAddressV6
from ORCA.utils.Platform import OS_GetGatewayV4
from ORCA.utils.Platform import OS_GetGatewayV6
from ORCA.utils.Platform import OS_GetSubnetV4
from ORCA.utils.Platform import OS_GetSubnetV6
from ORCA.utils.Platform import OS_GetMACAddress
from ORCA.utils.Rotation import cRotation
from ORCA.utils.Sleep import fSleep
from ORCA.utils.TypeConvert import ToFloat
from ORCA.utils.TypeConvert import ToIntVersion
from ORCA.utils.TypeConvert import ToUnicode
from ORCA.utils.wait.StartWait import StartWait
from ORCA.utils.wait.StopWait import StopWait
from ORCA.definition.DefinitionContext import SetDefinitionPathes
from ORCA.Queue import ClearQueue
class ORCA_App(App):
""" The Main Orca Class, here starts all """
def __init__(self) -> None:
"""
We initialize all app vars. Even though this is formally not required in Python,
I prefer to have it this way
"""
App.__init__(self)
# Don't Move or change
self.sVersion="5.0.4"
self.sBranch="Edinburgh"
#todo: Remove in release
#Logger.setLevel(logging.DEBUG)
Globals.uVersion = ToUnicode(self.sVersion)
Globals.iVersion = ToIntVersion(Globals.uVersion) # integer representation of the app version
SetVar(uVarName = u'REPVERSION', oVarValue = ToUnicode(Globals.iVersion))
Globals.uBranch = ToUnicode(self.sBranch)
Globals.oApp = self
Globals.uPlatform = OS_Platform() # The used Platform
Globals.oParameter = cParameter() # Object for Commandline and Environment Parameter
Globals.aRepNames = [('$lvar(683)', 'definitions'),
('$lvar(690)', 'wizard templates'),
('$lvar(684)', 'codesets'),
('$lvar(685)', 'skins'),
('$lvar(686)', 'interfaces'),
('$lvar(730)', 'scripts'),
('$lvar(687)', 'languages'),
('$lvar(689)', 'sounds'),
('$lvar(691)', 'fonts'),
('$lvar(688)', 'others')]
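# each tuple pairs a display label (a $lvar(...) reference resolved via the
# language tables) with the repository type it denotes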
Globals.oModuleLoader = cModuleLoader()
Globals.oOrcaConfigParser = OrcaConfigParser()
Globals.oActions = cActions()
Globals.oCheckPermissions = cCheckPermissions() # Object for checking if we have permissions
Globals.oDefinitions = cDefinitions() # Object which holds all loaded definitions
Globals.oDownLoadSettings = cDownLoad_Settings() # Object for managing the settings dialog for download repositories
Globals.oNotifications = cNotifications()
Globals.oRotation = cRotation()
Globals.oLanguage = cLanguage()
Globals.oScripts = cScripts() # Object which holds all scripts
Globals.oSound = cSound()
Globals.oInterFaces = cInterFaces() # Object which holds all Interfaces
Globals.oWaitForConnectivity = cWaitForConnectivity() # Object for checking if we have network access
self.bClearCaches = False # If we install a new app version, all Caches (Atlas/Definition) are cleared
self.bDeInitDone = False # Flag, if de-initialisation already done
self.bOnError = False # Flag if we got an error on app initialisation; mainly used if we can't find the ORCA definition files, to give the user a chance to adjust the path
self.bOnWait = False # Flag which shows that a user has opened a question; no further actions behind this for now
self.oDiscoverList = None # Objects which represents the result of all discover scripts
self.oInput = None
self.oWaitMessage = None
self.settings_cls = SettingsWithSpinner
self.title = 'ORCA - Open Remote Control Application'
self.oFnConfig = None
self.uDefinitionToDelete = u''
self.oPathSkinRoot = None
self.uSoundsName = u''
self.tOldSize = (0,0)
SetVar(uVarName = u'WAITFORROTATION', oVarValue = '0')
OS_GetWindowSize()
Logger.info(u'Init: ORCA Remote Application started: Version %s (%s):' % (Globals.uVersion, Globals.uPlatform))
def build(self) -> Optional[Widget]:
"""
Framework function which gets called on application start.
All initialisation functions start here.
We use it to show the splash screen,
and after that to schedule all init functions.
Init is scheduled so that the splash screen can be updated with progress.
"""
try:
# Window.borderless = True
Globals.oCheckPermissions.Wait()
kivyConfig.set('graphics', 'kivy_clock', 'interrupt')
kivyConfig.set('kivy','log_maxfiles','3')
Globals.oTheScreen = cTheScreenWithInit() # Create the Screen Object
Globals.oEvents = cEvents() # Create the Scheduler
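# scheduling with a 0 timeout defers Init_ReadConfig to the next frame, so
# the splash screen can be drawn before the heavy init work starts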
Clock.schedule_once(self.Init_ReadConfig, 0) # Trigger the scheduled init functions
return Globals.oTheScreen.oRootSM # And return the root object (black background at first instance)
except Exception as e:
ShowErrorPopUp(uTitle='build: Fatal Error', uMessage=u'build: Fatal Error running Orca', bAbort=True, uTextContinue='', uTextQuit=u'Quit', oException=e)
return None
# noinspection PyUnusedLocal
def On_Size(self, win, size) ->None:
""" Function called by the Framework, when the size or rotation has changed """
if self.tOldSize==size:
return None
Logger.debug("Resize/rotation detected %d %d" % (size[0], size[1]))
self.tOldSize = size
Globals.iAppWidth, Globals.iAppHeight = size
if Globals.iAppWidth < Globals.iAppHeight:
Globals.uDeviceOrientation = 'portrait'
else:
Globals.uDeviceOrientation = 'landscape'
SetVar(uVarName = u'DEVICEORIENTATION', oVarValue = Globals.uDeviceOrientation)
if Globals.oParameter.bSmallScreen:
Globals.fScreenSize=4.5
SetVar(uVarName = u'SCREENSIZE', oVarValue = str(Globals.fScreenSize))
SetVar(uVarName = u'WAITFORROTATION', oVarValue= '0')
Globals.bWaitForRotation = False
Globals.oTheScreen.AdjustRatiosAfterResize()
return None
# noinspection PyUnusedLocal
def Init_ReadConfig(self, *largs) ->bool:
"""
Called by the timer to continue initialisation after app start.
More or less all actions from here on will be executed by the scheduler/queue.
"""
aActions:List[cActions]
if not self.Init():
return False
Logger.debug(u'Late_Init: Screen Resolution: %d x %d' % (Globals.iAppWidth, Globals.iAppHeight))
Logger.debug(u'Late_Init_StartInitActions')
Globals.oTheScreen.Init() # Add Global Vars first
Globals.oInterFaces.Init() # Create the Interfaces:
# from ORCA.utils.ParseResult_Test import ResultParser_Test
# ResultParser_Test()
# and execute the startup scripts
aActions = Globals.oEvents.CreateSimpleActionList(aActions = [{u'name':u'Show Message we begin',u'string': u'showsplashtext', u'maintext': u'Executing Startup Script'},
{u'name':u'And kick off the start up actions',u'string': u'loaddefinition'}
])
Globals.oEvents.ExecuteActionsNewQueue(aActions=aActions, oParentWidget=None)
return False
# noinspection PyMethodMayBeStatic
def DownloadDefinition(self, uDefinitionName) -> bool:
"""
Downloads a specific definition and restarts afterwards
"""
StartWait()
Globals.oDownLoadSettings.LoadDirect(uDirect=' :definitions:' + uDefinitionName, bForce=True)
return False # we do not proceed here as Downloader will restart
def RestartAfterDefinitionLoad(self)->bool:
"""
This function gets called either when we have detected a new ORCA version and downloaded the updated ORCA repository files,
or on a first-time installation after we have downloaded the setup definition.
In both cases we restart ORCA to make the changes effective.
"""
Globals.oOrcaConfigParser.set(u'ORCA', u'lastinstalledversion', str(Globals.iVersion))
Globals.oOrcaConfigParser.write()
# if we get called after a repository update caused by new version install (but not on the first install)
# we restart to make the changes effective
# todo: check if we just need to skip restart as we might have updated several definitions
if Globals.iVersion != Globals.iLastInstalledVersion and Globals.iLastInstalledVersion!=0:
# self.ReStart()
return True
uTmp, uRepType, uRepName = Globals.oDownLoadSettings.uLast.split(':')
if uRepType == u'definitions':
if self.CheckForOrcaFiles():
uDefName = GetDefinitionFileNameByName(uDefinitionName=uRepName)
Globals.uDefinitionName = uDefName
Globals.oOrcaConfigParser.set(u'ORCA', u'definition', uDefName)
Globals.oOrcaConfigParser.write()
self.ReStart()
return True
StopWait()
return True
# noinspection PyUnusedLocal
def RestartAfterRepositoryUpdate(self, *largs)->bool:
"""
Restarts ORCA, after a definition has been updated
"""
Globals.oOrcaConfigParser.set(u'ORCA', u'lastinstalledversion', str(Globals.iVersion))
Globals.oOrcaConfigParser.write()
Globals.iLastInstalledVersion = Globals.iVersion
StopWait()
self.ReStart()
return True
def CheckForOrcaFiles(self)->bool:
"""
Checks if ORCA files are available somewhere
"""
oFnCheck = cFileName(Globals.oPathRoot + 'actions')+ 'actions.xml'
Logger.debug(u'Looking for Orca files at ' + str(oFnCheck))
# if we can't find orca files (tested on the actions xml file), we stop here
if not oFnCheck.Exists():
Globals.bInit = False
self.ShowSettings()
uMsg = ReplaceVars("$lvar(415)") % str(Globals.oPathRoot)
Logger.critical(uMsg)
ShowErrorPopUp(uTitle='CheckForOrcaFiles: Fatal Error', uMessage=uMsg, bAbort=True, uTextContinue='Continue',uTextQuit=u'Quit')
return False
return True
def Init(self) -> bool:
"""
First real init step:
sets some basic vars
and finds/sets the path to the ORCA files
"""
try:
'''
oPathAppReal: The path where the OS installer places the installation files, e.g. the fallback action files.
Could be anywhere and could be a read-only location;
not necessarily the place where the binaries are.
oPathRoot: The path where the (downloaded) ORCA files are found. Can be changed in the settings.
'''
Globals.oPathAppReal = OS_GetInstallationDataPath()
Globals.oPathRoot = OS_GetUserDataPath()
Globals.uIPAddressV4 = OS_GetIPAddressV4()
Globals.uIPSubNetV4 = OS_GetSubnetV4()
Globals.uIPGateWayV4 = OS_GetGatewayV4()
Globals.uIPAddressV6 = OS_GetIPAddressV6()
Globals.uIPSubNetV6 = OS_GetSubnetV6()
Globals.uIPGateWayV6 = OS_GetGatewayV6()
Globals.uMACAddressColon, Globals.uMACAddressDash = OS_GetMACAddress()
Globals.oPathApp = cPath(os.getcwd())
if str(Globals.oParameter.oPathDebug):
Globals.oPathRoot = Globals.oParameter.oPathDebug
Globals.oPathAppReal = Globals.oParameter.oPathDebug
Logger.info('OrcaAppInit (Root/Real): Path: ' + Globals.oPathAppReal)
Logger.info('OrcaAppInit (Root) : Path: ' + Globals.oPathRoot)
SetVar(uVarName = u'APPLICATIONPATH', oVarValue = Globals.oPathRoot.string)
SetVar(uVarName = u'WIZARDTEMPLATESPATH', oVarValue = (Globals.oPathRoot + "wizard templates").unixstring)
if not Globals.oPathRoot.IsDir():
Globals.oPathRoot.Create()
# Read all custom settings
if not self.InitAndReadSettingsPanel():
return False
Globals.oLanguage.Init() # Init the Languages (doesn't load them)
Globals.oInterFaces.LoadInterfaceList() # load the list of all available interfaces
Globals.oScripts.LoadScriptList() # load the list of all available scripts
# Create the atlas files for the skin and the definition
if Globals.oDefinitionPathes.oPathDefinition.Exists():
if Globals.uDefinitionName != "setup":
CreateAtlas(oPicPath=Globals.oDefinitionPathes.oPathDefinition,oAtlasFile=Globals.oDefinitionPathes.oFnDefinitionAtlas,uDebugMsg=u'Create Definition Atlas Files')
Globals.bInit = True
return True
except Exception as e:
uMsg = LogError(uMsg=u'App Init:Unexpected error:', oException=e)
Logger.critical(uMsg)
ShowErrorPopUp(uTitle='App Init:Fatal Error', uMessage=uMsg, bAbort=True, uTextContinue='', uTextQuit=u'Quit')
self.bOnError = True
return False
def InitAndReadSettingsPanel(self)->bool:
"""
Reads the complete settings from the orca.ini file.
It will set setting defaults if we do not have an ini file yet.
"""
try:
if Globals.oParameter.oPathLog.string:
oPathLogfile = Globals.oParameter.oPathLog
oPathLogfile.Create()
kivyConfig.set('kivy', 'log_dir', oPathLogfile.string)
kivyConfig.write()
# uOrgLogFn=Logger.manager.loggerDict["kivy"].handlers[1].filename
Logger.debug(u"Init: Replacing Logfile Location to :"+Globals.oParameter.oPathLog.string)
Logger.level=Logger.level
Globals.fDoubleTapTime = ToFloat(kivyConfig.getint('postproc', 'double_tap_time')) / 1000.0
self.oFnConfig = cFileName(Globals.oPathRoot) + u'orca.ini'
oConfig = Globals.oOrcaConfigParser
oConfig.filename = self.oFnConfig.string
if self.oFnConfig.Exists():
oConfig.read(self.oFnConfig.string)
if not oConfig.has_section(u'ORCA'):
oConfig.add_section(u'ORCA')
Globals.uDefinitionName = Config_GetDefault_Str(oConfig=oConfig,uSection= u'ORCA',uOption= u'definition',vDefaultValue= u'setup')
if "[" in Globals.uDefinitionName:
Globals.uDefinitionName = Globals.uDefinitionName[Globals.uDefinitionName.find("[")+1 : Globals.uDefinitionName.find("]")]
if Globals.uDefinitionName == u'setup':
Logger.setLevel(logging.DEBUG)
oRootPath = Config_GetDefault_Path(oConfig=oConfig, uSection=u'ORCA', uOption=u'rootpath', uDefaultValue=Globals.oPathRoot.string)
if oRootPath.string:
Globals.oPathRoot = oRootPath
oFnCheck = cFileName(Globals.oPathRoot + 'actions') +'actionsfallback.xml'
if not oFnCheck.Exists():
Globals.oPathRoot = OS_GetUserDataPath()
Logger.debug(u'Init: Override Path:' + Globals.oPathRoot)
self.InitRootDirs()
Globals.iLastInstalledVersion = Config_GetDefault_Int(oConfig=oConfig, uSection=u'ORCA', uOption='lastinstalledversion',uDefaultValue= Globals.uVersion)
# The 'protected' file/flag indicates that we are in the development environment, so we will not download anything from the repository
Globals.bProtected = (Globals.oPathRoot + u'protected').Exists()
if Globals.bProtected:
SetVar(uVarName = "PROTECTED", oVarValue = "1")
else:
SetVar(uVarName = "PROTECTED", oVarValue = "0")
# get the installed interfaces, etc.
i = 0
while True:
oInstalledRep = cInstalledReps()
uKey = u'installedrep%i_type' % i
oInstalledRep.uType = Config_GetDefault_Str(oConfig=oConfig, uSection=u'ORCA', uOption=uKey, vDefaultValue='')
uKey = u'installedrep%i_name' % i
oInstalledRep.uName = Config_GetDefault_Str(oConfig=oConfig, uSection=u'ORCA', uOption=uKey, vDefaultValue='')
uKey = u'installedrep%i_version' % i
oInstalledRep.iVersion = Config_GetDefault_Int(oConfig=oConfig, uSection=u'ORCA', uOption=uKey, uDefaultValue="0")
if not oInstalledRep.uName == '':
uKey = '%s:%s' % (oInstalledRep.uType, oInstalledRep.uName)
Globals.dInstalledReps[uKey] = oInstalledRep
i += 1
else:
break
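# For illustration (hypothetical values), the orca.ini entries scanned above
# look like:
#   installedrep0_type = interfaces
#   installedrep0_name = eiscp
#   installedrep0_version = 2
# scanning stops at the first index without a name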
del Globals.aRepositories[:]
# get the configured repos
for i in range(Globals.iCntRepositories):
if i == 0:
uDefault = 'https://www.orca-remote.org/repositories/ORCA_$var(REPVERSION)/repositories'
else:
uDefault = ''
uKey = u'repository' + str(i)
uRep = ReplaceVars(Config_GetDefault_Str(oConfig=oConfig, uSection=u'ORCA', uOption=uKey, vDefaultValue=uDefault))
Globals.aRepositories.append(uRep)
# we add some values for state, which help with the Download Settings
uKey = u'repository_state' + str(i)
Config_GetDefault_Str(oConfig=oConfig, uSection=u'ORCA', uOption=uKey, vDefaultValue='1')
# Getting the lists for skins, definitions and languages
Globals.aSkinList = self.oPathSkinRoot.GetFolderList()
Globals.aLanguageList = Globals.oPathLanguageRoot.GetFolderList()
Globals.aDefinitionList = Globals.oPathDefinitionRoot.GetFolderList()
Globals.uSkinName = Config_GetDefault_Str(oConfig=oConfig, uSection=u'ORCA', uOption=u'skin', vDefaultValue=u'ORCA_silver_hires')
self.uSoundsName = Config_GetDefault_Str(oConfig=oConfig, uSection=u'ORCA', uOption=u'sounds', vDefaultValue=u'ORCA_default')
Globals.uLanguage = Config_GetDefault_Str(oConfig=oConfig, uSection=u'ORCA', uOption=u'language', vDefaultValue=OS_GetLocale())
Globals.bShowBorders = Config_GetDefault_Bool(oConfig=oConfig, uSection=u'ORCA', uOption=u'showborders', uDefaultValue=u'0')
Globals.uDefinitionContext = Globals.uDefinitionName
# this is temporary as some screen animations do not work in the final Windows package (PyInstaller package)
uDefaultType:str = "fade"
if Globals.uPlatform == "win":
uDefaultType = "no"
Globals.uDefaultTransitionType = Config_GetDefault_Str(oConfig=oConfig, uSection=u'ORCA', uOption=u'defaulttransitiontype', vDefaultValue=uDefaultType)
Globals.uDefaultTransitionDirection = Config_GetDefault_Str(oConfig=oConfig, uSection=u'ORCA', uOption=u'defaulttransitiondirection', vDefaultValue="left")
if Globals.uDefinitionName == 'setup':
Logger.setLevel(logging.DEBUG)
if not Globals.uLanguage in Globals.aLanguageList:
if len(Globals.aLanguageList) > 0:
Globals.uLanguage = Globals.aLanguageList[0]
oConfig.set(u'ORCA', u'language', Globals.uLanguage)
Globals.uLocalesName = Config_GetDefault_Str(oConfig=oConfig, uSection=u'ORCA', uOption=u'locales', vDefaultValue=u'UK (12h)')
if 'shared_documents' in Globals.aDefinitionList:
Globals.aDefinitionList.remove('shared_documents')
if not Globals.uDefinitionName in Globals.aDefinitionList:
if len(Globals.aDefinitionList) > 0:
Globals.uDefinitionName = Globals.aDefinitionList[0]
oConfig.set(u'ORCA', u'definition', Globals.uDefinitionName)
if not Globals.uSkinName in Globals.aSkinList:
if len(Globals.aSkinList) > 0:
Globals.uSkinName = Globals.aSkinList[0]
oConfig.set(u'ORCA', u'skin', Globals.uSkinName)
oConfig.set(u'ORCA', u'interface', ReplaceVars("select"))
oConfig.set(u'ORCA', u'script', ReplaceVars("select"))
oConfig.set(u'ORCA', u'definitionmanage', ReplaceVars("select"))
Globals.bInitPagesAtStart = Config_GetDefault_Bool(oConfig=oConfig, uSection=u'ORCA', uOption=u'initpagesatstartup', uDefaultValue=u'0')
Globals.fDelayedPageInitInterval = Config_GetDefault_Float(oConfig=oConfig, uSection=u'ORCA', uOption=u'delayedpageinitinterval',uDefaultValue= u'60')
Globals.fStartRepeatDelay = Config_GetDefault_Float(oConfig=oConfig, uSection=u'ORCA', uOption=u'startrepeatdelay',uDefaultValue= u'0.8')
Globals.fContRepeatDelay = Config_GetDefault_Float(oConfig=oConfig, uSection=u'ORCA', uOption=u'contrepeatdelay', uDefaultValue=u'0.2')
Globals.fLongPressTime = Config_GetDefault_Float(oConfig=oConfig, uSection=u'ORCA', uOption=u'longpresstime', uDefaultValue=u'1')
Globals.bConfigCheckForNetwork = Config_GetDefault_Bool(oConfig=oConfig, uSection=u'ORCA', uOption=u'checkfornetwork', uDefaultValue=u'1')
Globals.uNetworkCheckType = Config_GetDefault_Str(oConfig=oConfig, uSection=u'ORCA', uOption=u'checknetworktype',vDefaultValue=OS_GetDefaultNetworkCheckMode())
Globals.uConfigCheckNetWorkAddress = Config_GetDefault_Str(oConfig=oConfig, uSection=u'ORCA', uOption=u'checknetworkaddress', vDefaultValue='auto')
Globals.bClockWithSeconds = Config_GetDefault_Bool(oConfig=oConfig, uSection=u'ORCA', uOption=u'clockwithseconds', uDefaultValue=u'1')
Globals.bLongDate = Config_GetDefault_Bool(oConfig=oConfig, uSection=u'ORCA', uOption=u'longdate', uDefaultValue=u'0')
Globals.bLongDay = Config_GetDefault_Bool(oConfig=oConfig, uSection=u'ORCA', uOption=u'longday', uDefaultValue=u'0')
Globals.bLongMonth = Config_GetDefault_Bool(oConfig=oConfig, uSection=u'ORCA', uOption=u'longmonth', uDefaultValue=u'0')
Globals.bVibrate = Config_GetDefault_Bool(oConfig=oConfig, uSection=u'ORCA', uOption=u'vibrate', uDefaultValue=u'0')
Globals.bIgnoreAtlas = Config_GetDefault_Bool(oConfig=oConfig, uSection=u'ORCA', uOption=u'ignoreatlas', uDefaultValue=u'0')
Globals.fScreenSize = Config_GetDefault_Float(oConfig=oConfig, uSection=u'ORCA', uOption=u'screensize', uDefaultValue=u'0')
if Globals.fScreenSize == 0:
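# fall back to the physical screen diagonal in inches: pixel diagonal / dpi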
Globals.fScreenSize = math.sqrt(Globals.iAppWidth ** 2 + Globals.iAppHeight ** 2) / Metrics.dpi
self.InitOrientationVars()
Globals.uStretchMode = Config_GetDefault_Str(oConfig=oConfig, uSection=u'ORCA', uOption=u'stretchmode', vDefaultValue=OS_GetDefaultStretchMode())
Globals.oSound.ReadSoundVolumesFromConfig(oConfig=oConfig)
oConfig.write()
self.InitPathes() # init all used paths
# clear cache in case of an update
if self.bClearCaches:
ClearAtlas()
# Create and read the definition ini file
Globals.oDefinitionConfigParser = OrcaConfigParser()
Globals.oDefinitionConfigParser.filename = Globals.oDefinitionPathes.oFnDefinitionIni.string
if Globals.oDefinitionPathes.oFnDefinitionIni.Exists():
Globals.oDefinitionConfigParser.read(Globals.oDefinitionPathes.oFnDefinitionIni.string)
uSection = Globals.uDefinitionName
uSection = uSection.replace(u' ', u'_')
if not Globals.oDefinitionConfigParser.has_section(uSection):
Globals.oDefinitionConfigParser.add_section(uSection)
return True
except Exception as e:
uMsg = u'Global Init:Unexpected error reading settings:' + ToUnicode(e)
Logger.critical(uMsg)
ShowErrorPopUp(uTitle='InitAndReadSettingsPanel: Fatal Error', uMessage=uMsg, bAbort=True, uTextContinue='', uTextQuit=u'Quit')
return False
# noinspection PyProtectedMember
def InitOrientationVars(self)->None:
"""
Gets the orientation of the app and sets system vars for it
"""
Logger.debug(
u'Setting Orientation Variables #1: Screen Size: [%s], Width: [%s], Height: [%s], Orientation: [%s]' % (
str(Globals.fScreenSize), str(self._app_window._size[0]), str(self._app_window._size[1]),
str(Globals.uDeviceOrientation)))
OS_GetWindowSize()
if Globals.iAppWidth < Globals.iAppHeight:
Globals.uDeviceOrientation = 'portrait'
else:
Globals.uDeviceOrientation = 'landscape'
Globals.oRotation.Lock()
SetVar(uVarName = u'DEVICEORIENTATION', oVarValue = Globals.uDeviceOrientation)
SetVar(uVarName = u'SCREENSIZE', oVarValue = str(Globals.fScreenSize))
Logger.debug(u'Setting Orientation Variables: Screen Size: [%s], Width: [%s], Height: [%s], Orientation: [%s]' % (str(Globals.fScreenSize), str(Globals.iAppWidth), str(Globals.iAppHeight), str(Globals.uDeviceOrientation)))
def RepositoryUpdate(self)->bool:
"""
Updates all loaded repository files when a new ORCA version has been detected
"""
if not Globals.bProtected:
Logger.info("New ORCA version detected, updating all repositories")
self.InitPathes()
Globals.oTheScreen.LoadLanguage()
StartWait()
Globals.oDownLoadSettings.UpdateAllInstalledRepositories(bForce = False)
self.bClearCaches = True
# self.RestartAfterRepositoryUpdate()
return True
return False
def InitRootDirs(self)->None:
"""
Inits and creates the core paths
"""
Globals.oPathResources = Globals.oPathRoot + u'resources'
Globals.oPathInterface = Globals.oPathRoot + u'interfaces'
Globals.oPathAction = Globals.oPathRoot + u'actions'
Globals.oPathCodesets = Globals.oPathRoot + u'codesets'
Globals.oPathSoundsRoot = Globals.oPathRoot + u'sounds'
if Globals.oParameter.oPathTmp.string:
Globals.oPathTmp = Globals.oParameter.oPathTmp
else:
Globals.oPathTmp = Globals.oPathRoot + u'tmp'
Globals.oPathDefinitionRoot = Globals.oPathRoot + u'definitions'
Globals.oPathSharedDocuments = Globals.oPathDefinitionRoot + u'shared_documents'
self.oPathSkinRoot = Globals.oPathRoot + u'skins'
Globals.oPathScripts = Globals.oPathRoot + u'scripts'
Globals.oPathLanguageRoot = Globals.oPathRoot + u'languages'
oPathGlobalSettings = Globals.oPathRoot + u'globalsettings'
Globals.oPathGlobalSettingsScripts = oPathGlobalSettings + u'scripts'
Globals.oPathGlobalSettingsInterfaces = oPathGlobalSettings + u'interfaces'
Globals.oPathTVLogos = Globals.oPathResources + "tvlogos"
Globals.oPathWizardTemplates = Globals.oPathRoot + u"wizard templates"
Globals.oPathTmp.Create()
Globals.oPathInterface.Create()
Globals.oPathResources.Create()
Globals.oPathCodesets.Create()
Globals.oPathSoundsRoot.Create()
Globals.oPathAction.Create()
self.oPathSkinRoot.Create()
Globals.oPathScripts.Create()
Globals.oPathDefinitionRoot.Create()
Globals.oPathSharedDocuments.Create()
Globals.oPathLanguageRoot.Create()
Globals.oPathWizardTemplates.Create()
oPathGlobalSettings.Create()
Globals.oPathGlobalSettingsScripts.Create()
Globals.oPathGlobalSettingsInterfaces.Create()
(Globals.oPathSharedDocuments + 'actions').Create()
def InitPathes(self)->None:
"""
inits all paths used by the app (the root paths need to be initialized first)
"""
Globals.oPathSkin = self.oPathSkinRoot + Globals.uSkinName
oPathCheck = Globals.oPathSharedDocuments + "elements"+("skin_" + Globals.uSkinName)
if oPathCheck.Exists():
Globals.oPathStandardPages = oPathCheck
else:
Globals.oPathStandardPages = (Globals.oPathSharedDocuments + "elements") +"skin_default"
Globals.oPathUserDownload = OS_GetUserDownloadsDataPath()
Globals.oPathStandardElements = Globals.oPathStandardPages
Globals.oPathStandardPages = Globals.oPathStandardPages + "pages"
Globals.oFnElementIncludeWrapper = cFileName(Globals.oPathStandardElements) + u'block_elementincludewrapper.xml'
Globals.oFnSkinXml = cFileName(Globals.oPathSkin) + u'skin.xml'
Globals.oPathSounds = cPath(Globals.oPathSoundsRoot) + self.uSoundsName
Globals.oFnSoundsXml = cFileName(Globals.oPathSounds) + u'sounds.xml'
Globals.oPathFonts = Globals.oPathResources + u'fonts'
Globals.oFnGestureLog = cFileName(Globals.oPathUserDownload) + u'gestures.log'
Globals.oFnLangInfo = cFileName(Globals.oPathLanguageRoot + Globals.uLanguage) + u'langinfo.xml'
Globals.oFnAction = cFileName(Globals.oPathAction) + u'actions.xml'
Globals.oFnActionEarlyAppStart = cFileName(Globals.oPathAction) + u'actionsearly.xml'
Globals.oFnActionFreshInstall = cFileName(Globals.oPathAppReal+u'actions') + u'actionsfallback.xml'
Globals.oFnCredits = cFileName(Globals.oPathAppReal) + u'credits.txt'
Globals.oPathGestures = cPath(Globals.oPathAction)
Globals.oFnGestures = cFileName(Globals.oPathGestures) + u'gestures.xml'
Globals.oFnLog = cFileName('').ImportFullPath(uFnFullName=FileHandler.filename)
Globals.oFnLicense = cFileName(Globals.oPathAppReal) + u'license.txt'
Globals.oPathCookie = Globals.oPathTmp
Globals.uScriptLanguageFileTail = u'/languages/'+Globals.uLanguage+'/strings.xml'
Globals.uScriptLanguageFallBackTail = u'/languages/English/strings.xml'
Globals.oFnInterfaceLanguage = cFileName(Globals.oPathInterface + u'/%s/languages/' + Globals.uLanguage) + u'strings.xml'
Globals.oFnInterfaceLanguageFallBack = cFileName(Globals.oPathInterface + u'/%s/languages/English') + u'strings.xml'
oDefinitionPathes = cDefinitionPathes(uDefinitionName=Globals.uDefinitionName)
Globals.dDefinitionPathes[Globals.uDefinitionName] = oDefinitionPathes
SetDefinitionPathes(uDefinitionName=Globals.uDefinitionName)
Globals.aLogoPackFolderNames = Globals.oPathTVLogos.GetFolderList(bFullPath=False)
if Globals.oDefinitionPathes.oPathDefinition.Exists():
Globals.oDefinitionPathes.oPathDefinitionAtlas.Create()
def build_settings(self, settings):
"""
Called by the framework to build the settings json strings
"""
Build_Settings(settings)
def ShowSettings(self):
"""
we do not simply use the native function; we may add more functionality later
"""
return self.open_settings()
def On_CloseSetting(self, **kwArgs):
""" Override the defaults, does nothing """
pass
# noinspection PyUnusedLocal
def fdo_config_change_load_definition(self, *largs):
""" loads a definition triggered by a configuration change """
self._on_config_change_on_definitionlistchange()
if self.oWaitMessage is not None:
self.oWaitMessage.ClosePopup()
# noinspection PyMethodMayBeStatic
def _on_config_change_on_definitionlistchange(self):
# reloads the definition list and restarts the settings dialog
Globals.aDefinitionList = (Globals.oPathRoot + 'definitions').GetFolderList()
Globals.aDefinitionList.remove('shared_documents')
Globals.oTheScreen.UpdateSetupWidgets()
def ReStart(self)->None :
"""
Restarts the whole ORCA App
"""
Logger.debug("Restarting ORCA....")
Globals.oTheScreen.ShowSplash()
# Whole restart
# Stop the timer
Globals.oTheScreen.DeInit()
# Close the settings
Globals.bInit = True
self.close_settings()
Globals.bInit = False
# Ensure settings are recreated
self._app_settings = None
# Write current changes
kivyConfig.write()
# Delete the timers
Globals.oEvents.oAllTimer.DeleteAllTimer()
# Cancel Queue Events
ClearQueue()
# stop interfaces
Globals.oInterFaces.DeInit()
# Reset all the screen vars
Globals.oTheScreen.InitVars()
# delete all varlinks
DelAllVarLinks()
# And here we go again
StopWait()
Clock.schedule_once(self.Init_ReadConfig, 0)
# noinspection PyMethodMayBeStatic
def DeInit(self) ->None:
"""
Called to stop interfaces, queues, timers and scripts
"""
Globals.oNotifications.SendNotification(uNotification="on_stopapp")
def StopApp(self):
"""
Stops the ORCA App
"""
Logger.debug("Quit App on request")
# self.DeInit()
Globals.oSound.PlaySound(uSoundName='shutdown')
fSleep(fSeconds=0.5)
if Globals.oPathUserDownload:
Globals.oFnLog.Copy(oNewFile=cFileName(Globals.oPathUserDownload) + 'orca.log')
# Globals.oFnLog.Delete()
self.stop()
sys.exit(0)
def on_pause(self):
"""
Called by the system when the app goes to sleep
Pauses Interfaces, Scripts, Timers
"""
if not Globals.bOnSleep:
Logger.debug("System is going to pause")
# We prevent any on_pause activities as long as the startup actions have not finished
if Globals.oTheScreen.uCurrentPageName=="":
return True
Globals.oNotifications.SendNotification(uNotification="on_pause")
Globals.bOnSleep = True
else:
Logger.warning("Duplicate on_pause, this should not happen")
# Globals.bOnSleep = False
return True
def on_resume(self):
""" this is the normal entry point, if android would work """
Globals.oNotifications.SendNotification(uNotification="on_resume")
Globals.bOnSleep = False
return True
def open_settings(self, *largs):
"""
Creates the settings panel (framework function)
:param largs:
:return:
"""
if Globals.oWinOrcaSettings is None:
return App.open_settings(self, *largs)
return False
def close_settings(self, *largs):
"""
closes the settings pages and shows the first page
(if the definition has not been started yet, we just continue with the ini settings)
"""
# If initialisation failed, maybe the user entered a different location for ORCA Files, so let's restart
if not Globals.bInit:
self.ReStart()
if Globals.oWinOrcaSettings is None:
return App.close_settings(self, *largs)
Globals.oNotifications.SendNotification(uNotification="closesetting_orca")
return True
def _install_settings_keys(self, window):
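# Overridden as a no-op: Kivy's base App normally binds the F1 key (or the
# platform settings key) here to open the settings panel; ORCA handles its
# own key events in hook_keyboard below.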
pass
# noinspection PyUnusedLocal
def hook_keyboard(self, window, key, *largs):
"""
handles the Esc key to stop the app, as well as other keys
"""
key = str(key)
Logger.debug('hook_keyboard: key:' + key)
dRet = Globals.oNotifications.SendNotification(uNotification="on_key",**{"key":key,"window":window})
if dRet:
key = dRet.get("key",key)
# print ("Key:"+key)
Globals.oNotifications.SendNotification(uNotification="on_key_"+key)
if Globals.oTheScreen.oCurrentPage is not None:
return Globals.oTheScreen.oCurrentPage.OnKey(window, 'key_' +key)
else:
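# key code 27 is Escape (also delivered for the Android back button)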
if key == '27':
self.StopApp()
return False
# noinspection PyUnusedLocal
def on_config_change_change_definition(self, *largs):
"""
Called from the dialog, when the user confirms to change the definition
"""
Logger.debug(u'Definition has changed, restarting ORCA')
self.ReStart()
def fktYesClose(self):
"""
Function called if the user chooses to stop the app after a critical initialisation error
"""
self.StopApp()
def on_stop(self) ->bool:
"""
System Callback, which will be called when the app terminates
"""
# Logger.debug('OnStop')
if not self.bDeInitDone:
self.bDeInitDone = True
self.DeInit()
return True
|
thica/ORCA-Remote
|
src/ORCA/App.py
|
Python
|
gpl-3.0
| 45,493
|
[
"ORCA"
] |
0dd9708c964cc05a88c2653a9f437bc6f45a3ea3046fde58ed6ee7d090a48bc8
|
# -----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# Special thanks to http://www.faculty.ucr.edu/~mmaduro/random.htm for the
# random DNA generator.
# These tests confirm that StripedSmithWaterman returns the same results as
# SSW. We don't test for correctness of those results (i.e., we assume that
# ssw.c and ssw.h are correct) as that testing is beyond the scope of skbio.
# Furthermore, all expected results are created by running StripedSmithWaterman;
# the resulting alignments are verified by hand. Creating tests from the base
# C API is impractical at this time.
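# A minimal usage sketch of the API exercised by these tests (the same
# pattern appears throughout this file):
#
#   query = StripedSmithWaterman("AGGGTAATTAGGCGTGTTCACCTA")   # reusable query profile
#   alignment = query("AGTCGAAGGGTAATATAGGCGTGTCACCTA")        # align against a target
#   alignment.optimal_alignment_score                          # e.g. 36
#   alignment.cigar                                            # e.g. '8M1D8M1I7M'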
from unittest import TestCase, main
from skbio import local_pairwise_align_ssw
from skbio.alignment import StripedSmithWaterman, AlignmentStructure
from skbio.alignment._pairwise import blosum50
class TestSSW(TestCase):
align_attributes = [
"optimal_alignment_score", "suboptimal_alignment_score",
"target_begin", "target_end_optimal", "target_end_suboptimal",
"query_begin", "query_end", "cigar", "query_sequence",
"target_sequence"
]
def _check_alignment(self, alignment, expected):
for attribute in self.align_attributes:
# The first element of this tuple identifies the
# offending sequence should an assertion fail
self.assertEqual((expected['target_sequence'],
expected[attribute]),
(alignment['target_sequence'],
alignment[attribute]))
def _check_argument_with_inequality_on_optimal_align_score(
self,
query_sequences=None,
target_sequences=None,
arg=None,
default=None,
i_range=None,
compare_lt=None,
compare_gt=None):
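# For each query/target pair, sweep `arg` across `i_range` and compare the
# optimal alignment score against the score obtained with `default`:
# `compare_lt` is the expected relation below the default, `compare_gt` above it.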
iterable_kwarg = {}
default_kwarg = {}
default_kwarg[arg] = default
for query_sequence in query_sequences:
for target_sequence in target_sequences:
for i in i_range:
iterable_kwarg[arg] = i
query1 = StripedSmithWaterman(query_sequence,
**iterable_kwarg)
align1 = query1(target_sequence)
query2 = StripedSmithWaterman(query_sequence,
**default_kwarg)
align2 = query2(target_sequence)
if i == default:
self.assertEqual(align1.optimal_alignment_score,
align2.optimal_alignment_score)
if i < default:
compare_lt(align1.optimal_alignment_score,
align2.optimal_alignment_score)
if i > default:
compare_gt(align1.optimal_alignment_score,
align2.optimal_alignment_score)
def _check_bit_flag_sets_properties_falsy_or_negative(
self,
query_sequences=None,
target_sequences=None,
arg_settings=[],
properties_to_null=[]):
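# Asserts that the given keyword-argument settings null out exactly the
# attributes listed in `properties_to_null` (falsy, or negative for ints),
# while every other alignment attribute remains populated.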
kwarg = {}
def falsy_or_negative(alignment, prop):
if type(alignment[prop]) is int:
return alignment[prop] < 0
else:
return not alignment[prop]
for query_sequence in query_sequences:
for target_sequence in target_sequences:
for arg, setting in arg_settings:
kwarg[arg] = setting
query = StripedSmithWaterman(query_sequence, **kwarg)
alignment = query(target_sequence)
for prop in properties_to_null:
self.assertTrue(falsy_or_negative(alignment, prop))
# Every property not in our null list
for prop in [p for p in self.align_attributes
if p not in properties_to_null]:
self.assertFalse(falsy_or_negative(alignment, prop))
class TestStripedSmithWaterman(TestSSW):
def test_object_is_reusable(self):
q_seq = "AGGGTAATTAGGCGTGTTCACCTA"
expected_alignments = [
{
'optimal_alignment_score': 10,
'suboptimal_alignment_score': 10,
'query_begin': 4,
'query_end': 8,
'target_begin': 3,
'target_end_optimal': 7,
'target_end_suboptimal': 34,
'cigar': '5M',
'query_sequence': q_seq,
'target_sequence': ('TTATAATTTTCTTATTATTATCAATATTTATAATTTGATTT'
'TGTTGTAAT')
},
{
'optimal_alignment_score': 36,
'suboptimal_alignment_score': 16,
'query_begin': 0,
'query_end': 23,
'target_begin': 6,
'target_end_optimal': 29,
'target_end_suboptimal': 13,
'cigar': '8M1D8M1I7M',
'query_sequence': q_seq,
'target_sequence': 'AGTCGAAGGGTAATATAGGCGTGTCACCTA'
},
{
'optimal_alignment_score': 16,
'suboptimal_alignment_score': 0,
'query_begin': 0,
'query_end': 7,
'target_begin': 6,
'target_end_optimal': 13,
'target_end_suboptimal': 0,
'cigar': '8M',
'query_sequence': q_seq,
'target_sequence': 'AGTCGAAGGGTAATA'
},
{
'optimal_alignment_score': 8,
'suboptimal_alignment_score': 8,
'query_begin': 0,
'query_end': 3,
'target_begin': 7,
'target_end_optimal': 10,
'target_end_suboptimal': 42,
'cigar': '4M',
'query_sequence': q_seq,
'target_sequence': ('CTGCCTCAGGGGGAGGAAAGCGTCAGCGCGGCTGCCGTCGG'
'CGCAGGGGC')
},
{
'optimal_alignment_score': 48,
'suboptimal_alignment_score': 16,
'query_begin': 0,
'query_end': 23,
'target_begin': 0,
'target_end_optimal': 23,
'target_end_suboptimal': 7,
'cigar': '24M',
'query_sequence': q_seq,
'target_sequence': q_seq
}
]
query = StripedSmithWaterman(q_seq)
results = []
for expected in expected_alignments:
alignment = query(expected['target_sequence'])
results.append(alignment)
for result, expected in zip(results, expected_alignments):
self._check_alignment(result, expected)
def test_regression_on_instantiation_arguments(self):
expected = {
'optimal_alignment_score': 23,
'suboptimal_alignment_score': 10,
'query_begin': 0,
'query_end': 16,
'target_begin': 0,
'target_end_optimal': 20,
'target_end_suboptimal': 4,
'cigar': '6M4D11M',
'query_sequence': 'AAACGATAAATCCGCGTA',
'target_sequence': 'AAACGACTACTAAATCCGCGTGATAGGGGA'
}
query = StripedSmithWaterman(expected['query_sequence'],
gap_open_penalty=5,
gap_extend_penalty=2,
score_size=2,
mask_length=15,
mask_auto=True,
score_only=False,
score_filter=None,
distance_filter=None,
override_skip_babp=False,
protein=False,
match_score=2,
mismatch_score=-3,
substitution_matrix=None,
suppress_sequences=False,
zero_index=True)
alignment = query(expected['target_sequence'])
self._check_alignment(alignment, expected)
def test_protein_sequence_is_usable(self):
expected = {
'optimal_alignment_score': 316,
'suboptimal_alignment_score': 95,
'query_begin': 0,
'query_end': 52,
'target_begin': 0,
'target_end_optimal': 52,
'target_end_suboptimal': 18,
'cigar': '15M1D15M1I22M',
'query_sequence': ('VHLTGEEKSAVAALWGKVNVDEVGGEALGRXLLVVYPWTQRFFESF'
'SDLSTPDABVMSNPKVKAHGK'),
'target_sequence': ('VHLTPEEKSAVTALWBGKVNVDEVGGEALGRLLVVYPWTQRFFES'
'FGDLSTPD*')
}
query = StripedSmithWaterman(expected['query_sequence'],
protein=True,
substitution_matrix=blosum50)
alignment = query(expected['target_sequence'])
self._check_alignment(alignment, expected)
def test_lowercase_is_valid_sequence(self):
expected = {
'optimal_alignment_score': 23,
'suboptimal_alignment_score': 10,
'query_begin': 0,
'query_end': 16,
'target_begin': 0,
'target_end_optimal': 20,
'target_end_suboptimal': 4,
'cigar': '6M4D11M',
'query_sequence': 'aaacgataaatccgcgta',
'target_sequence': 'aaacgactactaaatccgcgtgatagggga'
}
query = StripedSmithWaterman(expected['query_sequence'])
alignment = query(expected['target_sequence'])
self._check_alignment(alignment, expected)
def test_align_with_N_in_nucleotide_sequence(self):
expected = {
'optimal_alignment_score': 9,
'suboptimal_alignment_score': 0,
'query_begin': 0,
'query_end': 8,
'target_begin': 0,
'target_end_optimal': 9,
'target_end_suboptimal': 0,
'cigar': '4M1D5M',
'query_sequence': 'ACTCANNATCGANCTAGC',
'target_sequence': 'ACTCGAAAATGTNNGCA'
}
query = StripedSmithWaterman(expected['query_sequence'])
alignment = query(expected['target_sequence'])
self._check_alignment(alignment, expected)
def test_arg_match_score(self):
query_sequences = [
"TTTTTTCTTATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
"AGTCGAAGGGTCAATATAGGCGTGTCACCTA",
"AGTCGAAGGGTAATA",
"CTGCCTCAAGGGGGAGGAAAGCGTCAGCGCGGCTGCCGTCGGCGCAGGGGC",
"AGGGTAATTTTAGGCGTGTTCACCTA"
]
target_sequences = query_sequences
self._check_argument_with_inequality_on_optimal_align_score(
query_sequences=query_sequences,
target_sequences=target_sequences,
arg='match_score',
default=2,
i_range=range(0, 5),
compare_lt=self.assertLess,
compare_gt=self.assertGreater
)
# The above is a strict bound, so we don't need an expected align
def test_arg_mismatch_score(self):
query_sequences = [
"TTATAATTAATTCTTATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
"AGTCGAAGGGTAAGGGGTATAGGCGTGTCACCTA",
"AGTCGAAGGGTAATA",
"CTGCCTCAGGGGCGAGGAAAGCGTCAGCGCGGCTGCCGTCGGCGCAGGGGC",
"AGGGTAATTAGCGCGTGTTCACCTA"
]
target_sequences = query_sequences
self._check_argument_with_inequality_on_optimal_align_score(
query_sequences=query_sequences,
target_sequences=target_sequences,
arg='mismatch_score',
default=-3,
i_range=range(-6, 1),
# These are intentionally inverted
compare_lt=self.assertLessEqual,
compare_gt=self.assertGreaterEqual
)
# The above is not a strict bound, so let's use an expected align
# to plug the hole where every align is exactly equal to default
expected = {
'optimal_alignment_score': 8,
'suboptimal_alignment_score': 0,
'query_begin': 5,
'query_end': 8,
'target_begin': 10,
'target_end_optimal': 13,
'target_end_suboptimal': 0,
'cigar': '4M',
'query_sequence': 'AGAGGGTAATCAGCCGTGTCCACCGGAACACAACGCTATCGGGCGA',
'target_sequence': 'GTTCGCCCCAGTAAAGTTGCTACCAAATCCGCATG'
}
query = StripedSmithWaterman(expected['query_sequence'],
mismatch_score=-8)
alignment = query(expected['target_sequence'])
self._check_alignment(alignment, expected)
def test_arg_matrix_overrides_match_and_mismatch(self):
query_sequences = [
"TTATAATTAATTCTTATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
"AGTCGAAGGGTAAGGGGTATAGGCGTGTCACCTA",
"AGTCGAAGGGTAATA",
"CTGCCTCAGGGGCGAGGAAAGCGTCAGCGCGGCTGCCGTCGGCGCAGGGGC",
"AGGGTAATTAGCGCGTGTTCACCTA"
]
target_sequences = query_sequences
matrix = { # This is a biologically meaningless matrix
"A": {"A": 4, "T": -1, "C": -2, "G": -3, "N": 4},
"T": {"A": -1, "T": 1, "C": -1, "G": -4, "N": 1},
"C": {"A": -2, "T": -1, "C": 10, "G": 1, "N": 1},
"G": {"A": -3, "T": -4, "C": 1, "G": 3, "N": 1},
"N": {"A": 4, "T": 1, "C": 1, "G": 1, "N": 0}
}
for query_sequence in query_sequences:
for target_sequence in target_sequences:
query1 = StripedSmithWaterman(query_sequence)
align1 = query1(target_sequence)
query2 = StripedSmithWaterman(query_sequence,
substitution_matrix=matrix)
align2 = query2(target_sequence)
self.assertNotEqual(align1.optimal_alignment_score,
align2.optimal_alignment_score)
def test_arg_gap_open_penalty(self):
query_sequences = [
"TTATAATTTTCTTAGTTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
"AGTCCGAAGGGTAATATAGGCGTGTCACCTA",
"AGTCGAAGGCGGTAATA",
"CTGCCTCGGCAGGGGGAGGAAAGCGTCAGCGCGGCTGCCGTCGGCGCAGGGGC",
"AGGGTAATTAAAGGCGTGTTCACCTA"
]
target_sequences = query_sequences
self._check_argument_with_inequality_on_optimal_align_score(
query_sequences=query_sequences,
target_sequences=target_sequences,
arg='gap_open_penalty',
default=5,
i_range=range(1, 12),
# These are intentionally inverted
compare_lt=self.assertGreaterEqual,
compare_gt=self.assertLessEqual
)
# The above is not a strict bound, so let's use an expected align
# to plug the hole where every align is exactly equal to default
expected = {
'optimal_alignment_score': 51,
'suboptimal_alignment_score': 20,
'query_begin': 0,
'query_end': 37,
'target_begin': 0,
'target_end_optimal': 29,
'target_end_suboptimal': 9,
'cigar': '5M4I3M3I1M1I21M',
'query_sequence': 'TAGAGATTAATTGCCACATTGCCACTGCCAAAATTCTG',
'target_sequence': 'TAGAGATTAATTGCCACTGCCAAAATTCTG'
}
query = StripedSmithWaterman(expected['query_sequence'],
gap_open_penalty=1)
alignment = query(expected['target_sequence'])
self._check_alignment(alignment, expected)
def test_arg_gap_extend_penalty(self):
query_sequences = [
"TTATAATTTTCTTATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
"AGTCGAAGGGTAATACTAGGCGTGTCACCTA",
"AGTCGAAGGGTAATA",
"CTGCCTCAGGGGGAGGCAAAGCGTCAGCGCGGCTGCCGTCGGCGCAGGGGC",
"AGGGTAATTAGGCGTGTTCACCTA"
]
target_sequences = query_sequences
self._check_argument_with_inequality_on_optimal_align_score(
query_sequences=query_sequences,
target_sequences=target_sequences,
arg='gap_extend_penalty',
default=2,
i_range=range(1, 10),
# These are intentionally inverted
compare_lt=self.assertGreaterEqual,
compare_gt=self.assertLessEqual
)
# The above is not a strict bound, so let's use an expected align
# to plug the hole where every align is exactly equal to default
expected = {
'optimal_alignment_score': 9,
'suboptimal_alignment_score': 8,
'query_begin': 6,
'query_end': 12,
'target_begin': 7,
'target_end_optimal': 13,
'target_end_suboptimal': 38,
'cigar': '7M',
'query_sequence': 'TCTATAAGATTCCGCATGCGTTACTTATAAGATGTCTCAACGG',
'target_sequence': 'GCCCAGTAGCTTCCCAATATGAGAGCATCAATTGTAGATCGGGCC'
}
query = StripedSmithWaterman(expected['query_sequence'],
gap_extend_penalty=10)
alignment = query(expected['target_sequence'])
self._check_alignment(alignment, expected)
def test_arg_score_only(self):
query_sequences = [
"TTATCGTGATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
"AGTCGAAGGGTAATACTATAAGGCGTGTCACCTA",
"AGTCGAAGGGTAATA",
"AGGGTAATTAGGCGTGCGTGCGTGTTCACCTA",
"AGGGTATTAGGCGTGTTCACCTA"
]
target_sequences = query_sequences
self._check_bit_flag_sets_properties_falsy_or_negative(
query_sequences=query_sequences,
target_sequences=target_sequences,
arg_settings=[('score_only', True)],
properties_to_null=['query_begin', 'target_begin', 'cigar']
)
def test_arg_score_filter_is_used(self):
query_sequences = [
"TTATCGTGATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
"AGTCGAAGGGTAATACTATAAGGCGTGTCACCTA",
"AGTCGAAGGGTAATA",
"AGGGTAATTAGGCGTGCGTGCGTGTTCACCTA",
"AGGGTATTAGGCGTGTTCACCTA"
]
target_sequences = query_sequences
self._check_bit_flag_sets_properties_falsy_or_negative(
query_sequences=query_sequences,
target_sequences=target_sequences,
# score_filter will force a BABP and cigar to be falsy
arg_settings=[('score_filter', 9001)],
properties_to_null=['query_begin', 'target_begin', 'cigar']
)
def test_arg_distance_filter_is_used(self):
query_sequences = [
"TTATCGTGATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
"AGTCGAAGGGTAATACTATAAGGCGTGTCACCTA",
"AGTCGAAGGGTAATA",
"AGGGTAATTAGGCGTGCGTGCGTGTTCACCTA",
"AGGGTATTAGGCGTGTTCACCTA"
]
target_sequences = query_sequences
self._check_bit_flag_sets_properties_falsy_or_negative(
query_sequences=query_sequences,
target_sequences=target_sequences,
# distance_filter will force cigar to be falsy only
arg_settings=[('distance_filter', 1)],
properties_to_null=['cigar']
)
def test_arg_override_skip_babp(self):
query_sequences = [
"TTATCGTGATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
"AGTCGAAGGGTAATACTATAAGGCGTGTCACCTA",
"AGTCGAAGGGTAATA",
"AGGGTAATTAGGCGTGCGTGCGTGTTCACCTA",
"AGGGTATTAGGCGTGTTCACCTA"
]
target_sequences = query_sequences
self._check_bit_flag_sets_properties_falsy_or_negative(
query_sequences=query_sequences,
target_sequences=target_sequences,
# score_filter will force a BABP and cigar to be falsy if not for
# override_skip_babp preventing this for all but the cigar
arg_settings=[('override_skip_babp', True),
('score_filter', 9001)],
properties_to_null=['cigar']
)
def test_arg_zero_index_changes_base_of_index_to_0_or_1(self):
expected_alignments = [
({
'optimal_alignment_score': 100,
'suboptimal_alignment_score': 44,
'query_begin': 5,
'query_end': 54,
'target_begin': 0,
'target_end_optimal': 49,
'target_end_suboptimal': 21,
'cigar': '50M',
'query_sequence': ('AGTCACGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCG'
'CCCCGGGCGGGGC'),
'target_sequence': ('CGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCC'
'GGGCGGGGC')
}, True),
({
'optimal_alignment_score': 100,
'suboptimal_alignment_score': 44,
'query_begin': 6,
'query_end': 55,
'target_begin': 1,
'target_end_optimal': 50,
'target_end_suboptimal': 22,
'cigar': '50M',
'query_sequence': ('AGTCACGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCG'
'CCCCGGGCGGGGC'),
'target_sequence': ('CGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCC'
'GGGCGGGGC')
}, False)
]
for expected, z in expected_alignments:
query = StripedSmithWaterman(expected['query_sequence'],
zero_index=z)
alignment = query(expected['target_sequence'])
self._check_alignment(alignment, expected)
def test_arg_suppress_sequences(self):
expected = {
'optimal_alignment_score': 100,
'suboptimal_alignment_score': 44,
'query_begin': 5,
'query_end': 54,
'target_begin': 0,
'target_end_optimal': 49,
'target_end_suboptimal': 21,
'cigar': '50M',
'query_sequence': '',
'target_sequence': ''
}
query = StripedSmithWaterman(
"AGTCACGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCCGGGCGGGGC",
suppress_sequences=True)
alignment = query("CGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCCGGGCGGGGC")
self._check_alignment(alignment, expected)
class TestAlignStripedSmithWaterman(TestSSW):
def _check_Alignment_to_AlignmentStructure(self, alignment, structure):
self.assertEqual(alignment.score(), structure.optimal_alignment_score)
self.assertEqual(str(alignment[0]), structure.aligned_query_sequence)
self.assertEqual(str(alignment[1]), structure.aligned_target_sequence)
if structure.query_begin == -1:
self.assertEqual(alignment.start_end_positions(), None)
else:
for (start, end), (expected_start, expected_end) in \
zip(alignment.start_end_positions(),
[(structure.query_begin,
structure.query_end),
(structure.target_begin,
structure.target_end_optimal)]):
self.assertEqual(start, expected_start)
self.assertEqual(end, expected_end)
def test_same_as_using_StripedSmithWaterman_object(self):
query_sequence = 'ATGGAAGCTATAAGCGCGGGTGAG'
target_sequence = 'AACTTATATAATAAAAATTATATATTCGTTGGGTTCTTTTGATATAAATC'
query = StripedSmithWaterman(query_sequence)
align1 = query(target_sequence)
align2 = local_pairwise_align_ssw(query_sequence,
target_sequence)
self._check_Alignment_to_AlignmentStructure(align2, align1)
def test_kwargs_are_usable(self):
kwargs = {}
kwargs['mismatch_score'] = -2
kwargs['match_score'] = 5
query_sequence = 'AGGGTAATTAGGCGTGTTCACCTA'
target_sequence = 'TACTTATAAGATGTCTCAACGGCATGCGCAACTTGTGAAGTG'
query = StripedSmithWaterman(query_sequence, **kwargs)
align1 = query(target_sequence)
align2 = local_pairwise_align_ssw(query_sequence,
target_sequence, **kwargs)
self._check_Alignment_to_AlignmentStructure(align2, align1)
class TestAlignmentStructure(TestSSW):
def mock_object_factory(self, dictionary):
class MockAlignmentStructure(AlignmentStructure):
def __init__(self, _a, _b, _c):
for key in dictionary:
setattr(self, key, dictionary[key])
return MockAlignmentStructure(None, None, 0)
def test_works_for_dot_and_square_bracket_access(self):
q_seq = "AGGGTAATTAGGCGTGTTCACCTA"
query = StripedSmithWaterman(q_seq)
alignment = query("TACTTATAAGATGTCTCAACGGCATGCGCAACTTGTGAAGTG")
for accessible in self.align_attributes:
self.assertEqual(getattr(alignment, accessible),
alignment[accessible])
def test_is_zero_based_returns_true_if_index_base_is_zero(self):
expected_alignments = [
({
'query_sequence': ('AGTCACGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCG'
'CCCCGGGCGGGGC'),
'target_sequence': ('CGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCC'
'GGGCGGGGC')
}, True),
({
'query_sequence': ('AGTCACGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCG'
'CCCCGGGCGGGGC'),
'target_sequence': ('CGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCC'
'GGGCGGGGC')
}, False)
]
for expected, z in expected_alignments:
query = StripedSmithWaterman(expected['query_sequence'],
zero_index=z)
alignment = query(expected['target_sequence'])
self.assertEqual(z, alignment.is_zero_based())
def test_set_zero_based_changes_the_index_base(self):
expected_alignments = [
({
'query_sequence': ('AGTCACGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCG'
'CCCCGGGCGGGGC'),
'target_sequence': ('CGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCC'
'GGGCGGGGC')
}, True),
({
'query_sequence': ('AGTCACGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCG'
'CCCCGGGCGGGGC'),
'target_sequence': ('CGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCC'
'GGGCGGGGC')
}, False)
]
for expected, z in expected_alignments:
query = StripedSmithWaterman(expected['query_sequence'],
zero_index=z)
alignment = query(expected['target_sequence'])
alignment.set_zero_based(not z)
self.assertEqual(not z, alignment.is_zero_based())
def test__get_aligned_sequences(self):
generic_sequence = "123456789abcdefghijklmnopqrstuvwxyz"
tests = [ # `end_after_cigar` is how far end extends beyond the cigar.
# Negative values on this should not be possible with SSW
{
'cigar_tuples': [
(4, 'M'), (3, 'I'), (1, 'D'), (15, 'M')
],
'begin': 4,
'end_after_cigar': 2,
'gap_type': 'I',
'expected': "5678---9abcdefghijklmnop"
},
{
'cigar_tuples': [
(12, 'M')
],
'begin': 10,
'end_after_cigar': 0,
'gap_type': 'D',
'expected': "bcdefghijklm"
},
{
'cigar_tuples': [
(10, 'D'), (1, 'M'), (3, 'I'), (2, 'M')
],
'begin': 0,
'end_after_cigar': 5,
'gap_type': 'I',
'expected': "1---2345678"
},
{
'cigar_tuples': [
(10, 'D'), (1, 'M'), (3, 'I'), (2, 'M')
],
'begin': 3,
'end_after_cigar': 0,
'gap_type': 'D',
'expected': "----------456"
},
{
'cigar_tuples': [
(1, 'I'), (4, 'M'), (3, 'I'), (1, 'D'), (8, 'M'), (8, 'D'),
(2, 'I'), (6, 'M'), (1, 'I')
],
'begin': 4,
'end_after_cigar': 3,
'gap_type': 'I',
'expected': "-5678---9abcdefg--hijklm-nop"
}
]
for test in tests:
mock_object = self.mock_object_factory({})
# Because SSW's output is [a, b] and Python's list ranges use
# [a, b), a 1 is added in the calculation of aligned sequences.
# We just have to subtract 1 while we are testing with the easy to
# verify interface of `end_after_cigar` to cancel this range effect
# out.
end = test['end_after_cigar'] - 1 + test['begin'] + \
sum([le if t == 'M' else 0 for le, t in test['cigar_tuples']])
self.assertEqual(test['expected'],
AlignmentStructure._get_aligned_sequence(
mock_object, generic_sequence,
test['cigar_tuples'], test['begin'],
end, test['gap_type']))
def test_aligned_query_target_sequence(self):
query = StripedSmithWaterman("AGGGTAATTAGGCGTGTTCACCTA")
alignment = query("AGTCGAAGGGTAATATAGGCGTGTCACCTA")
self.assertEqual("AGGGTAATATAGGCGT-GTCACCTA",
alignment.aligned_target_sequence)
self.assertEqual("AGGGTAAT-TAGGCGTGTTCACCTA",
alignment.aligned_query_sequence)
def test_aligned_query_target_sequence_with_suppressed_sequences(self):
query = StripedSmithWaterman("AGGGTAATTAGGCGTGTTCACCTA",
suppress_sequences=True)
alignment = query("AGTCGAAGGGTAATATAGGCGTGTCACCTA")
self.assertEqual(None, alignment.aligned_target_sequence)
self.assertEqual(None, alignment.aligned_query_sequence)
if __name__ == '__main__':
main()
|
Kleptobismol/scikit-bio
|
skbio/alignment/tests/test_ssw.py
|
Python
|
bsd-3-clause
| 31,243
|
[
"scikit-bio"
] |
f063b4dd85d5bcd5e363acf04e00154b1a90549d21171f251c375fab0ad3feb0
|
#!/usr/local/bin/python2.6
###AltAnalyze
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
#import pkg_resources
#import distutils
import statistics
import sys, string
import os.path
import unique
import update
import UI
import copy
import export;
reload(export)
import ExpressionBuilder;
reload(ExpressionBuilder)
import ExonAnalyze_module;
reload(ExonAnalyze_module)
import ExonAnnotate_module;
reload(ExonAnnotate_module)
import ResultsExport_module
import GO_Elite
import time
import webbrowser
import random
import traceback
try:
import multiprocessing as mlp
except Exception:
mlp = None
print 'Note: Multiprocessing not supported for this version of Python.'
try:
from scipy import stats
except Exception:
pass ### scipy is not required but is used as a faster implementation of Fisher Exact Test when present
try:
from PIL import Image as PIL_Image
try:
import ImageTk
except Exception:
from PIL import ImageTk
except Exception:
None #print 'Python Imaging Library not installed... using default PNG viewer'
use_Tkinter = 'no'
debug_mode = 'no'
analysis_start_time = time.time()
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
dir_list2 = [] #add in code to prevent folder names from being included
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".csv" or entry[-4:] == ".TXT":
dir_list2.append(entry)
return dir_list2
def eliminate_redundant_dict_values(database):
db1 = {}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
def makeUnique(item):
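# Deduplicates items via dict keys; unhashable items (e.g. lists) are
# converted to tuples for hashing and converted back before returning
# (assumes the collection is homogeneous in type).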
db1 = {};
list1 = [];
k = 0
for i in item:
try:
db1[i] = []
except TypeError:
db1[tuple(i)] = []; k = 1
for i in db1:
if k == 0:
list1.append(i)
else:
list1.append(list(i))
list1.sort()
return list1
def cleanUpLine(line):
line = string.replace(line, '\n', '')
line = string.replace(line, '\c', '')
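# note: '\c' above is not a recognized escape sequence, so this removes a
# literal backslash-c rather than a control character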
data = string.replace(line, '\r', '')
data = string.replace(data, '"', '')
return data
def returnLargeGlobalVars():
### Prints all large global variables retained in memory (taking up space)
all = [var for var in globals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(globals()[var]) > 500:
print var, len(globals()[var])
except Exception:
null = []
def clearObjectsFromMemory(db_to_clear):
db_keys = {}
try:
for key in db_to_clear: db_keys[key] = []
except Exception:
for key in db_to_clear: del key ### if key is a list
for key in db_keys:
try:
del db_to_clear[key]
except Exception:
try:
for i in key: del i ### For lists of tuples
except Exception:
del key ### For plain lists
def importGeneric(filename):
fn = filepath(filename);
key_db = {}
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
key_db[t[0]] = t[1:]
return key_db
def importGenericFiltered(filename, filter_db):
fn = filepath(filename);
key_db = {}
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
key = t[0]
if key in filter_db: key_db[key] = t[1:]
return key_db
def importGenericFilteredDBList(filename, filter_db):
fn = filepath(filename);
key_db = {}
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
try:
null = filter_db[t[0]]
try:
key_db[t[0]].append(t[1])
except KeyError:
key_db[t[0]] = [t[1]]
except Exception:
null = []
return key_db
def importGenericDBList(filename):
fn = filepath(filename);
key_db = {}
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
try:
key_db[t[0]].append(t[1])
except KeyError:
key_db[t[0]] = [t[1]]
return key_db
def importExternalDBList(filename):
fn = filepath(filename);
key_db = {}
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
try:
key_db[t[0]].append(t[1:])
except Exception:
key_db[t[0]] = [t[1:]]
return key_db
def FindDir(dir, term):
dir_list = unique.read_directory(dir)
dir_list2 = []
dir_list.sort()
for i in dir_list:
if term == i: dir_list2.append(i)
if len(dir_list2) == 0:
for i in dir_list:
if term in i: dir_list2.append(i)
dir_list2.sort();
dir_list2.reverse()
if len(dir_list2) > 0:
return dir_list2[0]
else:
return ''
def openFile(file_dir):
if os.name == 'nt':
try:
os.startfile('"' + file_dir + '"')
except Exception:
os.system('open "' + file_dir + '"')
elif 'darwin' in sys.platform:
os.system('open "' + file_dir + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + file_dir + '"')
def openCytoscape(parent_dir, application_dir, application_name):
cytoscape_dir = FindDir(parent_dir, application_dir);
cytoscape_dir = filepath(parent_dir + '/' + cytoscape_dir)
app_dir = FindDir(cytoscape_dir, application_name)
app_dir = cytoscape_dir + '/' + app_dir
if 'linux' in sys.platform:
app_dir = app_dir
app_dir2 = cytoscape_dir + '/Cytoscape'
try:
createCytoscapeDesktop(cytoscape_dir)
except Exception:
null = []
dir_list = unique.read_directory('/usr/bin/') ### Check to see that JAVA is installed
if 'java' not in dir_list: print 'Java not referenced in /usr/bin/. If not installed,\nplease install and re-try opening Cytoscape'
try:
jar_path = cytoscape_dir + '/cytoscape.jar'
main_path = cytoscape_dir + '/cytoscape.CyMain'
plugins_path = cytoscape_dir + '/plugins'
os.system(
'java -Dswing.aatext=true -Xss5M -Xmx512M -jar ' + jar_path + ' ' + main_path + ' -p ' + plugins_path + ' &')
print 'Cytoscape jar opened:', jar_path
except Exception:
print 'OS command to open Java failed.'
try:
try:
openFile(app_dir2); print 'Cytoscape opened:', app_dir2
except Exception:
os.chmod(app_dir, 0777)
openFile(app_dir2)
except Exception:
try:
openFile(app_dir)
except Exception:
os.chmod(app_dir, 0777)
openFile(app_dir)
else:
try:
openFile(app_dir)
except Exception:
os.chmod(app_dir, 0777)
openFile(app_dir)
def createCytoscapeDesktop(cytoscape_dir):
cyto_ds_output = cytoscape_dir + '/Cytoscape.desktop'
data = export.ExportFile(cyto_ds_output)
cytoscape_desktop = cytoscape_dir + '/Cytoscape'; #cytoscape_desktop = '/hd3/home/nsalomonis/Cytoscape_v2.6.1/Cytoscape'
cytoscape_png = cytoscape_dir + '/.install4j/Cytoscape.png'; #cytoscape_png = '/hd3/home/nsalomonis/Cytoscape_v2.6.1/.install4j/Cytoscape.png'
data.write('[Desktop Entry]' + '\n')
data.write('Type=Application' + '\n')
data.write('Name=Cytoscape' + '\n')
data.write('Exec=/bin/sh "' + cytoscape_desktop + '"' + '\n')
data.write('Icon=' + cytoscape_png + '\n')
data.write('Categories=Application;' + '\n')
data.close()
########### Parse Input Annotations ###########
def ProbesetCalls(array_type, probeset_class, splice_event, constitutive_call, external_exonid):
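# Decides whether a probeset should be included in the analysis and whether
# it should be treated as constitutive, based on the array type, the probeset
# class (core/extended/full) and any annotated splicing evidence.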
include_probeset = 'yes'
if array_type == 'AltMouse':
exonid = splice_event
if filter_probesets_by == 'exon':
if '-' in exonid or '|' in exonid: ###Therefore the probeset represents an exon-exon junction or multi-exon probeset
include_probeset = 'no'
if filter_probesets_by != 'exon':
if '|' in exonid: include_probeset = 'no'
if constitutive_call == 'yes': include_probeset = 'yes'
else:
if avg_all_for_ss == 'yes' and (probeset_class == 'core' or len(external_exonid) > 2): constitutive_call = 'yes'
#if len(splice_event)>2 and constitutive_call == 'yes' and avg_all_for_ss == 'no': constitutive_call = 'no'
if constitutive_call == 'no' and len(splice_event) < 2 and len(
external_exonid) < 2: ###otherwise these are interesting probesets to keep
if filter_probesets_by != 'full':
if filter_probesets_by == 'extended':
if probeset_class == 'full': include_probeset = 'no'
elif filter_probesets_by == 'core':
if probeset_class != 'core': include_probeset = 'no'
return include_probeset, constitutive_call
def EvidenceOfAltSplicing(slicing_annot):
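# Returns 1 when the splicing annotation string contains evidence of
# alternative splicing; C-terminal and exon-bleed annotations that co-occur
# with N-terminal/Promoter annotations are re-checked against a stricter
# keyword set before being counted.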
splice_annotations = ["ntron", "xon", "strangeSplice", "Prime", "3", "5", "C-term"];
as_call = 0
splice_annotations2 = ["ntron", "assette", "strangeSplice", "Prime", "3", "5"]
for annot in splice_annotations:
if annot in slicing_annot: as_call = 1
if as_call == 1:
if "C-term" in slicing_annot and ("N-" in slicing_annot or "Promoter" in slicing_annot):
as_call = 0
for annot in splice_annotations2:
if annot in slicing_annot: as_call = 1
elif "bleed" in slicing_annot and ("N-" in slicing_annot or "Promoter" in slicing_annot):
as_call = 0
for annot in splice_annotations2:
if annot in slicing_annot: as_call = 1
return as_call
########### Begin Analyses ###########
class SplicingAnnotationData:
def ArrayType(self):
self._array_type = array_type
return self._array_type
def Probeset(self):
return self._probeset
def setProbeset(self, probeset):
self._probeset = probeset
def ExonID(self):
return self._exonid
def setDisplayExonID(self, exonid):
self._exonid = exonid
def GeneID(self):
return self._geneid
def Symbol(self):
symbol = ''
if self.GeneID() in annotate_db:
y = annotate_db[self.GeneID()]
symbol = y.Symbol()
return symbol
def ExternalGeneID(self):
return self._external_gene
def ProbesetType(self):
###e.g. Exon, junction, constitutive(gene)
return self._probeset_type
def GeneStructure(self):
return self._block_structure
def SecondaryExonID(self):
return self._block_exon_ids
def setSecondaryExonID(self, ids):
self._block_exon_ids = ids
def setLocationData(self, chromosome, strand, probeset_start, probeset_stop):
self._chromosome = chromosome;
self._strand = strand
self._start = probeset_start;
self._stop = probeset_stop
def LocationSummary(self):
location = self.Chromosome() + ':' + self.ProbeStart() + '-' + self.ProbeStop() + '(' + self.Strand() + ')'
return location
def Chromosome(self):
return self._chromosome
def Strand(self):
return self._strand
def ProbeStart(self):
return self._start
def ProbeStop(self):
return self._stop
def ProbesetClass(self):
###e.g. core, extended, full
return self._probest_class
def ExternalExonIDs(self):
return self._external_exonids
def ExternalExonIDList(self):
external_exonid_list = string.split(self.ExternalExonIDs(), '|')
return external_exonid_list
def Constitutive(self):
return self._constitutive_status
def setTranscriptCluster(self, secondary_geneid):
self._secondary_geneid = secondary_geneid
def setNovelExon(self, novel_exon):
self._novel_exon = novel_exon
def NovelExon(self):
return self._novel_exon
def SecondaryGeneID(self):
return self._secondary_geneid
def setExonRegionID(self, exon_region):
self._exon_region = exon_region
def ExonRegionID(self):
return self._exon_region
def SplicingEvent(self):
splice_event = self._splicing_event
if len(splice_event) != 0:
if splice_event[0] == '|': splice_event = splice_event[1:]
return splice_event
def SplicingCall(self):
return self._splicing_call
def SpliceJunctions(self):
return self._splice_junctions
def Delete(self):
del self
def Report(self):
output = self.ArrayType() + '|' + self.ExonID() + '|' + self.ExternalGeneID()
return output
def __repr__(self):
return self.Report()
class AltMouseData(SplicingAnnotationData):
def __init__(self, affygene, exons, ensembl, block_exon_ids, block_structure, probe_type_call):
self._geneid = affygene;
self._external_gene = ensembl;
self._exonid = exons;
self._secondary_geneid = ensembl
self._probeset_type = probe_type_call;
self._block_structure = block_structure;
self._block_exon_ids = block_exon_ids
self._external_exonids = 'NA';
self._constitutive_status = 'no'
self._splicing_event = ''
self._secondary_geneid = 'NA'
self._exon_region = ''
if self._probeset_type == 'gene':
self._constitutive_status = 'yes'
else:
self._constitutive_status = 'no'
class AffyExonSTData(SplicingAnnotationData):
def __init__(self, ensembl_gene_id, exon_id, ens_exon_ids, constitutive_call_probeset, exon_region, splicing_event,
splice_junctions, splicing_call):
self._geneid = ensembl_gene_id;
self._external_gene = ensembl_gene_id;
self._exonid = exon_id
self._constitutive_status = constitutive_call_probeset#; self._start = probeset_start; self._stop = probeset_stop
self._external_exonids = ens_exon_ids; #self._secondary_geneid = transcript_cluster_id#; self._chromosome = chromosome; self._strand = strand
self._exon_region = exon_region;
self._splicing_event = splicing_event;
self._splice_junctions = splice_junctions;
self._splicing_call = splicing_call
if self._exonid[0] == 'U':
self._probeset_type = 'UTR'
elif self._exonid[0] == 'E':
self._probeset_type = 'exonic'
elif self._exonid[0] == 'I':
self._probeset_type = 'intronic'
class AffyExonSTDataAbbreviated(SplicingAnnotationData):
def __init__(self, ensembl_gene_id, exon_id, splicing_call):
self._geneid = ensembl_gene_id;
self._exonid = exon_id;
self._splicing_call = splicing_call
def importSplicingAnnotations(array_type, Species, probeset_type, avg_ss_for_all, root_dir):
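# Thin wrapper: stores the filter settings in module-level globals and
# delegates the actual parsing to importSplicingAnnotationDatabase below.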
global filter_probesets_by;
filter_probesets_by = probeset_type
global species;
species = Species;
global avg_all_for_ss;
avg_all_for_ss = avg_ss_for_all;
global exon_db;
exon_db = {}
global summary_data_db;
summary_data_db = {};
global remove_intronic_junctions;
remove_intronic_junctions = 'no'
if array_type == 'RNASeq':
probeset_annotations_file = root_dir + 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_junctions.txt'
else:
probeset_annotations_file = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_probesets.txt'
filtered_arrayids = {};
filter_status = 'no'
constitutive_probeset_db, exon_db, genes_being_analyzed = importSplicingAnnotationDatabase(
probeset_annotations_file, array_type, filtered_arrayids, filter_status)
return exon_db, constitutive_probeset_db
def importSplicingAnnotationDatabase(filename, array_type, filtered_arrayids, filter_status):
begin_time = time.time()
probesets_included_by_new_evidence = 0;
export_exon_regions = 'yes'
if 'fake' in array_type:
array_type = string.replace(array_type, '-fake', ''); original_arraytype = 'RNASeq'
else:
original_arraytype = array_type
if filter_status == 'no':
global gene_transcript_cluster_db; gene_transcript_cluster_db = {}; gene_transcript_cluster_db2 = {}; global last_exon_region_db; last_exon_region_db = {}
else:
new_exon_db = {}
fn = filepath(filename)
last_gene = ' ';
last_exon_region = ''
constitutive_probeset_db = {};
constitutive_gene = {}
count = 0;
x = 0;
constitutive_original = {}
#if filter_status == 'yes': exon_db = {}
if array_type == 'AltMouse':
for line in open(fn, 'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
probeset, affygene, exons, transcript_num, transcripts, probe_type_call, ensembl, block_exon_ids, block_structure, comparison_info = string.split(
probeset_data, '\t')
###note: currently exclude comparison_info since not applicable for existing analyses
if x == 0:
x = 1
else:
if exons[-1] == '|': exons = exons[0:-1]
if affygene[-1] == '|': affygene = affygene[0:-1]; constitutive_gene[affygene] = []
if probe_type_call == 'gene':
constitutive_call = 'yes' #looked through the probe annotations and the gene seems to be the most consistent constitutive feature
else:
constitutive_call = 'no'
include_call, constitutive_call = ProbesetCalls(array_type, '', exons, constitutive_call, '')
if include_call == 'yes':
probe_data = AltMouseData(affygene, exons, ensembl, block_exon_ids, block_structure,
probe_type_call) #this used to just have affygene,exon in the values (1/17/05)
exon_db[probeset] = probe_data
if filter_status == 'yes': new_exon_db[probeset] = probe_data
if constitutive_call == 'yes': constitutive_probeset_db[probeset] = affygene
genes_being_analyzed = constitutive_gene
else:
for line in open(fn, 'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
if x == 0:
x = 1
else:
try:
probeset_id, exon_id, ensembl_gene_id, transcript_cluster_id, chromosome, strand, probeset_start, probeset_stop, affy_class, constitutive_call_probeset, external_exonid, ens_const_exons, exon_region, exon_region_start, exon_region_stop, splicing_event, splice_junctions = string.split(
probeset_data, '\t')
except Exception:
print probeset_data;force_error
if affy_class == 'free': affy_class = 'full' ### Don't know what the difference is
include_call, constitutive_call = ProbesetCalls(array_type, affy_class, splicing_event,
constitutive_call_probeset, external_exonid)
#if 'ENSG00000163904:E11.5' in probeset_id: print probeset_data
#print array_type,affy_class,splicing_event,constitutive_call_probeset,external_exonid,constitutive_call,include_call;kill
if array_type == 'junction' and '.' not in exon_id: exon_id = string.replace(exon_id, '-',
'.'); exon_region = string.replace(
exon_region, '-', '.')
if ensembl_gene_id != last_gene:
new_gene = 'yes'
else:
new_gene = 'no'
if filter_status == 'no' and new_gene == 'yes':
if '.' in exon_id: ### Exclude junctions
if '-' not in last_exon_region and 'E' in last_exon_region: last_exon_region_db[
last_gene] = last_exon_region
else:
last_exon_region_db[last_gene] = last_exon_region
last_gene = ensembl_gene_id
if len(exon_region) > 1: last_exon_region = exon_region ### some probeset not linked to an exon region
###Record the transcript clusters associated with each gene to annotate the results later on
if constitutive_call_probeset != constitutive_call: probesets_included_by_new_evidence += 1#; print probeset_id,[splicing_event],[constitutive_call_probeset];kill
proceed = 'no';
as_call = 0
if array_type == 'RNASeq' or array_type == 'junction': include_call = 'yes' ### Constitutive expression is not needed
if remove_intronic_junctions == 'yes':
if 'E' not in exon_id: include_call = 'no' ### Remove junctions that only have splice-sites within an intron or UTR
if include_call == 'yes' or constitutive_call == 'yes':
#if proceed == 'yes':
as_call = EvidenceOfAltSplicing(splicing_event)
if filter_status == 'no':
probe_data = AffyExonSTDataAbbreviated(ensembl_gene_id, exon_id, as_call)
if array_type != 'RNASeq':
probe_data.setTranscriptCluster(transcript_cluster_id)
try:
if export_exon_regions == 'yes':
probe_data.setExonRegionID(exon_region)
except Exception:
null = []
else:
probe_data = AffyExonSTData(ensembl_gene_id, exon_id, external_exonid, constitutive_call,
exon_region, splicing_event, splice_junctions, as_call)
probe_data.setLocationData(chromosome, strand, probeset_start, probeset_stop)
if array_type != 'RNASeq':
probe_data.setTranscriptCluster(transcript_cluster_id)
else:
probe_data.setNovelExon(affy_class)
if filter_status == 'yes':
try: ### saves memory
null = filtered_arrayids[probeset_id]
new_exon_db[probeset_id] = probe_data
except KeyError:
null = []
else:
exon_db[probeset_id] = probe_data
if constitutive_call == 'yes' and filter_status == 'no': ###only perform function when initially running
constitutive_probeset_db[probeset_id] = ensembl_gene_id
try:
constitutive_gene[ensembl_gene_id].append(probeset_id)
except Exception:
constitutive_gene[ensembl_gene_id] = [probeset_id]
###Only consider transcript clusters that make up the constitutive portion of the gene or that are alternatively regulated
if array_type != 'RNASeq':
try:
gene_transcript_cluster_db[ensembl_gene_id].append(transcript_cluster_id)
except KeyError:
gene_transcript_cluster_db[ensembl_gene_id] = [transcript_cluster_id]
if constitutive_call_probeset == 'yes' and filter_status == 'no': ###only perform function when initially running
try:
constitutive_original[ensembl_gene_id].append(probeset_id)
except KeyError:
constitutive_original[ensembl_gene_id] = [probeset_id]
if array_type != 'RNASeq':
try:
gene_transcript_cluster_db2[ensembl_gene_id].append(transcript_cluster_id)
except KeyError:
gene_transcript_cluster_db2[ensembl_gene_id] = [transcript_cluster_id]
###If no constitutive probesets for a gene as a result of additional filtering (removing all probesets associated with a splice event), add these back
original_probesets_add = 0;
genes_being_analyzed = {}
for gene in constitutive_gene: genes_being_analyzed[gene] = []
for gene in constitutive_original:
if gene not in constitutive_gene:
genes_being_analyzed[gene] = [gene]
constitutive_gene[gene] = []
original_probesets_add += 1
gene_transcript_cluster_db[gene] = gene_transcript_cluster_db2[gene]
for probeset in constitutive_original[gene]: constitutive_probeset_db[probeset] = gene
#if array_type == 'junction' or array_type == 'RNASeq':
### Added the below in 1.16!!!
### If no constitutive probesets for a gene assigned, assign all gene probesets
for probeset in exon_db:
gene = exon_db[probeset].GeneID()
proceed = 'no'
exonid = exon_db[probeset].ExonID()
### Rather than add all probesets, still filter based on whether the probeset is in an annotated exon
if 'E' in exonid and 'I' not in exonid and '_' not in exonid: proceed = 'yes'
if proceed == 'yes':
if gene not in constitutive_gene:
constitutive_probeset_db[probeset] = gene
genes_being_analyzed[gene] = [gene]
### DO NOT ADD TO constitutive_gene SINCE WE WANT ALL mRNA ALIGNING EXONS/JUNCTIONS TO BE ADDED!!!!
#constitutive_gene[gene]=[]
gene_transcript_cluster_db = eliminate_redundant_dict_values(gene_transcript_cluster_db)
#if affygene == 'ENSMUSG00000023089': print [abs(fold_change_log)],[log_fold_cutoff];kill
if array_type == 'RNASeq':
import RNASeq
try:
last_exon_region_db = RNASeq.importExonAnnotations(species, 'distal-exon', '')
except Exception:
null = []
constitutive_original = [];
constitutive_gene = []
#clearObjectsFromMemory(exon_db); constitutive_probeset_db=[];genes_being_analyzed=[] ### used to evaluate how much memory objects are taking up
#print 'remove_intronic_junctions:',remove_intronic_junctions
#print constitutive_gene['ENSMUSG00000031170'];kill ### Determine if avg_ss_for_all is working
if original_arraytype == 'RNASeq':
id_name = 'exon/junction IDs'
else:
id_name = 'array IDs'
print len(exon_db), id_name, 'stored as instances of SplicingAnnotationData in memory'
#print len(constitutive_probeset_db),'array IDs stored as constititive'
#print probesets_included_by_new_evidence, 'array IDs were re-annotated as NOT constitutive based on mRNA evidence'
if array_type != 'AltMouse': print original_probesets_add, 'genes not viewed as constitutive as a result of filtering', id_name, 'based on splicing evidence, added back'
end_time = time.time();
time_diff = int(end_time - begin_time)
#print filename,"import finished in %d seconds" % time_diff
if filter_status == 'yes':
return new_exon_db
else:
summary_data_db['gene_assayed'] = len(genes_being_analyzed)
try:
exportDenominatorGenes(genes_being_analyzed)
except Exception:
null = []
return constitutive_probeset_db, exon_db, genes_being_analyzed
def exportDenominatorGenes(genes_being_analyzed):
goelite_output = root_dir + 'GO-Elite/denominator/AS.denominator.txt'
goelite_data = export.ExportFile(goelite_output)
systemcode = 'En'
goelite_data.write("GeneID\tSystemCode\n")
for gene in genes_being_analyzed:
if array_type == 'AltMouse':
try:
gene = annotate_db[gene].ExternalGeneID()
except KeyError:
null = []
goelite_data.write(gene + '\t' + systemcode + '\n')
try:
goelite_data.close()
except Exception:
null = []
def performExpressionAnalysis(filename, constitutive_probeset_db, exon_db, annotate_db, dataset_name):
#if analysis_method == 'splicing-index': returnLargeGlobalVars();kill ### used to ensure all large global vars from the reciprocal junction analysis have been cleared from memory
#returnLargeGlobalVars()
"""import list of expression values for arrayids and calculates statistics"""
global fold_dbase;
global original_conditions;
global normalization_method
stats_dbase = {};
fold_dbase = {};
ex_db = {};
si_db = [];
bad_row_import = {};
count = 0
global array_group_name_db;
array_group_name_db = {}
global array_group_db;
array_group_db = {};
global array_raw_group_values;
array_raw_group_values = {};
global original_array_names;
original_array_names = []
global max_replicates;
global equal_replicates;
global array_group_list
array_index_list = [] ###Use this list for permutation analysis
fn = filepath(filename);
line_num = 1
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line);
t = string.split(data, '\t');
probeset = t[0]
if t[0] == '#':
null = [] ### Don't import line
elif line_num == 1:
line_num += 1 #makes this value null for the next loop of actual array data
###Below occurs if the data is raw as opposed to precomputed
if ':' in t[1]:
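# Raw headers are formatted as 'group:array_name'; build per-group
# column index lists for the downstream statistics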
array_group_list = [];
x = 0 ###gives us an original index value for each entry in the group
for entry in t[1:]:
original_array_names.append(entry)
aa = string.split(entry, ':')
try:
array_group, array_name = aa
except Exception:
array_name = string.join(aa[1:], ':'); array_group = aa[0]
try:
array_group_db[array_group].append(x)
array_group_name_db[array_group].append(array_name)
except KeyError:
array_group_db[array_group] = [x]
array_group_name_db[array_group] = [array_name]
### below only occurs with a new group addition
array_group_list.append(
array_group) #use this to generate comparisons in the below linked function
x += 1
else:
#try: print data_type
#except Exception,exception:
#print exception
#print traceback.format_exc()
print_out = 'The AltAnalyze filtered expression file "' + filename + '" is not properly formatted.\n Review formatting requirements if this file was created by another application.\n'
print_out += "\nFirst line\n" + line
try:
UI.WarningWindow(print_out, 'Exit'); print print_out
except Exception:
print print_out
badExit()
else:
#if probeset in exon_db:
#if exon_db[probeset].GeneID() == 'ENSG00000139970':
###Use the index values from above to assign each expression value to a new database
temp_group_array = {}
line_num += 1
for group in array_group_db:
if count == 0: array_index_list.append(array_group_db[group])
for array_index in array_group_db[group]:
try:
exp_val = float(t[array_index + 1])
except Exception:
if 'Gene_ID' not in line: bad_row_import[probeset] = line; exp_val = 1
###appended is the numerical expression value for each array in the group (temporary array)
try:
temp_group_array[group].append(exp_val) #add 1 since probeset is the first column
except KeyError:
temp_group_array[group] = [exp_val]
if count == 0: array_index_list.sort(); count = 1
####store the group database within the probeset database entry
try:
                    null = exon_db[probeset] ###To conserve memory, don't store any probesets not used for downstream analyses (e.g. not linked to mRNAs)
#if 'ENSG00000139970' in probeset:
#print [max_exp]
#print t[1:];kill
#max_exp = max(map(float, t[1:]))
#if len(array_raw_group_values)>10000: break
#if max_exp>math.log(70,2):
array_raw_group_values[probeset] = temp_group_array
except KeyError:
#print probeset
pass
print len(array_raw_group_values), 'sequence identifiers imported out of', line_num - 1
if len(bad_row_import) > 0:
print len(bad_row_import), "Rows with an unexplained import error processed and deleted."
print "Example row:";
x = 0
for i in bad_row_import:
if x == 0: print bad_row_import[i]
try:
del array_raw_group_values[i]
except Exception:
null = []
x += 1
### If no gene expression reporting probesets were imported, update constitutive_probeset_db to include all mRNA aligning probesets
cs_genedb = {};
missing_genedb = {};
addback_genedb = {};
rnaseq_cs_gene_db = {}
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
#if gene == 'ENSG00000185008': print [probeset]
try:
null = array_raw_group_values[probeset];
cs_genedb[gene] = []
            if gene == probeset: rnaseq_cs_gene_db[gene] = [] ### If RPKM normalization used, use the gene expression values already calculated
except Exception:
            missing_genedb[gene] = [] ### Collect genes possibly missing from the constitutive database (verify next)
for gene in missing_genedb:
try:
null = cs_genedb[gene]
except Exception:
addback_genedb[gene] = []
for probeset in array_raw_group_values:
try:
gene = exon_db[probeset].GeneID()
try:
null = addback_genedb[gene]
                if 'I' not in probeset and 'U' not in probeset: ### No intron- or UTR-containing features should be used for constitutive expression
null = string.split(probeset, ':')
if len(null) < 3: ### No trans-gene junctions should be used for constitutive expression
constitutive_probeset_db[probeset] = gene
except Exception:
null = []
except Exception:
null = []
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
#if gene == 'ENSG00000185008': print [[probeset]]
### Only examine values for associated exons when determining RNASeq constitutive expression (when exon data is present)
normalization_method = 'raw'
if array_type == 'RNASeq':
junction_count = 0;
constitutive_probeset_db2 = {}
for uid in constitutive_probeset_db:
if '-' in uid: junction_count += 1
        if len(rnaseq_cs_gene_db) > 0: ### If filtered RPKM gene-level expression data present, use this instead (and only this)
normalization_method = 'RPKM'
constitutive_probeset_db = {} ### Re-set this database
for gene in rnaseq_cs_gene_db:
constitutive_probeset_db[gene] = gene
elif junction_count != 0 and len(constitutive_probeset_db) != junction_count:
### occurs when there is a mix of junction and exon IDs
for uid in constitutive_probeset_db:
if '-' not in uid: constitutive_probeset_db2[uid] = constitutive_probeset_db[uid]
constitutive_probeset_db = constitutive_probeset_db2;
constitutive_probeset_db2 = []
"""
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
if gene == 'ENSG00000185008': print [probeset]
"""
###Build all putative splicing events
global alt_junction_db;
global exon_dbase;
global critical_exon_db;
critical_exon_db = {}
if array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
### Applies to reciprocal junction analyses only
if array_type == 'AltMouse':
alt_junction_db, critical_exon_db, exon_dbase, exon_inclusion_db, exon_db = ExonAnnotate_module.identifyPutativeSpliceEvents(
exon_db, constitutive_probeset_db, array_raw_group_values, agglomerate_inclusion_probesets,
onlyAnalyzeJunctions)
print 'Number of Genes with Examined Splice Events:', len(alt_junction_db)
elif (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null':
import JunctionArray
alt_junction_db, critical_exon_db, exon_dbase, exon_inclusion_db, exon_db = JunctionArray.getPutativeSpliceEvents(
species, array_type, exon_db, agglomerate_inclusion_probesets, root_dir)
print 'Number of Genes with Examined Splice Events:', len(alt_junction_db)
#alt_junction_db=[]; critical_exon_db=[]; exon_dbase=[]; exon_inclusion_db=[]
if agglomerate_inclusion_probesets == 'yes':
array_raw_group_values = agglomerateInclusionProbesets(array_raw_group_values, exon_inclusion_db)
exon_inclusion_db = []
### For datasets with high memory requirements (RNASeq), filter the current and new databases
### Begin this function after agglomeration to ensure agglomerated probesets are considered
reciprocal_probesets = {}
if array_type == 'junction' or array_type == 'RNASeq':
for affygene in alt_junction_db:
for event in alt_junction_db[affygene]:
reciprocal_probesets[event.InclusionProbeset()] = []
reciprocal_probesets[event.ExclusionProbeset()] = []
        not_evaluated = {}
for probeset in array_raw_group_values:
try:
null = reciprocal_probesets[probeset]
except Exception:
### Don't remove constitutive probesets
try:
null = constitutive_probeset_db[probeset]
except Exception:
                    not_evaluated[probeset] = []
        #print 'Removing',len(not_evaluated),'exon/junction IDs not evaluated for splicing'
        for probeset in not_evaluated:
del array_raw_group_values[probeset]
###Check to see if we have precomputed expression data or raw to be analyzed
x = 0;
y = 0;
array_raw_group_values2 = {};
probesets_to_delete = [] ### Record deleted probesets
if len(array_raw_group_values) == 0:
print_out = "No genes were considered 'Expressed' based on your input options. Check to make sure that the right species database is indicated and that the right data format has been selected (e.g., non-log versus log expression)."
try:
UI.WarningWindow(print_out, 'Exit')
except Exception:
print print_out; print "Exiting program"
badExit()
elif len(array_raw_group_values) > 0:
###array_group_list should already be unique and correctly sorted (see above)
for probeset in array_raw_group_values:
data_lists = []
for group_name in array_group_list:
                data_list = array_raw_group_values[probeset][group_name] ###nested database entry access - baseline expression
if global_addition_factor > 0: data_list = addGlobalFudgeFactor(data_list, 'log')
data_lists.append(data_list)
if len(array_group_list) == 2:
data_list1 = data_lists[0];
data_list2 = data_lists[-1];
avg1 = statistics.avg(data_list1);
avg2 = statistics.avg(data_list2)
log_fold = avg2 - avg1
try:
#t,df,tails = statistics.ttest(data_list1,data_list2,2,3) #unpaired student ttest, calls p_value function
#t = abs(t); df = round(df) #Excel doesn't recognize fractions in a DF
#p = statistics.t_probability(t,df)
p = statistics.runComparisonStatistic(data_list1, data_list2, probability_statistic)
if p == -1:
if len(data_list1) > 1 and len(data_list2) > 1:
print_out = "The probability statistic selected (" + probability_statistic + ") is not compatible with the\nexperimental design. Please consider an alternative statistic or correct the problem.\nExiting AltAnalyze."
try:
UI.WarningWindow(print_out, 'Exit')
except Exception:
print print_out; print "Exiting program"
badExit()
else:
p = 1
except Exception:
p = 1
fold_dbase[probeset] = [0];
fold_dbase[probeset].append(log_fold)
stats_dbase[probeset] = [avg1];
stats_dbase[probeset].append(p)
###replace entries with the two lists for later permutation analysis
                    if p == -1: ### was p == 1: unsure why that filter existed; it mistakenly removed probesets where there is just one array for each group
del fold_dbase[probeset];
del stats_dbase[probeset];
probesets_to_delete.append(probeset);
x += 1
if x == 1: print 'Bad data detected...', data_list1, data_list2
                    elif (avg1 < expression_threshold and avg2 < expression_threshold and p > p_threshold) and array_type != 'RNASeq': ### Inserted a filtering option to exclude small-variance, low-expression probesets
del fold_dbase[probeset];
del stats_dbase[probeset];
probesets_to_delete.append(probeset);
x += 1
else:
array_raw_group_values2[probeset] = [data_list1, data_list2]
else: ###Non-junction analysis can handle more than 2 groups
index = 0
for data_list in data_lists:
try:
array_raw_group_values2[probeset].append(data_list)
except KeyError:
array_raw_group_values2[probeset] = [data_list]
if len(array_group_list) > 2: ### Thus, there is some variance for this probeset
### Create a complete stats_dbase containing all fold changes
if index == 0:
avg_baseline = statistics.avg(data_list);
stats_dbase[probeset] = [avg_baseline]
else:
avg_exp = statistics.avg(data_list)
log_fold = avg_exp - avg_baseline
try:
fold_dbase[probeset].append(log_fold)
except KeyError:
fold_dbase[probeset] = [0, log_fold]
index += 1
if array_type == 'RNASeq':
id_name = 'exon/junction IDs'
else:
id_name = 'array IDs'
array_raw_group_values = array_raw_group_values2;
array_raw_group_values2 = []
print x, id_name, "excluded prior to analysis... predicted not detected"
global original_avg_const_exp_db;
global original_fold_dbase
global avg_const_exp_db;
global permute_lists;
global midas_db
if len(array_raw_group_values) > 0:
adj_fold_dbase, nonlog_NI_db, conditions, gene_db, constitutive_gene_db, constitutive_fold_change, original_avg_const_exp_db = constitutive_exp_normalization(
fold_dbase, stats_dbase, exon_db, constitutive_probeset_db)
stats_dbase = [] ### No longer needed after this point
original_fold_dbase = fold_dbase;
avg_const_exp_db = {};
permute_lists = [];
y = 0;
original_conditions = conditions;
max_replicates, equal_replicates = maxReplicates()
gene_expression_diff_db = constitutive_expression_changes(constitutive_fold_change,
annotate_db) ###Add in constitutive fold change filter to assess gene expression for ASPIRE
while conditions > y:
avg_const_exp_db = constitutive_exp_normalization_raw(gene_db, constitutive_gene_db, array_raw_group_values,
exon_db, y, avg_const_exp_db);
y += 1
#print len(avg_const_exp_db),constitutive_gene_db['ENSMUSG00000054850']
###Export Analysis Results for external splicing analysis (e.g. MiDAS format)
if run_MiDAS == 'yes' and normalization_method != 'RPKM': ### RPKM has negative values which will crash MiDAS
status = ResultsExport_module.exportTransitResults(array_group_list, array_raw_group_values,
array_group_name_db, avg_const_exp_db, adj_fold_dbase,
exon_db, dataset_name, apt_location)
print "Finished exporting input data for MiDAS analysis"
try:
midas_db = ResultsExport_module.importMidasOutput(dataset_name)
except Exception:
midas_db = {} ### Occurs if there are not enough samples to calculate a MiDAS p-value
else:
midas_db = {}
###Provides all pairwise permuted group comparisons
if array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
permute_lists = statistics.permute_arrays(array_index_list)
### Now remove probesets from the analysis that were used to evaluate gene expression
for probeset in constitutive_probeset_db:
try:
null = reciprocal_probesets[probeset]
except Exception:
try:
del array_raw_group_values[probeset]
except Exception:
null = []
        not_evaluated = [];
reciprocal_probesets = []
constitutive_probeset_db = []
    ### Above, all conditions were examined when more than 2 are present... change this so that only the most extreme are analyzed further
if len(array_group_list) > 2 and analysis_method == 'splicing-index' and (
array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null'): ### USED FOR MULTIPLE COMPARISONS
print 'Calculating splicing-index values for multiple group comparisons (please be patient)...',
"""
if len(midas_db)==0:
print_out = 'Warning!!! MiDAS failed to run for multiple groups. Please make\nsure there are biological replicates present for your groups.\nAltAnalyze requires replicates for multi-group (more than two) analyses.'
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out; print "Exiting program"
badExit()"""
if filter_for_AS == 'yes':
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try:
del nonlog_NI_db[probeset]
except KeyError:
null = []
if export_NI_values == 'yes':
export_exon_regions = 'yes'
### Currently, we don't deal with raw adjusted expression values, just group, so just export the values for each group
        summary_output = root_dir + 'AltResults/RawSpliceData/' + species + '/' + analysis_method + '/' + dataset_name[:-1] + '.txt'
print "Exporting all normalized intensities to:\n" + summary_output
adjoutput = export.ExportFile(summary_output)
title = string.join(['Gene\tExonID\tprobesetID'] + original_array_names, '\t') + '\n';
adjoutput.write(title)
        ### Pick which data lists have the most extreme values using the NI_dbase (adjusted folds for each condition)
original_increment = int(len(nonlog_NI_db) / 20);
increment = original_increment;
interaction = 0
for probeset in nonlog_NI_db:
if interaction == increment: increment += original_increment; print '*',
interaction += 1
geneid = exon_db[probeset].GeneID();
ed = exon_db[probeset]
index = 0;
NI_list = [] ### Add the group_name to each adj fold value
for NI in nonlog_NI_db[probeset]:
NI_list.append((NI, index));
                index += 1 ### set up to sort for the extreme adj folds and get the associated group_name using the index
raw_exp_vals = array_raw_group_values[probeset]
adj_exp_lists = {} ### Store the adjusted expression values for each group
if geneid in avg_const_exp_db:
k = 0;
gi = 0;
adj_exp_vals = []
for exp_list in raw_exp_vals:
for exp in exp_list:
adj_exp_val = exp - avg_const_exp_db[geneid][k]
try:
adj_exp_lists[gi].append(adj_exp_val)
except Exception:
adj_exp_lists[gi] = [adj_exp_val]
if export_NI_values == 'yes': adj_exp_vals.append(str(adj_exp_val))
k += 1
gi += 1
if export_NI_values == 'yes':
#print geneid+'-'+probeset, adj_exp_val, [ed.ExonID()];kill
if export_exon_regions == 'yes':
                            try: ### This will only work if ExonRegionID is stored in the abbreviated AffyExonSTData object - useful in comparing results between arrays (exon-region centric)
                                if (array_type == 'exon' or array_type == 'gene') or '-' not in ed.ExonID(): ### only include exon entries, not junctions
exon_regions = string.split(ed.ExonRegionID(), '|')
for er in exon_regions:
if len(er) > 0:
er = er
else:
try:
er = ed.ExonID()
except Exception:
er = 'NA'
ev = string.join([geneid + '\t' + er + '\t' + probeset] + adj_exp_vals,
'\t') + '\n'
if len(filtered_probeset_db) > 0:
                                if probeset in filtered_probeset_db: adjoutput.write(ev) ### This is used when we want to restrict to only probesets known to already be changed
else:
adjoutput.write(ev)
except Exception:
ev = string.join([geneid + '\t' + 'NA' + '\t' + probeset] + adj_exp_vals, '\t') + '\n';
adjoutput.write(ev)
NI_list.sort()
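            ### NI_list is sorted ascending, so NI_list[0] and NI_list_rev[0] hold the most extreme group-level
            ### normalized intensities; below, the splicing-index is the log2 ratio of a pair of extremes, and a
            ### pair is only accepted when the gene-level fold change between those two groups is under log_fold_cutoff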
examine_pairwise_comparisons = 'yes'
if examine_pairwise_comparisons == 'yes':
k1 = 0;
k2 = 0;
filtered_NI_comps = []
NI_list_rev = list(NI_list);
NI_list_rev.reverse()
NI1, index1 = NI_list[k1];
NI2, index2 = NI_list_rev[k2];
abs_SI = abs(math.log(NI1 / NI2, 2))
if abs_SI < alt_exon_logfold_cutoff:
### Indicates that no valid matches were identified - hence, exit loop and return an NI_list with no variance
NI_list = [NI_list[0], NI_list[0]]
else:
                ### The extremes differ sufficiently; check whether the gene-level fold change for this pair is below the cutoff
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2 - constit_exp1
#print 'original',abs_SI,k1,k2, ge_fold, constit_exp1, constit_exp2
if abs(ge_fold) < log_fold_cutoff:
filtered_NI_comps.append([abs_SI, k1, k2])
else:
for i1 in NI_list:
k2 = 0
for i2 in NI_list_rev:
NI1, index1 = i1;
NI2, index2 = i2;
abs_SI = abs(math.log(NI1 / NI2, 2))
#constit_exp1 = original_avg_const_exp_db[geneid][index1]
#constit_exp2 = original_avg_const_exp_db[geneid][index2]
#ge_fold = constit_exp2-constit_exp1
#if abs(ge_fold) < log_fold_cutoff: filtered_NI_comps.append([abs_SI,k1,k2])
#print k1,k2, i1, i2, abs_SI, abs(ge_fold), log_fold_cutoff, alt_exon_logfold_cutoff
if abs_SI < alt_exon_logfold_cutoff:
break
else:
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2 - constit_exp1
if abs(ge_fold) < log_fold_cutoff:
filtered_NI_comps.append([abs_SI, k1, k2])
#if k1 == 49 or k1 == 50 or k1 == 51: print probeset, abs_SI, k1, k2, abs(ge_fold),log_fold_cutoff, index1, index2, NI1, NI2, constit_exp1,constit_exp2
k2 += 1
k1 += 1
if len(filtered_NI_comps) > 0:
#print filtered_NI_comps
#print NI_list_rev
#print probeset,geneid
#print len(filtered_NI_comps)
#print original_avg_const_exp_db[geneid]
filtered_NI_comps.sort()
si, k1, k2 = filtered_NI_comps[-1]
NI_list = [NI_list[k1], NI_list_rev[k2]]
"""
NI1,index1 = NI_list[0]; NI2,index2 = NI_list[-1]
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
print probeset, si, ge_fold, NI_list"""
#print k1,k2;sys.exit()
index1 = NI_list[0][1];
index2 = NI_list[-1][1]
nonlog_NI_db[probeset] = [NI_list[0][0], NI_list[-1][0]] ### Update the values of this dictionary
data_list1 = array_raw_group_values[probeset][index1];
data_list2 = array_raw_group_values[probeset][index2]
avg1 = statistics.avg(data_list1);
avg2 = statistics.avg(data_list2);
log_fold = avg2 - avg1
group_name1 = array_group_list[index1];
group_name2 = array_group_list[index2]
try:
#t,df,tails = statistics.ttest(data_list1,data_list2,2,3) #unpaired student ttest, calls p_value function
#t = abs(t); df = round(df); ttest_exp_p = statistics.t_probability(t,df)
ttest_exp_p = statistics.runComparisonStatistic(data_list1, data_list2, probability_statistic)
except Exception:
ttest_exp_p = 1
fold_dbase[probeset] = [0];
fold_dbase[probeset].append(log_fold)
if ttest_exp_p == -1:
del fold_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
elif avg1 < expression_threshold and avg2 < expression_threshold and (
                            ttest_exp_p > p_threshold and ttest_exp_p != 1): ### Inserted a filtering option to exclude small-variance, low-expression probesets
del fold_dbase[probeset];
probesets_to_delete.append(probeset);
x += 1
else:
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2 - constit_exp1
normInt1 = (avg1 - constit_exp1);
normInt2 = (avg2 - constit_exp2)
adj_fold = normInt2 - normInt1
splicing_index = -1 * adj_fold;
abs_splicing_index = abs(splicing_index)
#print probeset, splicing_index, ge_fold, index1, index2
#normIntList1 = adj_exp_lists[index1]; normIntList2 = adj_exp_lists[index2]
all_nI = []
for g_index in adj_exp_lists: all_nI.append(adj_exp_lists[g_index])
try:
                        normIntensityP = statistics.OneWayANOVA(all_nI) #[normIntList1,normIntList2] ### This stays an ANOVA independent of the algorithm chosen since group number > 2
except Exception:
normIntensityP = 'NA'
if (normInt1 * normInt2) < 0:
opposite_SI_log_mean = 'yes'
else:
opposite_SI_log_mean = 'no'
abs_log_ratio = abs(ge_fold)
if probeset in midas_db:
try:
midas_p = float(midas_db[probeset])
except ValueError:
midas_p = 'NA'
else:
midas_p = 'NA'
#if 'ENSG00000059588' in geneid: print probeset, splicing_index, constit_exp1, constit_exp2, ge_fold,group_name2+'_vs_'+group_name1, index1, index2
if abs_splicing_index > alt_exon_logfold_cutoff and (
midas_p < p_threshold or midas_p == 'NA'): #and abs_log_ratio>1 and ttest_exp_p<0.05: ###and ge_threshold_count==2
exonid = ed.ExonID();
critical_exon_list = [1, [exonid]]
ped = ProbesetExpressionData(avg1, avg2, log_fold, adj_fold, ttest_exp_p,
group_name2 + '_vs_' + group_name1)
sid = ExonData(splicing_index, probeset, critical_exon_list, geneid, normInt1, normInt2,
normIntensityP, opposite_SI_log_mean)
sid.setConstitutiveExpression(constit_exp1);
sid.setConstitutiveFold(ge_fold);
sid.setProbesetExpressionData(ped)
si_db.append((splicing_index, sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(splicing_index, geneid, normIntensityP)
ex_db[probeset] = eed
if array_type == 'RNASeq':
id_name = 'exon/junction IDs'
else:
id_name = 'array IDs'
print len(si_db), id_name, "with evidence of Alternative expression"
original_fold_dbase = fold_dbase;
si_db.sort()
summary_data_db['denominator_exp_events'] = len(nonlog_NI_db)
del avg_const_exp_db;
del gene_db;
del constitutive_gene_db;
gene_expression_diff_db = {}
if export_NI_values == 'yes': adjoutput.close()
    ### Above, all conditions were examined when more than 2 are present... change this so that only the most extreme are analyzed further
elif len(array_group_list) > 2 and (
array_type == 'junction' or array_type == 'RNASeq' or array_type == 'AltMouse'): ### USED FOR MULTIPLE COMPARISONS
excluded_probeset_db = {}
group_sizes = [];
        original_array_indices = permute_lists[0] ###p[0] is the original organization of the group samples prior to permutation
for group in original_array_indices: group_sizes.append(len(group))
        if analysis_method == 'linearregres': ### For linear regression, these scores are non-log
original_array_raw_group_values = copy.deepcopy(array_raw_group_values)
for probeset in array_raw_group_values:
ls_concatenated = []
for group in array_raw_group_values[probeset]: ls_concatenated += group
ls_concatenated = statistics.log_fold_conversion_fraction(ls_concatenated)
array_raw_group_values[probeset] = ls_concatenated
pos1 = 0;
pos2 = 0;
positions = []
for group in group_sizes:
if pos1 == 0:
pos2 = group; positions.append((pos1, pos2))
else:
pos2 = pos1 + group; positions.append((pos1, pos2))
pos1 = pos2
if export_NI_values == 'yes':
export_exon_regions = 'yes'
### Currently, we don't deal with raw adjusted expression values, just group, so just export the values for each group
        summary_output = root_dir + 'AltResults/RawSpliceData/' + species + '/' + analysis_method + '/' + dataset_name[:-1] + '.txt'
print "Exporting all normalized intensities to:\n" + summary_output
adjoutput = export.ExportFile(summary_output)
title = string.join(['gene\tprobesets\tExonRegion'] + original_array_names, '\t') + '\n';
adjoutput.write(title)
events_examined = 0;
denominator_events = 0;
fold_dbase = [];
adj_fold_dbase = [];
scores_examined = 0
splice_event_list = [];
splice_event_list_mx = [];
splice_event_list_non_mx = [];
event_mx_temp = [];
permute_p_values = {};
probeset_comp_db = {}#use this to exclude duplicate mx events
for geneid in alt_junction_db:
affygene = geneid
for event in alt_junction_db[geneid]:
if array_type == 'AltMouse':
#event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
#critical_exon_db[affygene,tuple(critical_exons)] = [1,'E'+str(e1a),'E'+str(e2b)] --- affygene,tuple(event) == key, 1 indicates both are either up or down together
event_call = event[0][0] + '-' + event[1][0]
exon_set1 = event[0][1];
exon_set2 = event[1][1]
probeset1 = exon_dbase[affygene, exon_set1]
probeset2 = exon_dbase[affygene, exon_set2]
critical_exon_list = critical_exon_db[affygene, tuple(event)]
if array_type == 'junction' or array_type == 'RNASeq':
event_call = 'ei-ex' ### Below objects from JunctionArrayEnsemblRules - class JunctionInformation
probeset1 = event.InclusionProbeset();
probeset2 = event.ExclusionProbeset()
exon_set1 = event.InclusionJunction();
exon_set2 = event.ExclusionJunction()
try:
novel_event = event.NovelEvent()
except Exception:
novel_event = 'known'
critical_exon_list = [1, event.CriticalExonSets()]
key, jd = formatJunctionData([probeset1, probeset2], geneid, critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try:
jd.setSymbol(annotate_db[geneid].Symbol())
except Exception:
null = []
#if '|' in probeset1: print probeset1, key,jd.InclusionDisplay();kill
probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
dI_scores = []
if probeset1 in nonlog_NI_db and probeset2 in nonlog_NI_db and probeset1 in array_raw_group_values and probeset2 in array_raw_group_values:
events_examined += 1
if analysis_method == 'ASPIRE':
index1 = 0;
NI_list1 = [];
NI_list2 = [] ### Add the group_name to each adj fold value
for NI in nonlog_NI_db[probeset1]: NI_list1.append(NI)
for NI in nonlog_NI_db[probeset2]: NI_list2.append(NI)
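                        ### Score every ordered pair of groups; ASPIRE only retains reciprocal events, i.e. where
                        ### Rin (inclusion ratio b1/e1) and Rex (exclusion ratio b2/e2) fall on opposite sides of 1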
for NI1_g1 in NI_list1:
NI2_g1 = NI_list2[index1];
index2 = 0
for NI1_g2 in NI_list1:
try:
NI2_g2 = NI_list2[index2]
except Exception:
print index1, index2, NI_list1, NI_list2;kill
if index1 != index2:
b1 = NI1_g1;
e1 = NI1_g2
b2 = NI2_g1;
e2 = NI2_g2
try:
dI = statistics.aspire_stringent(b1, e1, b2, e2);
Rin = b1 / e1;
Rex = b2 / e2
if (Rin > 1 and Rex < 1) or (Rin < 1 and Rex > 1):
if dI < 0:
i1, i2 = index2, index1 ### all scores should indicate upregulation
else:
i1, i2 = index1, index2
dI_scores.append((abs(dI), i1, i2))
except Exception:
#if array_type != 'RNASeq': ### RNASeq has counts of zero and one that can cause the same result between groups and probesets
#print probeset1, probeset2, b1, e1, b2, e2, index1, index2, events_examined;kill
                                    ### Exception - Occurs for RNA-Seq but can occur for array data under extremely rare circumstances (Rex=Rin even with different b1,e1 and b2,e2 values)
null = []
index2 += 1
index1 += 1
dI_scores.sort()
if analysis_method == 'linearregres':
log_fold, i1, i2 = getAllPossibleLinearRegressionScores(probeset1, probeset2, positions,
group_sizes)
dI_scores.append((log_fold, i1, i2))
raw_exp_vals1 = original_array_raw_group_values[probeset1];
raw_exp_vals2 = original_array_raw_group_values[probeset2]
else:
                        raw_exp_vals1 = array_raw_group_values[probeset1]
                        raw_exp_vals2 = array_raw_group_values[probeset2]
adj_exp_lists1 = {};
adj_exp_lists2 = {} ### Store the adjusted expression values for each group
if geneid in avg_const_exp_db:
gi = 0;
l = 0;
adj_exp_vals = [];
anova_test = []
for exp_list in raw_exp_vals1:
k = 0;
anova_group = []
for exp in exp_list:
adj_exp_val1 = exp - avg_const_exp_db[geneid][l]
try:
adj_exp_lists1[gi].append(adj_exp_val1)
except Exception:
adj_exp_lists1[gi] = [adj_exp_val1]
adj_exp_val2 = raw_exp_vals2[gi][k] - avg_const_exp_db[geneid][l]
try:
adj_exp_lists2[gi].append(adj_exp_val2)
except Exception:
adj_exp_lists2[gi] = [adj_exp_val2]
anova_group.append(adj_exp_val2 - adj_exp_val1)
if export_NI_values == 'yes':
#if analysis_method == 'ASPIRE':
adj_exp_vals.append(str(adj_exp_val2 - adj_exp_val1))
### BELOW CODE PRODUCES THE SAME RESULT!!!!
"""folds1 = statistics.log_fold_conversion_fraction([exp])
folds2 = statistics.log_fold_conversion_fraction([raw_exp_vals2[gi][k]])
lr_score = statistics.convert_to_log_fold(statistics.simpleLinRegress(folds1,folds2))
adj_exp_vals.append(str(lr_score))"""
k += 1;
                                l += 1 ### advance the flat per-sample index into avg_const_exp_db[geneid]
gi += 1;
anova_test.append(anova_group)
if export_NI_values == 'yes':
if export_exon_regions == 'yes':
exon_regions = string.join(critical_exon_list[1], '|')
exon_regions = string.split(exon_regions, '|')
for er in exon_regions:
ev = string.join(
[geneid + '\t' + probeset1 + '-' + probeset2 + '\t' + er] + adj_exp_vals,
'\t') + '\n'
if len(filtered_probeset_db) > 0:
if probeset1 in filtered_probeset_db and probeset2 in filtered_probeset_db:
                                        adjoutput.write(ev) ### This is used when we want to restrict to only probesets known to already be changed
else:
adjoutput.write(ev)
try:
                            anovaNIp = statistics.OneWayANOVA(anova_test) ### This stays an ANOVA independent of the algorithm chosen since group number > 2
except Exception:
anovaNIp = 'NA'
if len(dI_scores) > 0 and geneid in avg_const_exp_db:
dI, index1, index2 = dI_scores[-1];
count = 0
probesets = [probeset1, probeset2];
index = 0
key, jd = formatJunctionData([probeset1, probeset2], affygene, critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try:
jd.setSymbol(annotate_db[affygene].Symbol())
except Exception:
null = []
                            probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
                            if max_replicates > 2 or equal_replicates == 2: permute_p_values[(probeset1, probeset2)] = [anovaNIp, 'NA', 'NA', 'NA']
index = 0
for probeset in probesets:
if analysis_method == 'linearregres':
data_list1 = original_array_raw_group_values[probeset][index1];
data_list2 = original_array_raw_group_values[probeset][index2]
else:
                                    data_list1 = array_raw_group_values[probeset][index1]
                                    data_list2 = array_raw_group_values[probeset][index2]
baseline_exp = statistics.avg(data_list1);
experimental_exp = statistics.avg(data_list2);
fold_change = experimental_exp - baseline_exp
group_name1 = array_group_list[index1];
group_name2 = array_group_list[index2]
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1, data_list2,
probability_statistic)
except Exception:
ttest_exp_p = 'NA'
if ttest_exp_p == 1: ttest_exp_p = 'NA'
if index == 0:
try:
adj_fold = statistics.avg(adj_exp_lists1[index2]) - statistics.avg(
adj_exp_lists1[index1])
except Exception:
print raw_exp_vals1, raw_exp_vals2, avg_const_exp_db[geneid]
print probeset, probesets, adj_exp_lists1, adj_exp_lists2, index1, index2;
kill
ped1 = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold,
ttest_exp_p, group_name2 + '_vs_' + group_name1)
else:
adj_fold = statistics.avg(adj_exp_lists2[index2]) - statistics.avg(
adj_exp_lists2[index1])
ped2 = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold,
ttest_exp_p, group_name2 + '_vs_' + group_name1)
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2 - constit_exp1
index += 1
try:
pp1 = statistics.runComparisonStatistic(adj_exp_lists1[index1], adj_exp_lists1[index2],
probability_statistic)
pp2 = statistics.runComparisonStatistic(adj_exp_lists2[index1], adj_exp_lists2[index2],
probability_statistic)
except Exception:
pp1 = 'NA'; pp2 = 'NA'
if analysis_method == 'ASPIRE' and len(dI_scores) > 0:
p1 = JunctionExpressionData(adj_exp_lists1[index1], adj_exp_lists1[index2], pp1, ped1)
p2 = JunctionExpressionData(adj_exp_lists2[index1], adj_exp_lists2[index2], pp2, ped2)
### ANOVA p-replaces the below p-value
"""try: baseline_scores, exp_scores, pairwiseNIp = calculateAllASPIREScores(p1,p2)
except Exception: baseline_scores = [0]; exp_scores=[dI]; pairwiseNIp = 0 """
#if pairwiseNIp == 'NA': pairwiseNIp = 0 ### probably comment out
if len(dI_scores) > 0:
scores_examined += 1
if probeset in midas_db:
try:
midas_p = float(midas_db[probeset])
except ValueError:
midas_p = 'NA'
else:
midas_p = 'NA'
if dI > alt_exon_logfold_cutoff and (
anovaNIp < p_threshold or perform_permutation_analysis == 'yes' or anovaNIp == 'NA' or anovaNIp == 1): #and abs_log_ratio>1 and ttest_exp_p<0.05: ###and ge_threshold_count==2
#print [dI, probeset1,probeset2, anovaNIp, alt_exon_logfold_cutoff];kill
ejd = ExonJunctionData(dI, probeset1, probeset2, pp1, pp2, 'upregulated', event_call,
critical_exon_list, affygene, ped1, ped2)
ejd.setConstitutiveFold(ge_fold);
ejd.setConstitutiveExpression(constit_exp1)
if array_type == 'RNASeq':
ejd.setNovelEvent(novel_event)
splice_event_list.append((dI, ejd))
else:
                                excluded_probeset_db[affygene + ':' + critical_exon_list[1][0]] = probeset1, affygene, dI, 'NA', anovaNIp
statistics.adjustPermuteStats(permute_p_values)
ex_db = splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db
original_fold_dbase = fold_dbase;
original_avg_const_exp_db = [];
nonlog_NI_db = [];
fold_dbase = []
summary_data_db['denominator_exp_events'] = events_examined
del avg_const_exp_db;
del gene_db;
del constitutive_gene_db;
gene_expression_diff_db = {}
if export_NI_values == 'yes': adjoutput.close()
print len(splice_event_list), 'alternative exons out of %s exon events examined' % events_examined
fold_dbase = [];
original_fold_dbase = [];
exon_db = [];
constitutive_gene_db = [];
addback_genedb = []
gene_db = [];
missing_genedb = []
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
return conditions, adj_fold_dbase, nonlog_NI_db, dataset_name, gene_expression_diff_db, midas_db, ex_db, si_db
class ProbesetExpressionData:
def __init__(self, baseline_exp, experimental_exp, fold_change, adj_fold, ttest_raw_exp, annotation):
self.baseline_exp = baseline_exp;
self.experimental_exp = experimental_exp
self.fold_change = fold_change;
self.adj_fold = adj_fold
self.ttest_raw_exp = ttest_raw_exp;
self.annotation = annotation
def BaselineExp(self): return str(self.baseline_exp)
def ExperimentalExp(self): return str(self.experimental_exp)
def FoldChange(self): return str(self.fold_change)
def AdjFold(self): return str(self.adj_fold)
def ExpPval(self): return str(self.ttest_raw_exp)
def Annotation(self): return self.annotation
    def __repr__(self): return self.BaselineExp() + '|' + self.FoldChange()
def agglomerateInclusionProbesets(array_raw_group_values, exon_inclusion_db):
###Combine expression profiles for inclusion probesets that correspond to the same splice event
for excl_probeset in exon_inclusion_db:
inclusion_event_profiles = []
if len(exon_inclusion_db[excl_probeset]) > 1:
for incl_probeset in exon_inclusion_db[excl_probeset]:
if incl_probeset in array_raw_group_values and excl_probeset in array_raw_group_values:
array_group_values = array_raw_group_values[incl_probeset]
inclusion_event_profiles.append(array_group_values)
#del array_raw_group_values[incl_probeset] ###Remove un-agglomerated original entry
if len(inclusion_event_profiles) > 0: ###Thus, some probesets for this splice event in input file
combined_event_profile = combine_profiles(inclusion_event_profiles)
            ###Combine inclusion probesets into a single ID (identical manner to that in ExonAnnotate_module.identifyPutativeSpliceEvents)
incl_probesets = exon_inclusion_db[excl_probeset]
incl_probesets_str = string.join(incl_probesets, '|')
array_raw_group_values[incl_probesets_str] = combined_event_profile
return array_raw_group_values
def combine_profiles(profile_list):
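    ### Element-wise average across profiles: for each group key, position i of the returned list is the
    ### mean of the i-th expression value across all profiles in profile_list that contain that key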
profile_group_sizes = {}
for db in profile_list:
for key in db: profile_group_sizes[key] = len(db[key])
break
new_profile_db = {}
for key in profile_group_sizes:
x = profile_group_sizes[key] ###number of elements in list for key
new_val_list = [];
i = 0
while i < x:
temp_val_list = []
for db in profile_list:
if key in db: val = db[key][i]; temp_val_list.append(val)
i += 1;
val_avg = statistics.avg(temp_val_list);
new_val_list.append(val_avg)
new_profile_db[key] = new_val_list
return new_profile_db
def constitutive_exp_normalization(fold_db, stats_dbase, exon_db, constitutive_probeset_db):
"""For every expression value, normalize to the expression of the constitutive gene features for that condition,
then store those ratios (probeset_exp/avg_constitutive_exp) and regenerate expression values relative only to the
baseline avg_constitutive_exp, for all conditions, to normalize out gene expression changes"""
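    ### Example (non-log space): if a probeset's expression is 8 and the gene's average constitutive expression
    ### for that condition is 4, the normalized intensity (NI) stored in nonlog_NI_db is 8/4 = 2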
#print "\nParameters:"
#print "Factor_out_expression_changes:",factor_out_expression_changes
#print "Only_include_constitutive_containing_genes:",only_include_constitutive_containing_genes
#print "\nAdjusting probeset average intensity values to factor out condition specific expression changes for optimal splicing descrimination"
gene_db = {};
constitutive_gene_db = {}
### organize everything by gene
for probeset in fold_db: conditions = len(fold_db[probeset]); break
remove_diff_exp_genes = remove_transcriptional_regulated_genes
if conditions > 2: remove_diff_exp_genes = 'no'
for probeset in exon_db:
        affygene = exon_db[probeset].GeneID() #exon_db[probeset] = affygene,exons,ensembl,block_exon_ids,block_structure,comparison_info
if probeset in fold_db:
try:
gene_db[affygene].append(probeset)
except KeyError:
gene_db[affygene] = [probeset]
if probeset in constitutive_probeset_db and (
only_include_constitutive_containing_genes == 'yes' or factor_out_expression_changes == 'no'):
                #the second conditional is used to exclude constitutive data if we wish to use all probesets for
#background normalization rather than just the designated 'gene' probesets.
if probeset in stats_dbase:
try:
constitutive_gene_db[affygene].append(probeset)
except KeyError:
constitutive_gene_db[affygene] = [probeset]
if len(constitutive_gene_db) > 0:
        ###This is blank when there are no constitutive probesets and the above condition is implemented
gene_db2 = constitutive_gene_db
else:
gene_db2 = gene_db
avg_const_exp_db = {}
for affygene in gene_db2:
probeset_list = gene_db2[affygene]
x = 0
while x < conditions:
### average all exp values for constitutive probesets for each condition
exp_list = []
for probeset in probeset_list:
probe_fold_val = fold_db[probeset][x]
baseline_exp = stats_dbase[probeset][0]
exp_val = probe_fold_val + baseline_exp
exp_list.append(exp_val)
avg_const_exp = statistics.avg(exp_list)
try:
avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError:
avg_const_exp_db[affygene] = [avg_const_exp]
x += 1
adj_fold_dbase = {};
nonlog_NI_db = {};
constitutive_fold_change = {}
    for affygene in avg_const_exp_db: ###If we only wish to include proper constitutive probes, this will ensure we only examine those genes and probesets that are constitutive
probeset_list = gene_db[affygene]
x = 0
while x < conditions:
exp_list = []
for probeset in probeset_list:
expr_to_subtract = avg_const_exp_db[affygene][x]
baseline_const_exp = avg_const_exp_db[affygene][0]
probe_fold_val = fold_db[probeset][x]
baseline_exp = stats_dbase[probeset][0]
exp_val = probe_fold_val + baseline_exp
exp_val_non_log = statistics.log_fold_conversion_fraction(exp_val)
expr_to_subtract_non_log = statistics.log_fold_conversion_fraction(expr_to_subtract)
baseline_const_exp_non_log = statistics.log_fold_conversion_fraction(baseline_const_exp)
if factor_out_expression_changes == 'yes':
exp_splice_valff = exp_val_non_log / expr_to_subtract_non_log
else: #if no, then we just normalize to the baseline constitutive expression in order to keep gene expression effects (useful if you don't trust constitutive feature expression levels)
exp_splice_valff = exp_val_non_log / baseline_const_exp_non_log
constitutive_fold_diff = expr_to_subtract_non_log / baseline_const_exp_non_log
###To calculate adjusted expression, we need to get the fold change in the constitutive avg (expr_to_subtract/baseline_const_exp) and divide the experimental expression
###By this fold change.
ge_adj_exp_non_log = exp_val_non_log / constitutive_fold_diff #gives a GE adjusted expression
try:
ge_adj_exp = math.log(ge_adj_exp_non_log, 2)
except ValueError:
print probeset, ge_adj_exp_non_log, constitutive_fold_diff, exp_val_non_log, exp_val, baseline_exp, probe_fold_val, dog
adj_probe_fold_val = ge_adj_exp - baseline_exp
                ### Here we normalize probeset expression to the average constitutive expression by dividing the probe signal by the avg constitutive probe signal (should be < 1)
                ### referred to as steady-state normalization
if array_type != 'AltMouse' or (probeset not in constitutive_probeset_db):
"""Can't use constitutive gene features since these have no variance for pearson analysis
Python will approximate numbers to a small decimal point range. If the first fold value is
zero, often, zero will be close to but not exactly zero. Correct below """
try:
adj_fold_dbase[probeset].append(adj_probe_fold_val)
except KeyError:
if abs(adj_probe_fold_val - 0) < 0.0000001: #make zero == exactly to zero
adj_probe_fold_val = 0
adj_fold_dbase[probeset] = [adj_probe_fold_val]
try:
nonlog_NI_db[probeset].append(
exp_splice_valff) ###ratio of junction exp relative to gene expression at that time-point
except KeyError:
nonlog_NI_db[probeset] = [exp_splice_valff]
n = 0
#if expr_to_subtract_non_log != baseline_const_exp_non_log: ###otherwise this is the first value in the expression array
if x != 0: ###previous expression can produce errors when multiple group averages have identical values
fold_change = expr_to_subtract_non_log / baseline_const_exp_non_log
fold_change_log = math.log(fold_change, 2)
constitutive_fold_change[affygene] = fold_change_log
### If we want to remove any genes from the analysis with large transcriptional changes
### that may lead to false positive splicing calls (different probeset kinetics)
if remove_diff_exp_genes == 'yes':
if abs(fold_change_log) > log_fold_cutoff:
del constitutive_fold_change[affygene]
try:
del adj_fold_dbase[probeset]
except KeyError:
n = 1
try:
del nonlog_NI_db[probeset]
except KeyError:
n = 1
"""elif expr_to_subtract_non_log == baseline_const_exp_non_log: ###This doesn't make sense, since n can't equal 1 if the conditional is false (check this code again later 11/23/07)
if n == 1:
del adj_fold_dbase[probeset]
del nonlog_NI_db[probeset]"""
x += 1
print "Intensity normalization complete..."
if factor_out_expression_changes == 'no':
adj_fold_dbase = fold_db #don't change expression values
print len(constitutive_fold_change), "genes undergoing analysis for alternative splicing/transcription"
summary_data_db['denominator_exp_genes'] = len(constitutive_fold_change)
"""
mir_gene_count = 0
for gene in constitutive_fold_change:
if gene in gene_microRNA_denom: mir_gene_count+=1
print mir_gene_count, "Genes with predicted microRNA binding sites undergoing analysis for alternative splicing/transcription"
"""
global gene_analyzed;
gene_analyzed = len(constitutive_gene_db)
return adj_fold_dbase, nonlog_NI_db, conditions, gene_db, constitutive_gene_db, constitutive_fold_change, avg_const_exp_db
class TranscriptionData:
def __init__(self, constitutive_fold, rna_processing_annotation):
self._constitutive_fold = constitutive_fold;
self._rna_processing_annotation = rna_processing_annotation
def ConstitutiveFold(self): return self._constitutive_fold
def ConstitutiveFoldStr(self): return str(self._constitutive_fold)
def RNAProcessing(self): return self._rna_processing_annotation
    def __repr__(self): return self.ConstitutiveFoldStr() + '|' + self.RNAProcessing()
def constitutive_expression_changes(constitutive_fold_change, annotate_db):
###Add in constitutive fold change filter to assess gene expression for ASPIRE
gene_expression_diff_db = {}
for affygene in constitutive_fold_change:
constitutive_fold = constitutive_fold_change[affygene];
rna_processing_annotation = ''
if affygene in annotate_db:
            if len(annotate_db[affygene].RNAProcessing()) > 4: rna_processing_annotation = annotate_db[affygene].RNAProcessing()
###Add in evaluation of RNA-processing/binding factor
td = TranscriptionData(constitutive_fold, rna_processing_annotation)
gene_expression_diff_db[affygene] = td
return gene_expression_diff_db
def constitutive_exp_normalization_raw(gene_db, constitutive_gene_db, array_raw_group_values, exon_db, y,
avg_const_exp_db):
"""normalize expression for raw expression data (only for non-baseline data)"""
#avg_true_const_exp_db[affygene] = [avg_const_exp]
temp_avg_const_exp_db = {}
for probeset in array_raw_group_values:
conditions = len(array_raw_group_values[probeset][y]);
        break #number of raw expression values to normalize
for affygene in gene_db:
        ###This is blank when there are no constitutive probesets or the above condition is implemented
if affygene in constitutive_gene_db:
probeset_list = constitutive_gene_db[affygene]
z = 1
else: ###so we can analyze splicing independent of gene expression even if no 'gene' feature is present
probeset_list = gene_db[affygene]
z = 0
x = 0
while x < conditions:
            ### average all exp values for constitutive probesets for each condition
exp_list = []
for probeset in probeset_list:
try:
                exp_val = array_raw_group_values[probeset][y][x] ### try statement is used for constitutive probes that were deleted due to filtering in performExpressionAnalysis
except KeyError:
continue
exp_list.append(exp_val)
try:
avg_const_exp = statistics.avg(exp_list)
except Exception:
avg_const_exp = 'null'
if only_include_constitutive_containing_genes == 'yes' and avg_const_exp != 'null':
if z == 1:
try:
avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError:
avg_const_exp_db[affygene] = [avg_const_exp]
try:
temp_avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError:
temp_avg_const_exp_db[affygene] = [avg_const_exp]
elif avg_const_exp != 'null': ###***
try:
avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError:
avg_const_exp_db[affygene] = [avg_const_exp]
try:
temp_avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError:
temp_avg_const_exp_db[affygene] = [avg_const_exp]
x += 1
if analysis_method == 'ANOVA':
global normalized_raw_exp_ratios;
normalized_raw_exp_ratios = {}
for affygene in gene_db:
probeset_list = gene_db[affygene]
for probeset in probeset_list:
            x = 0
            while x < group_size: ### note: group_size is not defined in this function and must come from the global scope
new_ratios = [] ### Calculate expression ratios relative to constitutive expression
exp_val = array_raw_group_values[probeset][y][x]
const_exp_val = temp_avg_const_exp_db[affygene][x]
###Since the above dictionary is agglomerating all constitutive expression values for permutation,
###we need an unbiased way to grab just those relevant const. exp. vals. (hence the temp dictionary)
#non_log_exp_val = statistics.log_fold_conversion_fraction(exp_val)
#non_log_const_exp_val = statistics.log_fold_conversion_fraction(const_exp_val)
#non_log_exp_ratio = non_log_exp_val/non_log_const_exp_val
log_exp_ratio = exp_val - const_exp_val
try:
normalized_raw_exp_ratios[probeset].append(log_exp_ratio)
except KeyError:
                    normalized_raw_exp_ratios[probeset] = [log_exp_ratio]
                x += 1
return avg_const_exp_db
######### Z Score Analyses #######
class ZScoreData:
def __init__(self, element, changed, measured, zscore, null_z, gene_symbols):
self._element = element;
self._changed = changed;
self._measured = measured
self._zscore = zscore;
self._null_z = null_z;
self._gene_symbols = gene_symbols
def ElementID(self):
return self._element
def Changed(self):
return str(self._changed)
def Measured(self):
return str(self._measured)
def AssociatedWithElement(self):
return str(self._gene_symbols)
def ZScore(self):
return str(self._zscore)
def SetP(self, p):
self._permute_p = p
def PermuteP(self):
return str(self._permute_p)
def SetAdjP(self, adjp):
self._adj_p = adjp
def AdjP(self):
return str(self._adj_p)
def PercentChanged(self):
try:
pc = float(self.Changed()) / float(self.Measured()) * 100
except Exception:
pc = 0
return str(pc)
def NullZ(self):
return self._null_z
def Report(self):
output = self.ElementID()
return output
def __repr__(self):
return self.Report()
class FDRStats(ZScoreData):
def __init__(self, p): self._permute_p = p
def AdjP(self): return str(self._adj_p)
def countGenesForElement(permute_input_list, probeset_to_gene, probeset_element_db):
element_gene_db = {}
for probeset in permute_input_list:
try:
element_list = probeset_element_db[probeset]
gene = probeset_to_gene[probeset]
for element in element_list:
try:
element_gene_db[element].append(gene)
except KeyError:
element_gene_db[element] = [gene]
except KeyError:
null = []
### Count the number of unique genes per element
for element in element_gene_db:
t = {}
for i in element_gene_db[element]: t[i] = []
element_gene_db[element] = len(t)
return element_gene_db
def formatGeneSymbolHits(geneid_list):
symbol_list = []
for geneid in geneid_list:
symbol = ''
if geneid in annotate_db: symbol = annotate_db[geneid].Symbol()
if len(symbol) < 1: symbol = geneid
symbol_list.append(symbol)
symbol_str = string.join(symbol_list, ', ')
return symbol_str
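### z-score under the hypergeometric distribution (normal approximation with a finite-population correction):
### r = regulated genes linked to the element, n = all genes linked to the element,
### R = all regulated genes, N = all genes examined; the variance term (1-(n-1)/(N-1)) equals (N-n)/(N-1)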
def zscore(r, n, N, R):
z = (r - n * (R / N)) / math.sqrt(
n * (R / N) * (1 - (R / N)) * (1 - ((n - 1) / (N - 1)))) #z = statistics.zscore(r,n,N,R)
return z
def calculateZScores(hit_count_db, denom_count_db, total_gene_denom_count, total_gene_hit_count, element_type):
N = float(total_gene_denom_count) ###Genes examined
R = float(total_gene_hit_count) ###AS genes
for element in denom_count_db:
element_denom_gene_count = denom_count_db[element]
n = float(element_denom_gene_count) ###all genes associated with element
if element in hit_count_db:
element_hit_gene_count = len(hit_count_db[element])
gene_symbols = formatGeneSymbolHits(hit_count_db[element])
r = float(element_hit_gene_count) ###regulated genes associated with element
else:
r = 0; gene_symbols = ''
try:
z = zscore(r, n, N, R)
except Exception:
z = 0; #print 'error:',element,r,n,N,R; kill
try:
null_z = zscore(0, n, N, R)
except Exception:
null_z = 0; #print 'error:',element,r,n,N,R; kill
zsd = ZScoreData(element, r, n, z, null_z, gene_symbols)
if element_type == 'domain':
original_domain_z_score_data[element] = zsd
elif element_type == 'microRNA':
original_microRNA_z_score_data[element] = zsd
permuted_z_scores[element] = [z]
if perform_element_permutation_analysis == 'no':
        ### The below is an alternative to the permute t-statistic that is more efficient
p = FishersExactTest(r, n, R, N)
zsd.SetP(p)
return N, R
######### Begin Permutation Analysis #######
def calculatePermuteZScores(permute_element_inputs, element_denominator_gene_count, N, R):
###Make this code as efficient as possible
for element_input_gene_count in permute_element_inputs:
for element in element_input_gene_count:
r = element_input_gene_count[element]
n = element_denominator_gene_count[element]
try:
z = statistics.zscore(r, n, N, R)
except Exception:
z = 0
permuted_z_scores[element].append(abs(z))
#if element == '0005488':
#a.append(r)
def calculatePermuteStats(original_element_z_score_data):
for element in original_element_z_score_data:
zsd = original_element_z_score_data[element]
z = abs(permuted_z_scores[element][0])
permute_scores = permuted_z_scores[element][1:] ###Exclude the true value
nullz = zsd.NullZ()
        if abs(nullz) == z: ###Only add the nullz values if they can count towards the p-value (if equal to the original z)
            null_z_to_add = permutations - len(permute_scores)
            permute_scores += [abs(nullz)] * null_z_to_add ###Add null_z's in proportion to the number of times no genes were found for that element
if len(permute_scores) > 0:
p = permute_p(permute_scores, z)
else:
p = 1
#if p>1: p=1
zsd.SetP(p)
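### 2x2 contingency table for Fisher's exact test:
### [[changed & element (r), unchanged & element (n-r)],
###  [changed & not element (R-r), unchanged & not element (N-R-(n-r))]]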
def FishersExactTest(r, n, R, N):
a = r;
b = n - r;
c = R - r;
d = N - R - b
table = [[int(a), int(b)], [int(c), int(d)]]
    try: ### SciPy version - cuts runtime by roughly one third
oddsratio, pvalue = stats.fisher_exact(table)
return pvalue
except Exception:
ft = fishers_exact_test.FishersExactTest(table)
return ft.two_tail_p()
def adjustPermuteStats(original_element_z_score_data):
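    ### This is the Benjamini-Hochberg step-up procedure for computing FDR-adjusted p-values: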
    #1. Sort ascending the original input p value vector. Call this spval. Keep the original indices so you can sort back.
#2. Define a new vector called tmp. tmp= spval. tmp will contain the BH p values.
#3. m is the length of tmp (also spval)
#4. i=m-1
#5 tmp[ i ]=min(tmp[i+1], min((m/i)*spval[ i ],1)) - second to last, last, last/second to last
#6. i=m-2
#7 tmp[ i ]=min(tmp[i+1], min((m/i)*spval[ i ],1))
#8 repeat step 7 for m-3, m-4,... until i=1
#9. sort tmp back to the original order of the input p values.
spval = []
for element in original_element_z_score_data:
zsd = original_element_z_score_data[element]
p = float(zsd.PermuteP())
spval.append([p, element])
spval.sort();
tmp = spval;
m = len(spval);
i = m - 2;
x = 0 ###Step 1-4
while i > -1:
tmp[i] = min(tmp[i + 1][0], min((float(m) / (i + 1)) * spval[i][0], 1)), tmp[i][1];
i -= 1
for (adjp, element) in tmp:
zsd = original_element_z_score_data[element]
zsd.SetAdjP(adjp)
spval = []
def permute_p(null_list, true_value):
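    ### Empirical permutation p-value: the fraction of the null (permuted) scores at least as large as the observed score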
y = 0;
z = 0;
x = permutations
for value in null_list:
if value >= true_value: y += 1
#if true_value > 8: global a; a = null_list; print true_value,y,x;kill
    return (float(y) / float(x)) ###Multiply probability x2?
######### End Permutation Analysis #######
def exportZScoreData(original_element_z_score_data, element_type):
element_output = root_dir + 'AltResults/AlternativeOutput/' + dataset_name + analysis_method + '-' + element_type + '-zscores.txt'
data = export.ExportFile(element_output)
headers = [element_type + '-Name', 'Number Changed', 'Number Measured', 'Percent Changed', 'Zscore', 'PermuteP',
'AdjP', 'Changed GeneSymbols']
headers = string.join(headers, '\t') + '\n'
data.write(headers);
sort_results = []
#print "Results for",len(original_element_z_score_data),"elements exported to",element_output
for element in original_element_z_score_data:
zsd = original_element_z_score_data[element]
try:
results = [zsd.Changed(), zsd.Measured(), zsd.PercentChanged(), zsd.ZScore(), zsd.PermuteP(), zsd.AdjP(),
zsd.AssociatedWithElement()]
except AttributeError:
print element, len(permuted_z_scores[element]);kill
results = [element] + results
results = string.join(results, '\t') + '\n'
sort_results.append([float(zsd.PermuteP()), -1 / float(zsd.Measured()), results])
sort_results.sort()
for values in sort_results:
results = values[2]
data.write(results)
data.close()
def getInputsForPermutationAnalysis(exon_db):
### Filter fold_dbase, which is the proper denominator
probeset_to_gene = {};
denominator_list = []
for probeset in exon_db:
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else:
proceed = 'yes'
if proceed == 'yes':
gene = exon_db[probeset].GeneID()
probeset_to_gene[probeset] = gene
denominator_list.append(probeset)
return probeset_to_gene, denominator_list
def getJunctionSplicingAnnotations(regulated_exon_junction_db):
filter_status = 'yes'
########### Import critical exon annotation for junctions, build through the exon array analysis pipeline - link back to probesets
filtered_arrayids = {};
critical_probeset_annotation_db = {}
if array_type == 'RNASeq' and explicit_data_type == 'null':
critical_exon_annotation_file = root_dir + 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_exons.txt'
elif array_type == 'RNASeq' and explicit_data_type != 'null':
critical_exon_annotation_file = root_dir + 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_junctions.txt'
else:
critical_exon_annotation_file = "AltDatabase/" + species + "/" + array_type + "/" + species + "_Ensembl_" + array_type + "_probesets.txt"
critical_exon_annotation_file = filename = getFilteredFilename(critical_exon_annotation_file)
for uid in regulated_exon_junction_db:
gene = regulated_exon_junction_db[uid].GeneID()
critical_exons = regulated_exon_junction_db[uid].CriticalExons()
"""### It appears that each critical exon for junction arrays can be a concatenation of multiple exons, making this unnecessary
if len(critical_exons)>1 and array_type == 'junction':
critical_exons_joined = string.join(critical_exons,'|')
filtered_arrayids[gene+':'+critical_exon].append(uid)"""
for critical_exon in critical_exons:
try:
try:
filtered_arrayids[gene + ':' + critical_exon].append(uid)
except TypeError:
print gene, critical_exon, uid;kill
except KeyError:
filtered_arrayids[gene + ':' + critical_exon] = [uid]
critical_exon_annotation_db = importSplicingAnnotationDatabase(critical_exon_annotation_file, 'exon-fake',
filtered_arrayids, filter_status);
null = [] ###The file is in exon centric format, so designate array_type as exon
for key in critical_exon_annotation_db:
ced = critical_exon_annotation_db[key]
for junction_probesets in filtered_arrayids[key]:
try:
critical_probeset_annotation_db[junction_probesets].append(ced) ###use for splicing and Exon annotations
except KeyError:
critical_probeset_annotation_db[junction_probesets] = [ced]
for junction_probesets in critical_probeset_annotation_db:
        if len(critical_probeset_annotation_db[junction_probesets]) > 1: ###Thus multiple exons associated, must combine annotations
exon_ids = [];
external_exonids = [];
exon_regions = [];
splicing_events = []
for ed in critical_probeset_annotation_db[junction_probesets]:
ensembl_gene_id = ed.GeneID();
transcript_cluster_id = ed.ExternalGeneID()
exon_ids.append(ed.ExonID());
external_exonids.append(ed.ExternalExonIDs());
exon_regions.append(ed.ExonRegionID());
se = string.split(ed.SplicingEvent(), '|')
for i in se: splicing_events.append(i)
splicing_events = unique.unique(splicing_events) ###remove duplicate entries
exon_id = string.join(exon_ids, '|');
external_exonid = string.join(external_exonids, '|');
exon_region = string.join(exon_regions, '|');
splicing_event = string.join(splicing_events, '|')
probe_data = AffyExonSTData(ensembl_gene_id, exon_id, external_exonid, '', exon_region, splicing_event, '',
'')
if array_type != 'RNASeq': probe_data.setTranscriptCluster(transcript_cluster_id)
critical_probeset_annotation_db[junction_probesets] = probe_data
else:
critical_probeset_annotation_db[junction_probesets] = critical_probeset_annotation_db[junction_probesets][0]
return critical_probeset_annotation_db
def determineExternalType(external_probeset_db):
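### Distinguish JETTA-formatted external results (keyed by transcript cluster 'TC', with
### named columns) from generic two-column input; for generic data, infer which column is
### the score and which the p-value from the magnitudes (values > 1 imply a score/fold).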
external_probeset_db2 = {}
if 'TC' in external_probeset_db:
temp_index = {};
i = 0;
type = 'JETTA'
for name in external_probeset_db['TC'][0]: temp_index[name] = i; i += 1 ### map each column name to its index
if 'PS:norm_expr_fold_change' in temp_index: NI_fold_index = temp_index['PS:norm_expr_fold_change']
if 'MADS:pv_1over2' in temp_index: MADS_p1_index = temp_index['MADS:pv_1over2']
if 'MADS:pv_2over1' in temp_index: MADS_p2_index = temp_index['MADS:pv_2over1']
if 'TC:expr_fold_change' in temp_index: TC_fold_index = temp_index['TC:expr_fold_change'] ### TC_fold_index is an assumed name; this index is not referenced below
if 'PsId' in temp_index: ps_index = temp_index['PsId']
for tc in external_probeset_db:
for list in external_probeset_db[tc]:
try:
NI_fold = float(list[NI_fold_index])
except Exception:
NI_fold = 1
try:
MADSp1 = float(list[MADS_p1_index])
except Exception:
MADSp1 = 1
try:
MADSp2 = float(list[MADS_p2_index])
except Exception:
MADSp2 = 1
if MADSp1 < MADSp2:
pval = MADSp1
else:
pval = MADSp2
probeset = list[ps_index]
external_probeset_db2[probeset] = NI_fold, pval
else:
type = 'generic'
a = [];
b = []
for id in external_probeset_db:
#print external_probeset_db[id]
try:
a.append(abs(float(external_probeset_db[id][0][0])))
except Exception:
null = []
try:
b.append(abs(float(external_probeset_db[id][0][1])))
except Exception:
null = []
a.sort();
b.sort();
pval_index = None;
score_index = None
if len(a) > 0:
if max(a) > 1:
score_index = 0
else:
pval_index = 0
if len(b) > 0:
if max(b) > 1:
score_index = 1
else:
pval_index = 1
for id in external_probeset_db:
if score_index != None:
score = external_probeset_db[id][0][score_index]
else:
score = 1
if pval_index != None:
pval = external_probeset_db[id][0][pval_index]
else:
pval = 1
external_probeset_db2[id] = score, pval
return external_probeset_db2, type
def importExternalProbesetData(dataset_dir):
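### Wrap externally computed splicing results: probesets with an external score become
### ExonData entries in splice_event_list, while the rest are stored as ExcludedExonData
### records (used downstream by DomainGraph).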
excluded_probeset_db = {};
splice_event_list = [];
p_value_call = {};
permute_p_values = {};
gene_expression_diff_db = {}
analyzed_probeset_db = {}
external_probeset_db = importExternalDBList(dataset_dir)
external_probeset_db, ext_type = determineExternalType(external_probeset_db)
for probeset in exon_db: analyzed_probeset_db[probeset] = []
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db) > 0:
temp_db = {}
for probeset in analyzed_probeset_db: temp_db[probeset] = []
for probeset in temp_db:
try:
filtered_probeset_db[probeset]
except KeyError:
del analyzed_probeset_db[probeset]
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing annotation)
if filter_for_AS == 'yes':
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try:
del analyzed_probeset_db[probeset]
except KeyError:
null = []
for probeset in analyzed_probeset_db:
ed = exon_db[probeset];
geneid = ed.GeneID()
td = TranscriptionData('', '');
gene_expression_diff_db[geneid] = td
if probeset in external_probeset_db:
exonid = ed.ExonID();
critical_exon_list = [1, [exonid]]
splicing_index, normIntensityP = external_probeset_db[probeset]
group1_ratios = [];
group2_ratios = [];
exp_log_ratio = '';
ttest_exp_p = '';
### keep the imported normIntensityP (p-value) for the ExonData record below
opposite_SI_log_mean = ''
sid = ExonData(splicing_index, probeset, critical_exon_list, geneid, group1_ratios, group2_ratios, normIntensityP, opposite_SI_log_mean)
splice_event_list.append((splicing_index, sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(0, geneid, 'NA')
excluded_probeset_db[probeset] = eed
print len(splice_event_list), 'pre-filtered external results imported...\n'
return splice_event_list, p_value_call, permute_p_values, excluded_probeset_db, gene_expression_diff_db
def splicingAnalysisAlgorithms(nonlog_NI_db, fold_dbase, dataset_name, gene_expression_diff_db, exon_db, ex_db, si_db,
dataset_dir):
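### Driver for the selected splicing algorithm (ASPIRE, linearregres, splicing-index, FIRMA
### or externally annotated results): collects the significant events, performs domain/miRNA
### over-representation and permutation analyses, then writes the exon- and gene-level reports.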
protein_exon_feature_db = {};
global regulated_exon_junction_db;
global critical_exon_annotation_db;
global probeset_comp_db;
probeset_comp_db = {}
if original_conditions == 2: print "Beginning to run", analysis_method, "algorithm on", dataset_name[0:-1], "data"
if run_from_scratch == 'Annotate External Results':
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db, gene_expression_diff_db = importExternalProbesetData(
dataset_dir)
elif analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
original_exon_db = exon_db
if original_conditions > 2:
splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db = ex_db
splice_event_list, p_value_call, permute_p_values, exon_db, regulated_exon_junction_db = furtherProcessJunctionScores(
splice_event_list, probeset_comp_db, permute_p_values)
else:
splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db = analyzeJunctionSplicing(
nonlog_NI_db)
splice_event_list, p_value_call, permute_p_values, exon_db, regulated_exon_junction_db = furtherProcessJunctionScores(
splice_event_list, probeset_comp_db, permute_p_values)
elif analysis_method == 'splicing-index':
regulated_exon_junction_db = {}
if original_conditions > 2:
excluded_probeset_db = ex_db;
splice_event_list = si_db;
clearObjectsFromMemory(ex_db);
clearObjectsFromMemory(si_db)
ex_db = [];
si_db = [];
permute_p_values = {};
p_value_call = ''
else:
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db = analyzeSplicingIndex(fold_dbase)
elif analysis_method == 'FIRMA':
regulated_exon_junction_db = {}
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db = FIRMAanalysis(fold_dbase)
global permuted_z_scores;
permuted_z_scores = {};
global original_domain_z_score_data;
original_domain_z_score_data = {}
global original_microRNA_z_score_data;
original_microRNA_z_score_data = {}
nonlog_NI_db = [] ### Clear memory of this large dictionary
try:
clearObjectsFromMemory(original_avg_const_exp_db); clearObjectsFromMemory(array_raw_group_values)
except Exception:
null = []
try:
clearObjectsFromMemory(avg_const_exp_db)
except Exception:
null = []
try:
clearObjectsFromMemory(alt_junction_db)
except Exception:
null = []
try:
clearObjectsFromMemory(fold_dbase); fold_dbase = []
except Exception:
null = []
microRNA_full_exon_db, microRNA_count_db, gene_microRNA_denom = ExonAnalyze_module.importmicroRNADataExon(species,
array_type,
exon_db,
microRNA_prediction_method,
explicit_data_type,
root_dir)
#print "MicroRNA data imported"
if use_direct_domain_alignments_only == 'yes':
protein_ft_db_len, domain_associated_genes = importProbesetAligningDomains(exon_db, 'gene')
else:
protein_ft_db_len, domain_associated_genes = importProbesetProteinCompDomains(exon_db, 'gene', 'exoncomp')
if perform_element_permutation_analysis == 'yes':
probeset_to_gene, denominator_list = getInputsForPermutationAnalysis(exon_db)
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
exon_gene_array_translation_file = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_' + array_type + '-exon_probesets.txt'
try:
exon_array_translation_db = importGeneric(exon_gene_array_translation_file)
except Exception:
exon_array_translation_db = {} ### Not present for all species
exon_hits = {};
clearObjectsFromMemory(probeset_comp_db);
probeset_comp_db = []
###Run analyses in the ExonAnalyze_module module to assess functional changes
for (score, ed) in splice_event_list:
geneid = ed.GeneID()
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
pl = string.split(ed.Probeset1(), '|');
probeset1 = pl[0] ### When agglomerated, this is important
uid = (probeset1, ed.Probeset2())
else:
uid = ed.Probeset1()
gene_exon = geneid, uid;
exon_hits[gene_exon] = ed
#print probeset1,ed.Probeset1(),ed.Probeset2(),gene_exon,ed.CriticalExons()
dataset_name_original = analysis_method + '-' + dataset_name[8:-1]
global functional_attribute_db;
global protein_features
### Possibly Block-out code for DomainGraph export
########### Re-import the exon_db for significant entries with full annotations
exon_db = {};
filtered_arrayids = {};
filter_status = 'yes' ###Use this as a means to save memory (import multiple times - only storing different types relevant information)
for (score, entry) in splice_event_list:
try:
probeset = original_exon_db[entry.Probeset1()].Probeset()
except Exception:
probeset = entry.Probeset1()
pl = string.split(probeset, '|');
probeset = pl[0];
filtered_arrayids[probeset] = [] ### When agglomerated, this is important
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
try:
probeset = entry.Probeset2(); filtered_arrayids[probeset] = []
except AttributeError:
null = [] ###occurs when running Splicing
exon_db = importSplicingAnnotationDatabase(probeset_annotations_file, array_type, filtered_arrayids, filter_status);
null = [] ###replace existing exon_db (probeset_annotations_file should be a global)
###domain_gene_changed_count_db is the number of genes for each domain that are found for regulated probesets
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if use_direct_domain_alignments_only == 'yes':
protein_features, domain_gene_changed_count_db, functional_attribute_db = importProbesetAligningDomains(
regulated_exon_junction_db, 'probeset')
else:
protein_features, domain_gene_changed_count_db, functional_attribute_db = importProbesetProteinCompDomains(
regulated_exon_junction_db, 'probeset', 'exoncomp')
else:
if use_direct_domain_alignments_only == 'yes':
protein_features, domain_gene_changed_count_db, functional_attribute_db = importProbesetAligningDomains(
exon_db, 'probeset')
else:
protein_features, domain_gene_changed_count_db, functional_attribute_db = importProbesetProteinCompDomains(
exon_db, 'probeset', 'exoncomp')
filtered_microRNA_exon_db = ExonAnalyze_module.filterMicroRNAProbesetAssociations(microRNA_full_exon_db, exon_hits)
microRNA_full_exon_db = []
###add microRNA data to functional_attribute_db
microRNA_hit_gene_count_db = {};
all_microRNA_gene_hits = {};
microRNA_attribute_db = {};
probeset_mirBS_db = {}
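### For each (gene, exon) with predicted miRNA binding sites: tally the genes hit per miRNA,
### append miR target and sequence annotations to functional_attribute_db, and record
### probeset-to-miRNA links for the optional permutation analysis.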
for (affygene, uid) in filtered_microRNA_exon_db: ### example: ('G7091354', 'E20|') -> [('hsa-miR-130a', 'Pbxip1'), ('hsa-miR-130a', 'Pbxip1'), ...]
###3-1-08
miR_list = []
microRNA_symbol_list = filtered_microRNA_exon_db[(affygene, uid)]
for mir_key in microRNA_symbol_list:
microRNA, gene_symbol, miR_seq, miR_sources = mir_key
#if 'ENS' in microRNA: print microRNA; kill ### bug in some miRNA annotations introduced in the build process
specific_microRNA_tuple = (microRNA, '~')
try:
microRNA_hit_gene_count_db[microRNA].append(affygene)
except KeyError:
microRNA_hit_gene_count_db[microRNA] = [affygene]
###Create a database with the same structure as "protein_exon_feature_db"(below) for over-representation analysis (direction specific), after linking up splice direction data
try:
microRNA_attribute_db[(affygene, uid)].append(specific_microRNA_tuple)
except KeyError:
microRNA_attribute_db[(affygene, uid)] = [specific_microRNA_tuple]
miR_data = microRNA + ':' + miR_sources
miR_list.append(miR_data) ###Add miR information to the record
function_type = ('miR-sequence: ' + '(' + miR_data + ')' + miR_seq,
'~') ###Add miR sequence information to the sequence field of the report
try:
functional_attribute_db[(affygene, uid)].append(function_type)
except KeyError:
functional_attribute_db[(affygene, uid)] = [function_type]
#print (affygene,uid), [function_type];kill
if perform_element_permutation_analysis == 'yes':
try:
probeset_mirBS_db[uid].append(microRNA)
except KeyError:
probeset_mirBS_db[uid] = [microRNA]
miR_str = string.join(miR_list, ',');
miR_str = '(' + miR_str + ')'
function_type = ('microRNA-target' + miR_str, '~')
try:
functional_attribute_db[(affygene, uid)].append(function_type)
except KeyError:
functional_attribute_db[(affygene, uid)] = [function_type]
all_microRNA_gene_hits[affygene] = []
### Remove duplicate gene entries for each microRNA hit (unique-gene counts are computed below)
microRNA_hit_gene_count_db = eliminate_redundant_dict_values(microRNA_hit_gene_count_db)
### Combines any additional feature alignment info identified from 'ExonAnalyze_module.characterizeProteinLevelExonChanges' (e.g. from Ensembl or junction-based queries rather than exon-specific)
### with this database of (Gene,Exon)=[(functional element 1,'~'),(functional element 2,'~')] for downstream result file annotations
domain_hit_gene_count_db = {};
all_domain_gene_hits = {};
probeset_domain_db = {}
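### Mirror the miRNA tally for protein domains/features: count the genes hit per domain and
### record probeset-to-domain links for the optional permutation analysis.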
for entry in protein_features:
gene, uid = entry
for data_tuple in protein_features[entry]:
domain, call = data_tuple
try:
protein_exon_feature_db[entry].append(data_tuple)
except KeyError:
protein_exon_feature_db[entry] = [data_tuple]
try:
domain_hit_gene_count_db[domain].append(gene)
except KeyError:
domain_hit_gene_count_db[domain] = [gene]
all_domain_gene_hits[gene] = []
if perform_element_permutation_analysis == 'yes':
try:
probeset_domain_db[uid].append(domain)
except KeyError:
probeset_domain_db[uid] = [domain]
protein_features = [];
domain_gene_changed_count_db = []
### Remove duplicate gene entries for each domain hit
domain_hit_gene_count_db = eliminate_redundant_dict_values(domain_hit_gene_count_db)
############ Perform Element Over-Representation Analysis ############
"""Domain/FT Fishers-Exact test: with "protein_exon_feature_db" (transformed to "domain_hit_gene_count_db") we can analyze over-representation of domain/features WITHOUT taking into account exon-inclusion or exclusion
Do this using: "domain_associated_genes", which contains domain tuple ('Tyr_pkinase', 'IPR001245') as a key and count in unique genes as the value in addition to
Number of genes linked to splice events "regulated" (SI and Midas p<0.05), number of genes with constitutive probesets
MicroRNA Fishers-Exact test: "filtered_microRNA_exon_db" contains gene/exon to microRNA data. For each microRNA, count the representation in spliced genes microRNA (unique gene count - make this from the mentioned file)
Do this using: "microRNA_count_db"""
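### Illustrative example (hypothetical numbers): if 40 of 200 alternatively regulated genes
### contain domain X (r=40, n=200) while 500 of 10000 denominator genes do (R=500, N=10000),
### calculateZScores (defined elsewhere) scores the enrichment of X from these counts; the
### permutation loop below then provides an empirical p-value for that score.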
domain_gene_counts = {} ### Get unique gene counts for each domain
for domain in domain_associated_genes:
domain_gene_counts[domain] = len(domain_associated_genes[domain])
total_microRNA_gene_hit_count = len(all_microRNA_gene_hits)
total_microRNA_gene_denom_count = len(gene_microRNA_denom)
Nm, Rm = calculateZScores(microRNA_hit_gene_count_db, microRNA_count_db, total_microRNA_gene_denom_count,
total_microRNA_gene_hit_count, 'microRNA')
gene_microRNA_denom = []
summary_data_db['miRNA_gene_denom'] = total_microRNA_gene_denom_count
summary_data_db['miRNA_gene_hits'] = total_microRNA_gene_hit_count
summary_data_db['alt_events'] = len(splice_event_list)
total_domain_gene_hit_count = len(all_domain_gene_hits)
total_domain_gene_denom_count = protein_ft_db_len ###genes connected to domain annotations
Nd, Rd = calculateZScores(domain_hit_gene_count_db, domain_gene_counts, total_domain_gene_denom_count,
total_domain_gene_hit_count, 'domain')
microRNA_hit_gene_counts = {};
gene_to_miR_db = {} ### Get unique gene counts for each miR and the converse
for microRNA in microRNA_hit_gene_count_db:
microRNA_hit_gene_counts[microRNA] = len(microRNA_hit_gene_count_db[microRNA])
for gene in microRNA_hit_gene_count_db[microRNA]:
try:
gene_to_miR_db[gene].append(microRNA)
except KeyError:
gene_to_miR_db[gene] = [microRNA]
gene_to_miR_db = eliminate_redundant_dict_values(gene_to_miR_db)
if perform_element_permutation_analysis == 'yes':
###Begin Domain/microRNA Permute Analysis
input_count = len(splice_event_list) ### Number of probesets or probeset pairs (junction array) alternatively regulated
original_increment = int(permutations / 20);
increment = original_increment
start_time = time.time();
print 'Permuting the Domain/miRBS analysis %d times' % permutations
x = 0;
permute_domain_inputs = [];
permute_miR_inputs = []
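### Null model: each iteration draws a random probeset set of the same size as the significant
### set from denominator_list and re-counts genes per domain/miRNA; the observed Z-scores are
### then evaluated against these permuted counts.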
while x < permutations:
if x == increment: increment += original_increment; print '*',
permute_input_list = random.sample(denominator_list, input_count);
x += 1
permute_domain_input_gene_counts = countGenesForElement(permute_input_list, probeset_to_gene,
probeset_domain_db)
permute_domain_inputs.append(permute_domain_input_gene_counts)
permute_miR_input_gene_counts = countGenesForElement(permute_input_list, probeset_to_gene,
probeset_mirBS_db)
permute_miR_inputs.append(permute_miR_input_gene_counts)
calculatePermuteZScores(permute_domain_inputs, domain_gene_counts, Nd, Rd)
calculatePermuteZScores(permute_miR_inputs, microRNA_hit_gene_counts, Nm, Rm)
calculatePermuteStats(original_domain_z_score_data)
calculatePermuteStats(original_microRNA_z_score_data)
adjustPermuteStats(original_domain_z_score_data)
adjustPermuteStats(original_microRNA_z_score_data)
exportZScoreData(original_domain_z_score_data, 'ft-domain')
exportZScoreData(original_microRNA_z_score_data, 'microRNA')
end_time = time.time();
time_diff = int(end_time - start_time)
print "Enrichment p-values for Domains/miRBS calculated in %d seconds" % time_diff
denominator_list = []
try:
clearObjectsFromMemory(original_microRNA_z_score_data)
except Exception:
null = []
microRNA_hit_gene_count_db = {};
microRNA_hit_gene_counts = {};
clearObjectsFromMemory(permuted_z_scores);
permuted_z_scores = [];
original_domain_z_score_data = []
if (array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null')) and analysis_method != 'splicing-index':
critical_probeset_annotation_db = getJunctionSplicingAnnotations(regulated_exon_junction_db)
probeset_aligning_db = importProbesetAligningDomains(regulated_exon_junction_db, 'perfect_match')
else:
probeset_aligning_db = importProbesetAligningDomains(exon_db, 'perfect_match')
############ Export exon/junction level results ############
splice_event_db = {};
protein_length_list = [];
aspire_gene_results = {}
critical_gene_exons = {};
unique_exon_event_db = {};
comparison_count = {};
direct_domain_gene_alignments = {}
functional_attribute_db2 = {};
protein_exon_feature_db2 = {};
microRNA_exon_feature_db2 = {}
external_exon_annot = {};
gene_exon_region = {};
gene_smallest_p = {};
gene_splice_event_score = {};
alternatively_reg_tc = {}
aspire_output = root_dir + 'AltResults/AlternativeOutput/' + dataset_name + analysis_method + '-exon-inclusion-results.txt'
data = export.ExportFile(aspire_output)
goelite_output = root_dir + 'GO-Elite/AltExon/AS.' + dataset_name + analysis_method + '.txt'
goelite_data = export.ExportFile(goelite_output);
gcn = 0
#print 'LENGTH OF THE GENE ANNOTATION DATABASE',len(annotate_db)
if array_type != 'AltMouse':
DG_output = root_dir + 'AltResults/DomainGraph/' + dataset_name + analysis_method + '-DomainGraph.txt'
DG_data = export.ExportFile(DG_output)
### Write out only the inclusion hits to a subdir
SRFinder_inclusion = root_dir + 'GO-Elite/exon/' + dataset_name + analysis_method + '-inclusion.txt'
SRFinder_in_data = export.ExportFile(SRFinder_inclusion)
SRFinder_in_data.write('probeset\tSystemCode\tdeltaI\tp-value\n')
### Write out only the exclusion hits to a subdir
SRFinder_exclusion = root_dir + 'GO-Elite/exon/' + dataset_name + analysis_method + '-exclusion.txt'
SRFinder_ex_data = export.ExportFile(SRFinder_exclusion)
SRFinder_ex_data.write('probeset\tSystemCode\tdeltaI\tp-value\n')
### Write out only the denominator set to a subdir
SRFinder_denom = root_dir + 'GO-Elite/exon_denominator/' + species + '-' + array_type + '.txt'
SRFinder_denom_data = export.ExportFile(SRFinder_denom)
SRFinder_denom_data.write('probeset\tSystemCode\n')
ens_version = unique.getCurrentGeneDatabaseVersion()
ProcessedSpliceData_output = string.replace(DG_output, 'DomainGraph', 'ProcessedSpliceData') ### Same as the DG export but without converting the probeset IDs for non-exon arrays
ProcessedSpliceData_data = export.ExportFile(ProcessedSpliceData_output)
if ens_version == '':
try:
elite_db_versions = UI.returnDirectoriesNoReplace('/AltDatabase')
if len(elite_db_versions) > 0: ens_version = elite_db_versions[0]
except Exception:
null = []
ens_version = string.replace(ens_version, 'EnsMart', 'ENS_')
DG_data.write(ens_version + "\n")
DG_data.write("Probeset\tGeneID\tRegulation call\tSI\tSI p-value\tMiDAS p-value\n")
ProcessedSpliceData_data.write(
"ExonID(s)\tGeneID\tRegulation call\t" + analysis_method + "\t" + analysis_method + " p-value\tMiDAS p-value\n")
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
if perform_permutation_analysis == 'yes':
p_value_type = 'permutation-values'
else:
p_value_type = 'FDR-' + p_value_call
if array_type == 'AltMouse': gene_name = 'AffyGene'; extra_transcript_annotation = 'block_structure'; extra_exon_annotation = 'splice_event_description'
if array_type == 'junction' or array_type == 'RNASeq':
gene_name = 'Ensembl';
extra_transcript_annotation = 'transcript cluster ID';
extra_exon_annotation = 'distal exon-region-ID'
goelite_data.write("GeneID\tSystemCode\tscore\tp-value\tSymbol\tExonIDs\n")
if array_type == 'RNASeq':
id1 = 'junctionID-1';
id2 = 'junctionID-2';
loc_column = 'exon/junction locations'
extra_transcript_annotation = 'Known/Novel Feature'
else:
id1 = 'probeset1'; id2 = 'probeset2'; loc_column = 'probeset locations'
title = [gene_name, analysis_method, 'symbol', 'description', 'exons1', 'exons2', 'regulation_call',
'event_call', id1, 'norm-p1', id2, 'norm-p2', 'fold1', 'fold2']
title += ['adj-fold1', 'adj-fold2', extra_transcript_annotation, 'critical_up_exons', 'critical_down_exons',
'functional_prediction', 'uniprot-ens_feature_predictions']
title += ['peptide_predictions', 'exp1', 'exp2', 'ens_overlapping_domains', 'constitutive_baseline_exp',
p_value_call, p_value_type, 'permutation-false-positives']
title += ['gene-expression-change', extra_exon_annotation, 'ExternalExonIDs', 'ExonRegionID', 'SplicingEvent',
'ExonAnnotationScore', 'large_splicing_diff', loc_column]
else:
goelite_data.write("GeneID\tSystemCode\tSI\tSI p-value\tMiDAS p-value\tSymbol\tExonID\n")
if analysis_method == 'splicing-index':
NIpval = 'SI_rawp';
splicing_score = 'Splicing-Index';
lowestp = 'lowest_p (MIDAS or SI)';
AdjPcolumn = 'Deviation-Value'; #AdjPcolumn = 'SI_adjp'
else:
NIpval = 'FIRMA_rawp';
splicing_score = 'FIRMA_fold';
lowestp = 'lowest_p (MIDAS or FIRMA)';
AdjPcolumn = 'Deviation-Value'; #AdjPcolumn = 'FIRMA_adjp'
if array_type == 'RNASeq':
id1 = 'junctionID';
pval_column = 'junction p-value';
loc_column = 'junction location'
else:
id1 = 'probeset'; pval_column = 'probeset p-value'; loc_column = 'probeset location'
if array_type == 'RNASeq':
secondary_ID_title = 'Known/Novel Feature'
else:
secondary_ID_title = 'alternative gene ID'
title = ['Ensembl', splicing_score, 'symbol', 'description', 'exons', 'regulation_call', id1, pval_column,
lowestp, 'midas p-value', 'fold', 'adjfold']
title += ['up_exons', 'down_exons', 'functional_prediction', 'uniprot-ens_feature_predictions',
'peptide_predictions', 'ens_overlapping_domains', 'baseline_probeset_exp']
title += ['constitutive_baseline_exp', NIpval, AdjPcolumn, 'gene-expression-change']
title += [secondary_ID_title, 'ensembl exons', 'constitutive exon', 'exon-region-ID', 'exon annotations',
'distal exon-region-ID', loc_column]
title = string.join(title, '\t') + '\n'
try:
if original_conditions > 2: title = string.replace(title, 'regulation_call', 'conditions_compared')
except Exception:
null = []
data.write(title)
### Calculate adjusted normalized intensity p-values
fdr_exon_stats = {}
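### Collect raw normalized-intensity p-values for significant and non-significant probesets
### alike, then let adjustPermuteStats (defined elsewhere) derive adjusted p-values across the full set.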
if analysis_method != 'ASPIRE' and 'linearregres' not in analysis_method:
for (score, entry) in splice_event_list: ### These are all "significant entries"
fds = FDRStats(entry.TTestNormalizedRatios())
fdr_exon_stats[entry.Probeset1()] = fds
for probeset in excluded_probeset_db: ### These are all "non-significant entries"
fds = FDRStats(excluded_probeset_db[probeset].TTestNormalizedRatios())
fdr_exon_stats[probeset] = fds
try:
adjustPermuteStats(fdr_exon_stats)
except Exception:
null = []
### Calculate the score average and stdev for each gene, to later derive a per-exon Deviation Value
gene_deviation_db = {}
for (score, entry) in splice_event_list:
dI = entry.Score();
geneID = entry.GeneID()
try:
gene_deviation_db[geneID].append(dI)
except Exception:
gene_deviation_db[geneID] = [dI]
for i in excluded_probeset_db:
entry = excluded_probeset_db[i]
try:
dI = entry.Score(); geneID = entry.GeneID()
except Exception:
geneID = entry[1]; dI = entry[-1]
try:
gene_deviation_db[geneID].append(dI)
except Exception:
None ### Don't include genes with no hits
for geneID in gene_deviation_db:
try:
avg_dI = statistics.avg(gene_deviation_db[geneID])
stdev_dI = statistics.stdev(gene_deviation_db[geneID])
gene_deviation_db[geneID] = avg_dI, stdev_dI
except Exception:
gene_deviation_db[geneID] = 'NA', 'NA'
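### gene_deviation_db now maps geneID -> (mean dI, stdev dI). A minimal sketch of the intended
### Deviation Value (deviation() is defined elsewhere in this module):
###     DV = (dI - avg_dI) / stdev_dI   ### hypothetical form; see deviation() for the actual one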
event_count = 0
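### Main export loop: one output row per significant event, combining expression, exon/junction,
### domain, miRNA and structural annotations; reciprocal-junction methods (ASPIRE/linearregres)
### and single-probeset methods (splicing-index/FIRMA) branch below.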
for (score, entry) in splice_event_list:
event_count += 1
dI = entry.Score();
probeset1 = entry.Probeset1();
regulation_call = entry.RegulationCall();
event_call = entry.EventCall();
critical_exon_list = entry.CriticalExonTuple()
probeset1_display = probeset1;
selected_probeset = probeset1
if agglomerate_inclusion_probesets == 'yes':
if array_type == 'AltMouse':
exons1 = original_exon_db[probeset1].ExonID()
try:
probeset1 = original_exon_db[probeset1].Probeset()
except Exception:
null = []
else:
probeset1 = probeset1;
exons1 = original_exon_db[probeset1].ExonID()
try:
selected_probeset = original_exon_db[probeset1].Probeset()
except Exception:
selected_probeset = probeset1
else:
try:
exons1 = exon_db[probeset1].ExonID()
except Exception:
print probeset1, len(exon_db)
for i in exon_db: print i; break
kill
critical_probeset_list = [selected_probeset]
affygene = entry.GeneID()
### Calculate deviation value for each exon
avg_dI, stdev_dI = gene_deviation_db[affygene]
try:
DV = deviation(dI, avg_dI, stdev_dI) ### Note: the dI values are always in log2 space, independent of platform
except Exception:
DV = 'NA'
if affygene in annotate_db:
description = annotate_db[affygene].Description(); symbol = annotate_db[affygene].Symbol()
else:
description = ''; symbol = ''
ped1 = entry.ProbesetExprData1();
adjfold1 = ped1.AdjFold();
exp1 = ped1.BaselineExp();
fold1 = ped1.FoldChange();
rawp1 = ped1.ExpPval()
### Get Constitutive expression values
baseline_const_exp = entry.ConstitutiveExpression() ### For multiple group comparisons
#if affygene in gene_expression_diff_db: mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
try:
mean_fold_change = str(entry.ConstitutiveFold()) ### For multi-condition analyses, the gene expression is dependent on the conditions compared
except Exception:
mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
probeset2 = entry.Probeset2();
exons2 = exon_db[probeset2].ExonID();
rawp1 = str(entry.TTestNormalizedRatios());
rawp2 = str(entry.TTestNormalizedRatios2());
critical_probeset_list.append(probeset2)
ped2 = entry.ProbesetExprData2();
adjfold2 = ped2.AdjFold();
exp2 = ped2.BaselineExp();
fold2 = ped2.FoldChange()
try:
location_summary = original_exon_db[selected_probeset].LocationSummary() + '|' + original_exon_db[probeset2].LocationSummary()
except Exception:
try:
location_summary = exon_db[selected_probeset].LocationSummary() + '|' + exon_db[probeset2].LocationSummary()
except Exception:
location_summary = ''
if array_type == 'AltMouse':
extra_transcript_annotation = exon_db[probeset1].GeneStructure()
else:
try:
extra_exon_annotation = last_exon_region_db[affygene]
except KeyError:
extra_exon_annotation = ''
try:
tc1 = original_exon_db[probeset1].SecondaryGeneID()
tc2 = original_exon_db[probeset2].SecondaryGeneID() ### Transcript Cluster
probeset_tc = makeUnique([tc1, tc2])
extra_transcript_annotation = string.join(probeset_tc, '|')
try:
alternatively_reg_tc[affygene] += probeset_tc
except KeyError:
alternatively_reg_tc[affygene] = probeset_tc
except Exception:
extra_transcript_annotation = ''
if array_type == 'RNASeq':
try:
extra_transcript_annotation = entry.NovelEvent() ### Instead of secondary gene ID, list known vs. novel reciprocal junction annotation
except Exception:
None
exp_list = [float(exp1), float(exp2), float(exp1) + float(fold1), float(exp2) + float(fold2)];
exp_list.sort();
exp_list.reverse()
probeset_tuple = (probeset1, probeset2)
else:
try:
exp_list = [float(exp1), float(exp1) + float(fold1)]; exp_list.sort(); exp_list.reverse()
except Exception:
exp_list = ['']
probeset_tuple = (probeset1)
highest_exp = exp_list[0]
### Use the permuted p-value or the lowest expression junction p-value, depending on the situation
###This p-value is used to filter out aspire events for further analyses
if len(p_value_call) > 0:
if probeset_tuple in permute_p_values:
lowest_raw_p, pos_permute, total_permute, false_pos = permute_p_values[probeset_tuple]
else:
lowest_raw_p = "NA"; pos_permute = "NA"; total_permute = "NA"; false_pos = "NA"
else:
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
raw_p_list = [entry.TTestNormalizedRatios(),
entry.TTestNormalizedRatios2()] #raw_p_list = [float(rawp1),float(rawp2)]; raw_p_list.sort()
else:
try:
raw_p_list = [float(entry.TTestNormalizedRatios())] ### Could also be rawp1, but this is more appropriate
except Exception:
raw_p_list = [1] ### Occurs when p='NA'
raw_p_list.sort()
lowest_raw_p = raw_p_list[0];
pos_permute = "NA";
total_permute = "NA";
false_pos = "NA"
if perform_permutation_analysis == 'yes':
p_value_extra = str(pos_permute) + ' out of ' + str(total_permute)
else:
p_value_extra = str(pos_permute)
up_exons = '';
down_exons = '';
up_exon_list = [];
down_exon_list = [];
gene_exon_list = []
exon_data = critical_exon_list
variable = exon_data[0]
if variable == 1 and regulation_call == 'upregulated':
for exon in exon_data[1]:
up_exons = up_exons + exon + ',';
up_exon_list.append(exon)
key = affygene, exon + '|';
gene_exon_list.append(key)
elif variable == 1 and regulation_call == 'downregulated':
for exon in exon_data[1]:
down_exons = down_exons + exon + ',';
down_exon_list.append(exon)
key = affygene, exon + '|';
gene_exon_list.append(key)
else:
try:
exon1 = exon_data[1][0]; exon2 = exon_data[1][1]
except Exception:
print exon_data;kill
if adjfold1 > 0:
up_exons = up_exons + exon1 + ',';
down_exons = down_exons + exon2 + ','
up_exon_list.append(exon1);
down_exon_list.append(exon2)
key = affygene, exon1 + '|';
gene_exon_list.append(key);
key = affygene, exon2 + '|';
gene_exon_list.append(key)
else:
up_exons = up_exons + exon2 + ',';
down_exons = down_exons + exon1 + ','
up_exon_list.append(exon2);
down_exon_list.append(exon1)
key = affygene, exon1 + '|';
gene_exon_list.append(key);
key = affygene, exon2 + '|';
gene_exon_list.append(key)
up_exons = up_exons[0:-1];
down_exons = down_exons[0:-1]
try: ### Get comparisons group annotation data for multigroup comparison analyses
if original_conditions > 2:
try:
regulation_call = ped1.Annotation()
except Exception:
null = []
except Exception:
null = []
###Format functional results based on exon level fold change
null = []
#global a; a = exon_hits; global b; b=microRNA_attribute_db; kill
"""if 'G7100684@J934332_RC@j_at' in critical_probeset_list:
print probeset1, probeset2, gene, critical_probeset_list, 'blah'
if ('G7100684', ('G7100684@J934333_RC@j_at', 'G7100684@J934332_RC@j_at')) in functional_attribute_db:
print functional_attribute_db[('G7100684', ('G7100684@J934333_RC@j_at', 'G7100684@J934332_RC@j_at'))];blah
blah"""
new_functional_attribute_str, functional_attribute_list2, seq_attribute_str, protein_length_list = format_exon_functional_attributes(
affygene, critical_probeset_list, functional_attribute_db, up_exon_list, down_exon_list,
protein_length_list)
new_uniprot_exon_feature_str, uniprot_exon_feature_list, null, null = format_exon_functional_attributes(
affygene, critical_probeset_list, protein_exon_feature_db, up_exon_list, down_exon_list, null)
null, microRNA_exon_feature_list, null, null = format_exon_functional_attributes(affygene,
critical_probeset_list,
microRNA_attribute_db,
up_exon_list, down_exon_list,
null)
if len(new_functional_attribute_str) == 0: new_functional_attribute_str = ' '
if len(new_uniprot_exon_feature_str) == 0: new_uniprot_exon_feature_str = ' '
if len(seq_attribute_str) > 12000: seq_attribute_str = 'The sequence is too long to report for spreadsheet analysis'
### Add entries to a database to quantify the number of reciprocal isoforms regulated
reciprocal_isoform_data = [len(critical_exon_list[1]), critical_exon_list[1], event_call, regulation_call]
try:
float((lowest_raw_p))
except ValueError:
lowest_raw_p = 0
if (float((lowest_raw_p)) <= p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try:
unique_exon_event_db[affygene].append(reciprocal_isoform_data)
except KeyError:
unique_exon_event_db[affygene] = [reciprocal_isoform_data]
### Add functional attribute information to a new database
for item in uniprot_exon_feature_list:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p)) <= p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try:
protein_exon_feature_db2[affygene, attribute].append(exon)
except KeyError:
protein_exon_feature_db2[affygene, attribute] = [exon]
### Add functional attribute information to a new database
"""Database not used for exon/junction data export but for over-representation analysis (direction specific)"""
for item in microRNA_exon_feature_list:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p)) <= p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try:
microRNA_exon_feature_db2[affygene, attribute].append(exon)
except KeyError:
microRNA_exon_feature_db2[affygene, attribute] = [exon]
### Add functional attribute information to a new database
for item in functional_attribute_list2:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p)) <= p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try:
functional_attribute_db2[affygene, attribute].append(exon)
except KeyError:
functional_attribute_db2[affygene, attribute] = [exon]
try:
abs_fold = abs(float(mean_fold_change));
fold_direction = 'down';
fold1_direction = 'down';
fold2_direction = 'down'
large_splicing_diff1 = 0;
large_splicing_diff2 = 0;
large_splicing_diff = 'null';
opposite_splicing_pattern = 'no'
if float(mean_fold_change) > 0: fold_direction = 'up'
if float(fold1) > 0: fold1_direction = 'up'
if fold1_direction != fold_direction:
if float(fold1) > float(mean_fold_change): large_splicing_diff1 = float(fold1) - float(mean_fold_change)
except Exception:
fold_direction = '';
large_splicing_diff = '';
opposite_splicing_pattern = ''
if analysis_method != 'ASPIRE' and 'linearregres' not in analysis_method:
ed = exon_db[probeset1]
else:
try:
ed = critical_probeset_annotation_db[selected_probeset, probeset2]
except KeyError:
try:
ed = exon_db[selected_probeset] ###not useful data here, but the objects need to exist
except KeyError:
ed = original_exon_db[probeset1]
ucsc_splice_annotations = ["retainedIntron", "cassetteExon", "strangeSplice", "altFivePrime", "altThreePrime",
"altPromoter", "bleedingExon"]
custom_annotations = ["alt-3'", "alt-5'", "alt-C-term", "alt-N-term", "cassette-exon",
                      "exon-region-exclusion", "intron-retention", "mutually-exclusive-exon", "trans-splicing"]
custom_exon_annotations_found = 'no';
ucsc_annotations_found = 'no';
exon_annot_score = 0
if len(ed.SplicingEvent()) > 0:
for annotation in ucsc_splice_annotations:
if annotation in ed.SplicingEvent(): ucsc_annotations_found = 'yes'
for annotation in custom_annotations:
if annotation in ed.SplicingEvent(): custom_exon_annotations_found = 'yes'
if custom_exon_annotations_found == 'yes' and ucsc_annotations_found == 'no':
exon_annot_score = 3
elif ucsc_annotations_found == 'yes' and custom_exon_annotations_found == 'no':
exon_annot_score = 4
elif ucsc_annotations_found == 'yes' and custom_exon_annotations_found == 'yes':
exon_annot_score = 5
else:
exon_annot_score = 2
try:
gene_splice_event_score[affygene].append(exon_annot_score) ###store for gene level results
except KeyError:
gene_splice_event_score[affygene] = [exon_annot_score]
try:
gene_exon_region[affygene].append(ed.ExonRegionID()) ###store for gene level results
except KeyError:
gene_exon_region[affygene] = [ed.ExonRegionID()]
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
if float(fold2) > 0: fold2_direction = 'up'
if fold2_direction != fold_direction:
if float(fold2) > float(mean_fold_change):
large_splicing_diff2 = float(fold2) - float(mean_fold_change)
if abs(large_splicing_diff2) > large_splicing_diff1:
large_splicing_diff = str(large_splicing_diff2)
else:
large_splicing_diff = str(large_splicing_diff1)
if fold1_direction != fold2_direction and abs(float(fold1)) > 0.4 and abs(float(fold2)) > 0.4 and abs(float(mean_fold_change)) < max([float(fold2), float(fold1)]):
opposite_splicing_pattern = 'yes'
### Annotate splicing events based on exon structure data
if array_type == 'AltMouse':
extra_exon_annotation = ExonAnnotate_module.annotate_splice_event(exons1, exons2,
extra_transcript_annotation)
try:
splice_event_db[extra_exon_annotation] += 1
except KeyError:
splice_event_db[extra_exon_annotation] = 1
try:
direct_domain_alignments = probeset_aligning_db[selected_probeset, probeset2]
try:
direct_domain_gene_alignments[affygene] += ', ' + direct_domain_alignments
except KeyError:
direct_domain_gene_alignments[affygene] = direct_domain_alignments
except KeyError:
direct_domain_alignments = ' '
splicing_event = ed.SplicingEvent()
if array_type == 'RNASeq':
splicing_event = checkForTransSplicing(probeset1_display, splicing_event)
splicing_event = checkForTransSplicing(probeset2, splicing_event)
exp1 = covertLogExpressionToNonLog(exp1)
exp2 = covertLogExpressionToNonLog(exp2)
baseline_const_exp = covertLogExpressionToNonLog(baseline_const_exp)
fold1 = covertLogFoldToNonLog(fold1)
fold2 = covertLogFoldToNonLog(fold2)
adjfold1 = covertLogFoldToNonLog(adjfold1)
adjfold2 = covertLogFoldToNonLog(adjfold2)
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
### Annotate splicing events based on pre-computed and existing annotations
values = [affygene, dI, symbol, fs(description), exons1, exons2, regulation_call, event_call,
probeset1_display, rawp1, probeset2, rawp2, fold1, fold2, adjfold1, adjfold2]
values += [extra_transcript_annotation, up_exons, down_exons, fs(new_functional_attribute_str),
fs(new_uniprot_exon_feature_str), fs(seq_attribute_str), exp1, exp2,
fs(direct_domain_alignments)]
values += [str(baseline_const_exp), str(lowest_raw_p), p_value_extra, str(false_pos), mean_fold_change,
extra_exon_annotation]
values += [ed.ExternalExonIDs(), ed.ExonRegionID(), splicing_event, str(exon_annot_score),
large_splicing_diff, location_summary]
exon_sets = abs(float(dI)), regulation_call, event_call, exons1, exons2, ''
### Export significant reciprocal junction pairs and scores
values_ps = [probeset1 + '|' + probeset2, affygene, 'changed', dI, 'NA', str(lowest_raw_p)];
values_ps = string.join(values_ps, '\t') + '\n'
try:
ProcessedSpliceData_data.write(values_ps)
except Exception:
None
values_ge = [affygene, 'En', dI, str(lowest_raw_p), symbol, probeset1_display + ' | ' + probeset2];
values_ge = string.join(values_ge, '\t') + '\n'
if array_type == 'junction' or array_type == 'RNASeq': ### Only applies to reciprocal junction sensitive platforms (but not currently AltMouse)
goelite_data.write(values_ge)
if array_type == 'junction' or array_type == 'RNASeq': ### Only applies to reciprocal junction sensitive platforms (but not currently AltMouse)
try:
exon_probeset = exon_array_translation_db[affygene + ':' + exon_data[1][0]][0]; probeset1 = exon_probeset; gcn += 1
except Exception:
probeset1 = None #probeset1 = affygene+':'+exon_data[1][0]
try:
null = int(probeset1) ### Must be an int to work in DomainGraph
values_dg = [probeset1, affygene, 'changed', dI, 'NA', str(lowest_raw_p)];
values_dg = string.join(values_dg, '\t') + '\n'
if array_type == 'junction' or array_type == 'RNASeq':
DG_data.write(values_dg)
values_srf = string.join([probeset1, 'Ae', dI, str(lowest_raw_p)], '\t') + '\n'
if float(dI) > 0:
SRFinder_ex_data.write(values_srf)
elif float(dI) < 0:
SRFinder_in_data.write(values_srf)
except Exception:
null = []
else:
si_pvalue = lowest_raw_p
if si_pvalue == 1: si_pvalue = 'NA'
if probeset1 in midas_db:
midas_p = str(midas_db[probeset1])
if float(midas_p) < lowest_raw_p: lowest_raw_p = float(midas_p) ### Keep the lowest of the MiDAS and SI p-values
else:
midas_p = ''
###Determine what type of exon-annotations are present to assign a confidence score
if affygene in annotate_db: ### Determine the transcript clusters that comprise a splice event (gene- and exon-specific)
try:
gene_tc = annotate_db[affygene].TranscriptClusterIDs()
try:
probeset_tc = [ed.SecondaryGeneID()]
except Exception:
probeset_tc = [affygene]
for transcript_cluster in gene_tc: probeset_tc.append(transcript_cluster)
probeset_tc = makeUnique(probeset_tc)
except Exception:
probeset_tc = ''; gene_tc = ''
else:
try:
try:
probeset_tc = [ed.SecondaryGeneID()]
except Exception:
probeset_tc = [affygene]
probeset_tc = makeUnique(probeset_tc)
except Exception:
probeset_tc = ''; gene_tc = ''
cluster_number = len(probeset_tc)
try:
alternatively_reg_tc[affygene] += probeset_tc
except KeyError:
alternatively_reg_tc[affygene] = probeset_tc
try:
last_exon_region = last_exon_region_db[affygene]
except KeyError:
last_exon_region = ''
if cluster_number > 1: exon_annot_score = 1
direct_domain_alignments = ' '
if array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null':
try:
direct_domain_alignments = probeset_aligning_db[probeset1]
try:
direct_domain_gene_alignments[affygene] += ', ' + direct_domain_alignments
except KeyError:
direct_domain_gene_alignments[affygene] = direct_domain_alignments
except KeyError:
direct_domain_alignments = ' '
else:
try:
direct_domain_alignments = probeset_aligning_db[affygene + ':' + exons1]
except KeyError:
direct_domain_alignments = ''
if array_type == 'RNASeq':
exp1 = covertLogExpressionToNonLog(exp1)
baseline_const_exp = covertLogExpressionToNonLog(baseline_const_exp)
fold1 = covertLogFoldToNonLog(fold1)
adjfold1 = covertLogFoldToNonLog(adjfold1)
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
try:
adj_SIp = fdr_exon_stats[probeset1].AdjP()
except Exception:
adj_SIp = 'NA'
try:
secondary_geneid = ed.SecondaryGeneID()
except Exception:
secondary_geneid = affygene
if array_type == 'RNASeq':
secondary_geneid = ed.NovelExon()
### Write Splicing Index results
values = [affygene, dI, symbol, fs(description), exons1, regulation_call, probeset1, rawp1,
str(lowest_raw_p), midas_p, fold1, adjfold1]
values += [up_exons, down_exons, fs(new_functional_attribute_str), fs(new_uniprot_exon_feature_str),
fs(seq_attribute_str), fs(direct_domain_alignments), exp1]
values += [str(baseline_const_exp), str(si_pvalue), DV, mean_fold_change, secondary_geneid,
ed.ExternalExonIDs()]
values += [ed.Constitutive(), ed.ExonRegionID(), ed.SplicingEvent(), last_exon_region,
ed.LocationSummary()] #str(exon_annot_score)
if probeset1 in filtered_probeset_db: values += filtered_probeset_db[probeset1]
exon_sets = abs(float(dI)), regulation_call, event_call, exons1, exons1, midas_p
probeset = probeset1 ### store original ID (gets converted below)
### Write DomainGraph results
try:
midas_p = str(midas_db[probeset1])
except KeyError:
midas_p = 'NA'
### Export significant exon/junction IDs and scores
values_ps = [probeset1, affygene, 'changed', dI, 'NA', str(lowest_raw_p)];
values_ps = string.join(values_ps, '\t') + '\n'
try:
ProcessedSpliceData_data.write(values_ps)
except Exception:
None
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null':
try:
exon_probeset = exon_array_translation_db[affygene + ':' + exon_data[1][0]][0]; probeset1 = exon_probeset; gcn += 1
except Exception:
probeset1 = None ### don't write out a line
else:
try:
exon_probeset = exon_array_translation_db[probeset1][0]; probeset1 = exon_probeset; gcn += 1
except Exception:
probeset1 = None; #null=[]; #print gcn, probeset1;kill - force an error - new in version 2.0.8
try:
null = int(probeset1)
values_dg = [probeset1, affygene, 'changed', dI, str(si_pvalue), midas_p];
values_dg = string.join(values_dg, '\t') + '\n'
DG_data.write(values_dg)
values_srf = string.join([probeset1, 'Ae', dI, str(lowest_raw_p)], '\t') + '\n'
if float(dI) > 0:
SRFinder_ex_data.write(values_srf)
elif float(dI) < 0:
SRFinder_in_data.write(values_srf)
except Exception:
null = []
values_ge = [affygene, 'En', dI, str(si_pvalue), midas_p, symbol, probeset];
values_ge = string.join(values_ge, '\t') + '\n'
goelite_data.write(values_ge)
if len(ed.SplicingEvent()) > 2:
try:
external_exon_annot[affygene].append(ed.SplicingEvent())
except KeyError:
external_exon_annot[affygene] = [ed.SplicingEvent()]
try:
values = string.join(values, '\t') + '\n'
except Exception:
print values;kill
data.write(values)
###Process data for gene level reports
if float((lowest_raw_p)) <= p_threshold or false_pos < 2 or lowest_raw_p == 1:
try:
comparison_count[affygene] += 1
except KeyError:
comparison_count[affygene] = 1
try:
aspire_gene_results[affygene].append(exon_sets)
except KeyError:
aspire_gene_results[affygene] = [exon_sets]
for exon in up_exon_list:
exon_info = exon, 'upregulated'
try:
critical_gene_exons[affygene].append(exon_info)
except KeyError:
critical_gene_exons[affygene] = [exon_info]
for exon in down_exon_list:
exon_info = exon, 'downregulated'
try:
critical_gene_exons[affygene].append(exon_info)
except KeyError:
critical_gene_exons[affygene] = [exon_info]
data.close()
print event_count, analysis_method, "results written to:", aspire_output, '\n'
try:
clearObjectsFromMemory(original_exon_db)
except Exception:
null = []
exon_array_translation_db = [];
original_exon_db = [];
probeset_to_gene = []
### Finish writing the DomainGraph export file with non-significant probesets
if array_type != 'AltMouse':
for probeset in excluded_probeset_db:
eed = excluded_probeset_db[probeset]
try:
midas_p = str(midas_db[probeset])
except KeyError:
midas_p = 'NA'
### Export significant exon/junction IDs and scores
try:
values_ps = [probeset, eed.GeneID(), 'UC', eed.Score(), str(eed.TTestNormalizedRatios()), midas_p]
except Exception:
excl_probeset, geneid, score, rawp, pvalue = eed; values_ps = [probeset, geneid, 'UC', str(score), str(rawp), str(pvalue)]
values_ps = string.join(values_ps, '\t') + '\n';
ProcessedSpliceData_data.write(values_ps)
### Write DomainGraph results
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
try:
exon_probeset = exon_array_translation_db[probeset][0]; probeset = exon_probeset; gcn += 1
except Exception:
probeset = None; # null=[] - force an error - new in version 2.0.8
try:
values_dg = [probeset, eed.GeneID(), 'UC', eed.Score(), str(eed.TTestNormalizedRatios()), midas_p]
except Exception:
try:
excl_probeset, geneid, score, rawp, pvalue = eed
if ':' in probeset: probeset = excl_probeset ### Example: ENSMUSG00000029213:E2.1, make this just the numeric exclusion probeset - Not sure if DG handles non-numeric
values_dg = [probeset, geneid, 'UC', str(score), str(rawp), str(pvalue)]
except Exception:
None
try:
null = int(probeset)
values_dg = string.join(values_dg, '\t') + '\n';
DG_data.write(values_dg)
except Exception:
null = []
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
for id in exon_array_translation_db:
SRFinder_denom_data.write(exon_array_translation_db[id][0] + '\tAe\n')
else:
for probeset in original_exon_db:
SRFinder_denom_data.write(probeset + '\tAe\n')
DG_data.close()
SRFinder_in_data.close()
SRFinder_ex_data.close()
SRFinder_denom_data.close()
for affygene in direct_domain_gene_alignments:
domains = string.split(direct_domain_gene_alignments[affygene], ', ')
domains = unique.unique(domains);
domains = string.join(domains, ', ')
direct_domain_gene_alignments[affygene] = domains
### functional_attribute_db2 will be reorganized below, so first save the current database under another name
functional_attribute_db = functional_attribute_db2
functional_attribute_db2 = reorganize_attribute_entries(functional_attribute_db2, 'no')
external_exon_annot = eliminate_redundant_dict_values(external_exon_annot)
protein_exon_feature_db = protein_exon_feature_db2
protein_exon_feature_db2 = reorganize_attribute_entries(protein_exon_feature_db2, 'no')
############ Export Gene Data ############
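### Collapse the exon-level results to one row per gene: report the maximum |dI| event with its
### MiDAS p-value, the affected exons, and the aggregated functional/domain annotations.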
up_splice_val_genes = 0;
down_dI_genes = 0;
diff_exp_spliced_genes = 0;
diff_spliced_rna_factor = 0
ddI = 0;
udI = 0
summary_data_db['direct_domain_genes'] = len(direct_domain_gene_alignments)
summary_data_db['alt_genes'] = len(aspire_gene_results)
critical_gene_exons = eliminate_redundant_dict_values(critical_gene_exons)
aspire_output_gene = root_dir + 'AltResults/AlternativeOutput/' + dataset_name + analysis_method + '-exon-inclusion-GENE-results.txt'
data = export.ExportFile(aspire_output_gene)
if array_type == 'AltMouse': goelite_data.write("GeneID\tSystemCode\n")
title = ['AffyGene', 'max_dI', 'midas-p (corresponding)', 'symbol', 'external gene ID', 'description',
'regulation_call', 'event_call']
title += ['number_of_comparisons', 'num_effected_exons', 'up_exons', 'down_exons', 'functional_attribute',
'uniprot-ens_exon_features', 'direct_domain_alignments']
title += ['pathways', 'mean_fold_change', 'exon-annotations', 'exon-region IDs', 'alternative gene ID',
'splice-annotation score']
title = string.join(title, '\t') + '\n'
data.write(title)
for affygene in aspire_gene_results:
if affygene in annotate_db:
description = annotate_db[affygene].Description()
symbol = annotate_db[affygene].Symbol()
ensembl = annotate_db[affygene].ExternalGeneID()
if array_type != 'AltMouse' and array_type != 'RNASeq':
transcript_clusters = alternatively_reg_tc[affygene]; transcript_clusters = makeUnique(transcript_clusters); transcript_clusters = string.join(transcript_clusters, '|')
else:
transcript_clusters = affygene
rna_processing_factor = annotate_db[affygene].RNAProcessing()
else:
description = '';symbol = '';ensembl = affygene;rna_processing_factor = ''; transcript_clusters = ''
if ensembl in go_annotations:
wpgo = go_annotations[ensembl]; goa = wpgo.Combined()
else:
goa = ''
if array_type == 'AltMouse':
if len(ensembl) > 0: goelite_data.write(ensembl + '\tL\n')
try:
gene_splice_event_score[affygene].sort(); top_se_score = str(gene_splice_event_score[affygene][-1])
except KeyError:
top_se_score = 'NA'
try:
gene_regions = gene_exon_region[affygene]; gene_regions = makeUnique(gene_regions); gene_regions = string.join(gene_regions, '|')
except KeyError:
gene_regions = 'NA'
if analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
number_of_comparisons = str(comparison_count[affygene])
else:
number_of_comparisons = 'NA'
results_list = aspire_gene_results[affygene]
results_list.sort();
results_list.reverse()
max_dI = str(results_list[0][0])
regulation_call = results_list[0][1]
event_call = results_list[0][2]
midas_p = results_list[0][-1]
num_critical_exons = str(len(critical_gene_exons[affygene]))
try:
direct_domain_annots = direct_domain_gene_alignments[affygene]
except KeyError:
direct_domain_annots = ' '
down_exons = '';
up_exons = '';
down_list = [];
up_list = []
for exon_info in critical_gene_exons[affygene]:
exon = exon_info[0];
call = exon_info[1]
if call == 'downregulated':
down_exons = down_exons + exon + ','
down_list.append(exon)
ddI += 1
if call == 'upregulated':
up_exons = up_exons + exon + ','
up_list.append(exon)
udI += 1
down_exons = down_exons[0:-1]
up_exons = up_exons[0:-1]
up_exons = add_a_space(up_exons);
down_exons = add_a_space(down_exons)
functional_annotation = ''
if affygene in functional_attribute_db2:
number_of_functional_attributes = str(len(functional_attribute_db2[affygene]))
attribute_list = functional_attribute_db2[affygene]
attribute_list.sort()
for attribute_exon_info in attribute_list:
exon_attribute = attribute_exon_info[0]
exon_list = attribute_exon_info[1]
functional_annotation = functional_annotation + exon_attribute
exons = '('
for exon in exon_list: exons = exons + exon + ','
exons = exons[0:-1] + '),'
if add_exons_to_annotations == 'yes':
functional_annotation = functional_annotation + exons
else:
functional_annotation = functional_annotation + ','
functional_annotation = functional_annotation[0:-1]
uniprot_exon_annotation = ''
if affygene in protein_exon_feature_db2:
number_of_functional_attributes = str(len(protein_exon_feature_db2[affygene]))
attribute_list = protein_exon_feature_db2[affygene];
attribute_list.sort()
for attribute_exon_info in attribute_list:
exon_attribute = attribute_exon_info[0]
exon_list = attribute_exon_info[1]
uniprot_exon_annotation = uniprot_exon_annotation + exon_attribute
exons = '('
for exon in exon_list: exons = exons + exon + ','
exons = exons[0:-1] + '),'
if add_exons_to_annotations == 'yes':
uniprot_exon_annotation = uniprot_exon_annotation + exons
else:
uniprot_exon_annotation = uniprot_exon_annotation + ','
uniprot_exon_annotation = uniprot_exon_annotation[0:-1]
if len(uniprot_exon_annotation) == 0: uniprot_exon_annotation = ' '
if len(functional_annotation) == 0: functional_annotation = ' '
if affygene in gene_expression_diff_db:
mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
try:
if abs(float(mean_fold_change)) > log_fold_cutoff: diff_exp_spliced_genes += 1
except Exception:
diff_exp_spliced_genes = diff_exp_spliced_genes
else:
mean_fold_change = 'NC'
if len(rna_processing_factor) > 2: diff_spliced_rna_factor += 1
###Add annotations for where in the gene structure these exons are (according to Ensembl)
if affygene in external_exon_annot:
external_gene_annot = string.join(external_exon_annot[affygene], ', ')
else:
external_gene_annot = ''
if array_type == 'RNASeq':
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
values = [affygene, max_dI, midas_p, symbol, ensembl, fs(description), regulation_call, event_call,
number_of_comparisons]
values += [num_critical_exons, up_exons, down_exons, functional_annotation]
values += [fs(uniprot_exon_annotation), fs(direct_domain_annots), fs(goa), mean_fold_change,
external_gene_annot, gene_regions, transcript_clusters, top_se_score]
values = string.join(values, '\t') + '\n'
data.write(values)
### Use results for summary statistics
if len(up_list) > len(down_list):
up_splice_val_genes += 1
else:
down_dI_genes += 1
data.close()
print "Gene-level results written"
### 'yes' here indicates that although the truncation events will initially be filtered out, they will later be added
### back in without the non-truncation annotations... if there is no second database (in this case functional_attribute_db again).
### IF WE WANT TO FILTER OUT NON-NMD ENTRIES WHEN NMD IS PRESENT (FOR A GENE), functional_attribute_db MUST BE INCLUDED AS THE SECOND VARIABLE!!!!
### Currently, 'yes' does nothing
functional_annotation_db, null = grab_summary_dataset_annotations(functional_attribute_db, '', 'yes')
upregulated_genes = 0;
downregulated_genes = 0
###Calculate the number of upregulated and downregulated genes
for affygene in gene_expression_diff_db:
fold_val = gene_expression_diff_db[affygene].ConstitutiveFold()
try:
if float(fold_val) > log_fold_cutoff:
upregulated_genes += 1
elif abs(float(fold_val)) > log_fold_cutoff:
downregulated_genes += 1
except Exception:
null = []
upregulated_rna_factor = 0;
downregulated_rna_factor = 0
###Calculate the total number of putative RNA-processing/binding factors differentially regulated
for affygene in gene_expression_diff_db:
gene_fold = gene_expression_diff_db[affygene].ConstitutiveFold()
rna_processing_factor = gene_expression_diff_db[affygene].RNAProcessing()
if len(rna_processing_factor) > 1:
if gene_fold > log_fold_cutoff:
upregulated_rna_factor += 1
elif abs(gene_fold) > log_fold_cutoff:
downregulated_rna_factor += 1
###Generate three files for downstream functional summary
### functional_annotation_db2 is output to the same function as functional_annotation_db, ranked_uniprot_list_all to get all ranked uniprot annotations,
### and ranked_uniprot_list_coding_only to get only coding ranked uniprot annotations
functional_annotation_db2, ranked_uniprot_list_all = grab_summary_dataset_annotations(protein_exon_feature_db, '',
'') #functional_attribute_db
null, ranked_uniprot_list_coding_only = grab_summary_dataset_annotations(protein_exon_feature_db,
functional_attribute_db,
'') #functional_attribute_db
functional_attribute_db = [];
protein_exon_feature_db = []
### Summarize changes in average protein length for each splice event
up_protein_list = [];
down_protein_list = [];
protein_length_fold_diff = []
for [down_protein, up_protein] in protein_length_list:
up_protein = float(up_protein);
down_protein = float(down_protein)
down_protein_list.append(down_protein);
up_protein_list.append(up_protein)
if up_protein > 10 and down_protein > 10:
fold_change = up_protein / down_protein;
protein_length_fold_diff.append(fold_change)
try:
    median_fold_diff = statistics.median(protein_length_fold_diff)
except Exception:
    median_fold_diff = 'NA' ### Occurs when no qualifying protein pairs were found
try:
down_avg = int(statistics.avg(down_protein_list)); up_avg = int(statistics.avg(up_protein_list))
except Exception:
down_avg = 0; up_avg = 0
try:
try:
down_std = int(statistics.stdev(down_protein_list));
up_std = int(statistics.stdev(up_protein_list))
except ValueError: ###If 'null' is returned from stdev
down_std = 0;
up_std = 0
except Exception:
down_std = 0;
up_std = 0
if len(down_protein_list) > 1 and len(up_protein_list) > 1:
try:
#t,df,tails = statistics.ttest(down_protein_list,up_protein_list,2,3)
#t = abs(t);df = round(df)
#print 'ttest t:',t,'df:',df
#p = str(statistics.t_probability(t,df))
p = statistics.runComparisonStatistic(down_protein_list, up_protein_list, probability_statistic)
#print dataset_name,p
except Exception:
p = 'NA'
if p == 1: p = 'NA'
else: p = str(p)
else:
p = 'NA'
###Calculate unique reciprocal isoforms for exon-inclusion, exclusion and mutual-exclusive events
unique_exon_inclusion_count = 0;
unique_exon_exclusion_count = 0;
unique_mutual_exclusive_count = 0;
unique_exon_event_db = eliminate_redundant_dict_values(unique_exon_event_db)
for affygene in unique_exon_event_db:
isoform_entries = unique_exon_event_db[affygene]
possibly_redundant = [];
non_redundant = [];
check_for_redundant = []
for entry in isoform_entries:
if entry[0] == 1: ### If there is only one regulated exon
possibly_redundant.append(entry)
else:
non_redundant.append(entry)
critical_exon_list = entry[1]
for exon in critical_exon_list:
check_for_redundant.append(exon)
for entry in possibly_redundant:
exon = entry[1][0]
if exon not in check_for_redundant:
non_redundant.append(entry)
for entry in non_redundant:
if entry[2] == 'ei-ex':
if entry[3] == 'upregulated':
unique_exon_inclusion_count += 1
else:
unique_exon_exclusion_count += 1
else:
unique_mutual_exclusive_count += 1
udI = unique_exon_inclusion_count;
ddI = unique_exon_exclusion_count;
mx = unique_mutual_exclusive_count
###Add splice event information to the functional_annotation_db
for splice_event in splice_event_db: count = splice_event_db[splice_event]; functional_annotation_db.append((splice_event, count))
if analysis_method == 'splicing-index' or analysis_method == 'FIRMA': udI = 'NA'; ddI = 'NA'
summary_results_db[dataset_name[0:-1]] = udI, ddI, mx, up_splice_val_genes, down_dI_genes, (
up_splice_val_genes + down_dI_genes), upregulated_genes, downregulated_genes, diff_exp_spliced_genes, upregulated_rna_factor, downregulated_rna_factor, diff_spliced_rna_factor, down_avg, down_std, up_avg, up_std, p, median_fold_diff, functional_annotation_db
result_list = exportComparisonSummary(dataset_name, summary_data_db, 'log')
###Re-set this variable (useful for testing purposes)
clearObjectsFromMemory(gene_expression_diff_db)
clearObjectsFromMemory(splice_event_list);
clearObjectsFromMemory(si_db);
si_db = []
clearObjectsFromMemory(fdr_exon_stats)
try:
clearObjectsFromMemory(excluded_probeset_db); clearObjectsFromMemory(ex_db); ex_db = []
except Exception:
ex_db = []
clearObjectsFromMemory(exon_db)
#clearObjectsFromMemory(annotate_db)
critical_probeset_annotation_db = [];
gene_expression_diff_db = [];
domain_associated_genes = [];
permute_p_values = []
permute_miR_inputs = [];
seq_attribute_str = [];
microRNA_count_db = [];
excluded_probeset_db = [];
fdr_exon_stats = []
splice_event_list = [];
critical_exon_db_len = len(critical_exon_db) #; critical_exon_db=[] deleting here will cause a global instance problem
all_domain_gene_hits = [];
gene_splice_event_score = [];
unique_exon_event_db = [];
probeset_aligning_db = [];
ranked_uniprot_list_all = [];
filtered_microRNA_exon_db = [];
permute_domain_inputs = [];
functional_annotation_db2 = [];
functional_attribute_db2 = [];
protein_length_list = [];
ranked_uniprot_list_coding_only = [];
miR_str = [];
permute_input_list = [];
microRNA_exon_feature_db2 = [];
alternatively_reg_tc = [];
direct_domain_gene_alignments = [];
aspire_gene_results = [];
domain_gene_counts = [];
functional_annotation = [];
protein_exon_feature_db2 = [];
microRNA_attribute_db = [];
probeset_mirBS_db = [];
exon_hits = [];
critical_gene_exons = [];
gene_exon_region = [];
exon_db = [];
external_exon_annot = [];
values = [];
down_protein_list = [];
functional_annotation_db = [];
protein_length_fold_diff = [];
comparison_count = [];
filtered_arrayids = [];
domain_hit_gene_count_db = [];
up_protein_list = [];
probeset_domain_db = []
try:
goelite_data.close()
except Exception:
null = []
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
return summary_results_db, summary_results_db2, aspire_output, aspire_output_gene, critical_exon_db_len
def deviation(dI, avg_dI, stdev_dI):
dI = covertLogFoldToNonLogFloat(dI)
avg_dI = covertLogFoldToNonLogFloat(avg_dI)
stdev_dI = covertLogFoldToNonLogFloat(stdev_dI)
return str(abs((dI - avg_dI) / stdev_dI))
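### Illustrative check with hypothetical values: deviation('1.0','0.5','0.5') converts the
### log2 inputs to non-log space (2.0, ~1.414, ~1.414) and returns |(2.0-1.414)/1.414| ~ '0.414'.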
def covertLogExpressionToNonLog(log_val):
if normalization_method == 'RPKM':
nonlog_val = (math.pow(2, float(log_val)))
else:
nonlog_val = (math.pow(2, float(log_val))) - 1
return str(nonlog_val)
def covertLogFoldToNonLog(log_val):
try:
if float(log_val) < 0:
nonlog_val = (-1 / math.pow(2, (float(log_val))))
else:
nonlog_val = (math.pow(2, float(log_val)))
except Exception:
nonlog_val = log_val
return str(nonlog_val)
def covertLogFoldToNonLogFloat(log_val):
if float(log_val) < 0:
nonlog_val = (-1 / math.pow(2, (float(log_val))))
else:
nonlog_val = (math.pow(2, float(log_val)))
return nonlog_val
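### Example of the mirrored fold convention used above: a log2 fold of 1.0 becomes 2.0, while
### -1.0 becomes -1/2**-1 = -2.0, so negative folds are reported as -fold rather than fractions.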
def checkForTransSplicing(uid, splicing_event):
pl = string.split(uid, ':')
if len(pl) > 2:
if pl[0] not in pl[1]: ### Two different genes
if len(splicing_event) > 0:
splicing_event += '|trans-splicing'
else:
splicing_event = 'trans-splicing'
return splicing_event
def fs(text):
### Formats a text entry to prevent delimiting a comma
return '"' + text + '"'
def analyzeSplicingIndex(fold_dbase):
"""The Splicing Index (SI) represents the log ratio of the exon intensities between the two tissues after normalization
to the gene intensities in each sample: SIi = log2((e1i/g1j)/(e2i/g2j)), for the i-th exon of the j-th gene in tissue
type 1 or 2. The splicing indices are then subjected to a t-test to probe for differential inclusion of the exon into the gene.
In order to determine if the change in isoform expression was statistically significant, a simple two-tailed t-test was carried
out on the isoform ratios by grouping the 10 samples from either "tumor" or "normal" tissue.
The method ultimately producing the highest proportion of true positives was to retain only: a) exons with a DABG p-value < 0.05,
b) genes with a signal > 70, c) exons with a log ratio between tissues (i.e., the gene-level normalized fold change) > 0.5,
d) Splicing Index p-values < 0.005 and e) Core exons.
Gardina PJ, Clark TA, Shimada B, Staples MK, Yang Q, Veitch J, Schweitzer A, Awad T, Sugnet C, Dee S, Davies C, Williams A, Turpaz Y.
Alternative splicing and differential gene expression in colon cancer detected by a whole genome exon array.
BMC Genomics. 2006 Dec 27;7:325. PMID: 17192196
"""
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db) > 0:
temp_db = {}
for probeset in fold_dbase: temp_db[probeset] = []
for probeset in temp_db:
try:
filtered_probeset_db[probeset]
except KeyError:
del fold_dbase[probeset]
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing annotation)
if filter_for_AS == 'yes':
proceed = 0
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try:
del fold_dbase[probeset]
except KeyError:
null = []
### Used to export the relative individual adjusted probeset fold changes used for splicing-index values
if export_NI_values == 'yes':
summary_output = root_dir + 'AltResults/RawSpliceData/' + species + '/' + analysis_method + '/' + dataset_name[
:-1] + '.txt'
data = export.ExportFile(summary_output)
title = string.join(['gene\tExonID\tprobesets'] + original_array_names, '\t') + '\n';
data.write(title)
print 'Calculating splicing-index values (please be patient)...',
if array_type == 'RNASeq':
id_name = 'exon/junction IDs'
else:
id_name = 'array IDs'
print len(fold_dbase), id_name, 'being examined'
###original_avg_const_exp_db contains constitutive mean expression values per group: G6953871 [7.71, 7.66]
###array_raw_group_values: Raw expression values in list of groups: G7072464@J935416_RC@j_at ([1.79, 2.16, 2.22], [1.68, 2.24, 1.97, 1.92, 2.12])
###avg_const_exp_db contains the raw constitutive expression values in a single list
splicing_index_hash = [];
excluded_probeset_db = {};
denominator_probesets = 0;
interaction = 0
original_increment = int(len(exon_db) / 20);
increment = original_increment
for probeset in exon_db:
ed = exon_db[probeset]
#include_probeset = ed.IncludeProbeset()
if interaction == increment: increment += original_increment; print '*',
interaction += 1
include_probeset = 'yes' ###Moved this filter to import of the probeset relationship file
###Examines user input parameters for inclusion of probeset types in the analysis
if include_probeset == 'yes':
geneid = ed.GeneID()
if probeset in fold_dbase and geneid in original_avg_const_exp_db: ###used to search for array_raw_group_values, but when filtered by expression changes, need to filter by adj_fold_dbase
denominator_probesets += 1
###Includes probesets with a calculated constitutive expression value for each gene and expression data for that probeset
group_index = 0;
si_interim_group_db = {};
si_interim_group_str_db = {};
ge_threshold_count = 0;
value_count = 0
for group_values in array_raw_group_values[probeset]:
"""gene_expression_value = math.pow(2,original_avg_const_exp_db[geneid][group_index])
###Check to see if gene expression is > threshold for both conditions
if gene_expression_value>gene_expression_threshold:ge_threshold_count+=1"""
value_index = 0;
ratio_hash = [];
ratio_str_hash = []
for value in group_values: ###Calculate normalized ratios for each condition and save raw values for later permutation
#exp_val = math.pow(2,value);ge_val = math.pow(2,avg_const_exp_db[geneid][value_count]) ###To calculate a ttest we need the raw constitutive expression values, these are not in group list form but are all in a single list so keep count.
exp_val = value;
ge_val = avg_const_exp_db[geneid][value_count]
exp_ratio = exp_val - ge_val;
ratio_hash.append(exp_ratio);
ratio_str_hash.append(str(exp_ratio))
value_index += 1;
value_count += 1
si_interim_group_db[group_index] = ratio_hash
si_interim_group_str_db[group_index] = ratio_str_hash
group_index += 1
group1_ratios = si_interim_group_db[0];
group2_ratios = si_interim_group_db[1]
group1_mean_ratio = statistics.avg(group1_ratios);
group2_mean_ratio = statistics.avg(group2_ratios)
if export_NI_values == 'yes':
try:
er = ed.ExonID()
except Exception:
er = 'NA'
ev = string.join(
[geneid + '\t' + er + '\t' + probeset] + si_interim_group_str_db[0] + si_interim_group_str_db[
1], '\t') + '\n';
data.write(ev)
#if ((math.log(group1_mean_ratio,2))*(math.log(group2_mean_ratio,2)))<0: opposite_SI_log_mean = 'yes'
if (group1_mean_ratio * group2_mean_ratio) < 0:
opposite_SI_log_mean = 'yes'
else:
opposite_SI_log_mean = 'no'
try:
if calculate_normIntensity_p == 'yes':
try:
normIntensityP = statistics.runComparisonStatistic(group1_ratios, group2_ratios,
probability_statistic)
except Exception:
normIntensityP = 'NA' ### Occurs when analyzing two groups with no variance
else:
normIntensityP = 'NA' ### Set to a value that is always treated as significant
if normIntensityP == 1: normIntensityP = 'NA'
splicing_index = group1_mean_ratio - group2_mean_ratio;
abs_splicing_index = abs(splicing_index)
#if probeset == '3061323': print abs_splicing_index,normIntensityP,ed.ExonID(),group1_mean_ratio,group2_mean_ratio,math.log(group1_mean_ratio,2),math.log(group2_mean_ratio,2),((math.log(group1_mean_ratio,2))*(math.log(group2_mean_ratio,2))),opposite_SI_log_mean; kill
if probeset in midas_db:
try:
midas_p = float(midas_db[probeset])
except ValueError:
midas_p = 0
#if abs_splicing_index>1 and normIntensityP < 0.05: print probeset,normIntensityP, abs_splicing_index;kill
else:
midas_p = 0
#print ed.GeneID(),ed.ExonID(),probeset,splicing_index,normIntensityP,midas_p,group1_ratios,group2_ratios
if abs_splicing_index > alt_exon_logfold_cutoff and (
normIntensityP < p_threshold or normIntensityP == 'NA' or normIntensityP == 1) and midas_p < p_threshold:
exonid = ed.ExonID();
critical_exon_list = [1, [exonid]]
constit_exp1 = original_avg_const_exp_db[geneid][0]
constit_exp2 = original_avg_const_exp_db[geneid][1]
ge_fold = constit_exp2 - constit_exp1
### Re-define all of the pairwise values now that the two Splicing-Index groups to report have been determined
data_list1 = array_raw_group_values[probeset][0];
data_list2 = array_raw_group_values[probeset][1]
baseline_exp = statistics.avg(data_list1);
experimental_exp = statistics.avg(data_list2);
fold_change = experimental_exp - baseline_exp
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1, data_list2,
probability_statistic)
except Exception:
ttest_exp_p = 1
normInt1 = (baseline_exp - constit_exp1);
normInt2 = (experimental_exp - constit_exp2);
adj_fold = normInt2 - normInt1
ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p,
'')
sid = ExonData(splicing_index, probeset, critical_exon_list, geneid, group1_ratios,
group2_ratios, normIntensityP, opposite_SI_log_mean)
sid.setConstitutiveExpression(constit_exp1);
sid.setConstitutiveFold(ge_fold);
sid.setProbesetExpressionData(ped)
splicing_index_hash.append((splicing_index, sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(splicing_index, geneid, normIntensityP)
excluded_probeset_db[probeset] = eed
except Exception:
null = [] ###If this occurs, then most likely, the exon and constitutive probeset are the same
print 'Splicing Index analysis complete'
if export_NI_values == 'yes': data.close()
splicing_index_hash.sort();
splicing_index_hash.reverse()
print len(splicing_index_hash), id_name, "with evidence of Alternative expression"
p_value_call = '';
permute_p_values = {};
summary_data_db['denominator_exp_events'] = denominator_probesets
return splicing_index_hash, p_value_call, permute_p_values, excluded_probeset_db
def importResiduals(filename, probe_probeset_db):
fn = filepath(filename);
key_db = {};
x = 0;
prior_uid = '';
uid_gene_db = {}
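### Assumed input layout (based on the parsing below): column one is 'geneUID-probeID' and the
### remaining tab-separated columns are per-sample residuals, with rows for a given gene contiguous.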
for line in open(fn, 'rU').xreadlines():
if x == 0 and line[0] == '#':
null = []
elif x == 0:
x += 1
else:
data = cleanUpLine(line)
t = string.split(data, '\t')
uid = t[0];
uid, probe = string.split(uid, '-')
try:
probeset = probe_probeset_db[probe];
residuals = t[1:]
if uid == prior_uid:
try:
uid_gene_db[probeset].append(residuals) ### Don't need to keep track of the probe ID
except KeyError:
uid_gene_db[probeset] = [residuals]
else: ### Hence, we have finished storing all residual data for that gene
if len(uid_gene_db) > 0: calculateFIRMAScores(uid_gene_db); uid_gene_db = {}
try:
uid_gene_db[probeset].append(residuals) ### Don't need to keep track of the probe ID
except KeyError:
uid_gene_db[probeset] = [residuals]
prior_uid = uid
except Exception:
null = []
### For the last gene imported
if len(uid_gene_db) > 0: calculateFIRMAScores(uid_gene_db)
def calculateFIRMAScores(uid_gene_db):
probeset_residuals = {};
all_gene_residuals = [];
total_probes = 0
for probeset in uid_gene_db:
residuals_list = uid_gene_db[probeset];
sample_db = {};
total_probes += len(residuals_list)
### For all probes in a probeset, calculate the median residual for each sample
for residuals in residuals_list:
index = 0
for residual in residuals:
try:
sample_db[index].append(float(residual))
except KeyError:
sample_db[index] = [float(residual)]
all_gene_residuals.append(float(residual))
index += 1
for index in sample_db:
median_residual = statistics.median(sample_db[index])
sample_db[index] = median_residual
probeset_residuals[probeset] = sample_db
### Calculate the Median absolute deviation
"""http://en.wikipedia.org/wiki/Absolute_deviation
The median absolute deviation (also MAD) is the median absolute deviation from the median. It is a robust estimator of dispersion.
For the example {2, 2, 3, 4, 14}: 3 is the median, so the absolute deviations from the median are {1, 1, 0, 1, 11} (or reordered as
{0, 1, 1, 1, 11}) with a median absolute deviation of 1, in this case unaffected by the value of the outlier 14.
Here, the global gene median will be expressed as res_gene_median.
"""
res_gene_median = statistics.median(all_gene_residuals);
subtracted_residuals = []
for residual in all_gene_residuals: subtracted_residuals.append(abs(res_gene_median - residual))
gene_MAD = statistics.median(subtracted_residuals)
#if '3263614' in probeset_residuals: print len(all_gene_residuals),all_gene_residuals
for probeset in probeset_residuals:
sample_db = probeset_residuals[probeset]
for index in sample_db:
median_residual = sample_db[index]
try:
firma_score = median_residual / gene_MAD
sample_db[index] = firma_score
except Exception:
null = []
#if probeset == '3263614': print index, median_residual, firma_score, gene_MAD
firma_scores[probeset] = sample_db
def importProbeToProbesets(fold_dbase):
#print "Importing probe-to-probeset annotations (please be patient)..."
filename = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_probeset-probes.txt'
probeset_to_include = {}
gene2examine = {}
### Although we want to restrict the analysis to probesets in fold_dbase, we don't want to affect the FIRMA model - filter later
for probeset in fold_dbase:
try:
ed = exon_db[probeset]; gene2examine[ed.GeneID()] = []
except Exception:
null = []
for gene in original_avg_const_exp_db: gene2examine[gene] = []
for probeset in exon_db:
ed = exon_db[probeset];
geneid = ed.GeneID()
if geneid in gene2examine:
gene2examine[geneid].append(probeset) ### Store these so we can break things up
probeset_to_include[probeset] = []
probeset_probe_db = importGenericFilteredDBList(filename, probeset_to_include)
### Get Residuals filename and verify it's presence
#print "Importing comparison residuals..."
filename_objects = string.split(dataset_name[:-1], '.p');
filename = filename_objects[0] + '.txt'
if len(array_group_list) == 2:
filename = import_dir = root_dir + 'AltExpression/FIRMA/residuals/' + array_type + '/' + species + '/' + filename
else:
filename = import_dir = root_dir + 'AltExpression/FIRMA/FullDatasets/' + array_type + '/' + species + '/' + filename
status = verifyFile(filename)
if status != 'found':
print_out = 'The residual file: ';
print_out += filename
print_out += ' was not found in the default location.\nPlease re-run the analysis from the beginning.'
try:
UI.WarningWindow(print_out, 'Exit')
except Exception:
print print_out
print traceback.format_exc();
badExit()
print "Calculating FIRMA scores..."
input_count = len(gene2examine) ### Number of probesets or probeset pairs (junction array) alternatively regulated
original_increment = int(input_count / 20);
increment = original_increment
start_time = time.time();
x = 0
probe_probeset_db = {};
gene_count = 0;
total_gene_count = 0;
max_gene_count = 3000;
round = 1
for gene in gene2examine:
gene_count += 1;
total_gene_count += 1;
x += 1
#if x == increment: increment+=original_increment; print '*',
for probeset in gene2examine[gene]:
for probe in probeset_probe_db[probeset]: probe_probeset_db[probe] = probeset
if gene_count == max_gene_count:
### Import residuals and calculate primary sample/probeset FIRMA scores
importResiduals(filename, probe_probeset_db)
#print max_gene_count*round,"genes"
print '*',
gene_count = 0;
probe_probeset_db = {};
round += 1 ### Reset these variables and re-run
probeset_probe_db = {}
### Analyze residuals for the remaining probesets (< max_gene_count)
importResiduals(filename, probe_probeset_db)
end_time = time.time();
time_diff = int(end_time - start_time)
print "FIRMA scores calculted for", total_gene_count, "genes in %d seconds" % time_diff
def FIRMAanalysis(fold_dbase):
"""The FIRMA method calculates a score for each probeset and for each samples within a group of arrays, independent
of group membership. However, in AltAnalyze, these analyses are performed dependent on group. The FIRMA score is calculated
by obtaining residual values (residuals is a variable for each probe that can't be explained by the GC content or intensity
of that probe) from APT, for all probes corresponding to a metaprobeset (Ensembl gene in AltAnalyze). These probe residuals
are imported and the ratio of the median residual per probeset per sample divided by the absolute standard deviation of the
median of all probes for all samples for that gene."""
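### In code terms (see calculateFIRMAScores): firma_score = median(probe residuals for that
### probeset and sample) / MAD(all probe residuals for the gene).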
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db) > 0:
temp_db = {}
for probeset in fold_dbase: temp_db[probeset] = []
for probeset in temp_db:
try:
filtered_probeset_db[probeset]
except KeyError:
del fold_dbase[probeset]
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing annotation)
if filter_for_AS == 'yes':
proceed = 0
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try:
del fold_dbase[probeset]
except KeyError:
null = []
#print 'Beginning FIRMA analysis (please be patient)...'
### Used to export the relative individual adjusted probeset fold changes used for splicing-index values
if export_NI_values == 'yes':
sample_names_ordered = [] ### note: can't use original_array_names since the order is potentially different (FIRMA stores sample data as indices within dictionary keys)
for group_name in array_group_list: ### THIS LIST IS USED TO MAINTAIN CONSISTENT GROUP ORDERING DURING ANALYSIS
for sample_name in array_group_name_db[group_name]: sample_names_ordered.append(sample_name)
summary_output = root_dir + 'AltResults/RawSpliceData/' + species + '/' + analysis_method + '/' + dataset_name[
:-1] + '.txt'
data = export.ExportFile(summary_output)
title = string.join(['gene-probesets'] + sample_names_ordered, '\t') + '\n';
data.write(title)
### Import probes for probesets to be analyzed
global firma_scores;
firma_scores = {}
importProbeToProbesets(fold_dbase)
print 'FIRMA scores obtained for', len(firma_scores), 'probesets.'
### Group sample scores for each probeset and calculate statistics
firma_hash = [];
excluded_probeset_db = {};
denominator_probesets = 0;
interaction = 0
original_increment = int(len(firma_scores) / 20);
increment = original_increment
for probeset in firma_scores:
if probeset in fold_dbase: ### Filter based on expression
ed = exon_db[probeset];
geneid = ed.GeneID()
if interaction == increment: increment += original_increment; print '*',
interaction += 1;
denominator_probesets += 1
sample_db = firma_scores[probeset]
###Use the index values from performExpressionAnalysis to assign each expression value to a new database
firma_group_array = {}
for group_name in array_group_db:
for array_index in array_group_db[group_name]:
firma_score = sample_db[array_index]
try:
firma_group_array[group_name].append(firma_score)
except KeyError:
firma_group_array[group_name] = [firma_score]
###array_group_list should already be unique and correctly sorted (see above)
firma_lists = [];
index = 0
for group_name in array_group_list:
firma_list = firma_group_array[group_name]
if len(array_group_list) > 2: firma_list = statistics.avg(firma_list), firma_list, index
firma_lists.append(firma_list);
index += 1
if export_NI_values == 'yes': ### DO THIS HERE SINCE firma_lists IS SORTED BELOW!!!!
try:
er = ed.ExonID()
except Exception:
er = 'NA'
export_list = [geneid + '\t' + er + '\t' + probeset];
export_list2 = []
for firma_ls in firma_lists:
if len(array_group_list) > 2: firma_ls = firma_ls[1] ### See above modification of the firma_list object for multiple-group analysis
export_list += firma_ls
for i in export_list: export_list2.append(str(i))
ev = string.join(export_list2, '\t') + '\n';
data.write(ev)
if len(array_group_list) == 2:
firma_list1 = firma_lists[0];
firma_list2 = firma_lists[-1];
firma_avg1 = statistics.avg(firma_list1);
firma_avg2 = statistics.avg(firma_list2)
index1 = 0;
index2 = 1 ### Only two groups, thus only two indices
else: ### The below code deals with identifying the comparisons which yield the greatest FIRMA difference
firma_lists.sort();
index1 = firma_lists[0][-1];
index2 = firma_lists[-1][-1]
firma_list1 = firma_lists[0][1];
firma_list2 = firma_lists[-1][1];
firma_avg1 = firma_lists[0][0];
firma_avg2 = firma_lists[-1][0]
if calculate_normIntensity_p == 'yes':
try:
normIntensityP = statistics.runComparisonStatistic(firma_list1, firma_list2, probability_statistic)
except Exception:
normIntensityP = 'NA' ### Occurs when analyzing two groups with no variance
else:
normIntensityP = 'NA'
if normIntensityP == 1: normIntensityP = 'NA'
firma_fold_change = firma_avg2 - firma_avg1
firma_fold_change = -1 * firma_fold_change ### Make this equivalent to Splicing Index fold which is also relative to experimental not control
if (firma_avg2 * firma_avg1) < 0:
opposite_FIRMA_scores = 'yes'
else:
opposite_FIRMA_scores = 'no'
if probeset in midas_db:
try:
midas_p = float(midas_db[probeset])
except ValueError:
midas_p = 0
else:
midas_p = 0
#if probeset == '3263614': print firma_fold_change, normIntensityP, midas_p,'\n',firma_list1, firma_list2, [p_threshold];kill
if abs(firma_fold_change) > alt_exon_logfold_cutoff and (
normIntensityP < p_threshold or normIntensityP == 'NA') and midas_p < p_threshold:
exonid = ed.ExonID();
critical_exon_list = [1, [exonid]]
#gene_expression_values = original_avg_const_exp_db[geneid]
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2 - constit_exp1
### Re-define all of the pairwise values now that the two FIRMA groups to report have been determined
data_list1 = array_raw_group_values[probeset][index1];
data_list2 = array_raw_group_values[probeset][index2]
baseline_exp = statistics.avg(data_list1);
experimental_exp = statistics.avg(data_list2);
fold_change = experimental_exp - baseline_exp
group_name1 = array_group_list[index1];
group_name2 = array_group_list[index2]
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1, data_list2, probability_statistic)
except Exception:
ttest_exp_p = 1
normInt1 = (baseline_exp - constit_exp1);
normInt2 = (experimental_exp - constit_exp2);
adj_fold = normInt2 - normInt1
ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p,
group_name2 + '_vs_' + group_name1)
fid = ExonData(firma_fold_change, probeset, critical_exon_list, geneid, data_list1, data_list2,
normIntensityP, opposite_FIRMA_scores)
fid.setConstitutiveExpression(constit_exp1);
fid.setConstitutiveFold(ge_fold);
fid.setProbesetExpressionData(ped)
firma_hash.append((firma_fold_change, fid))
#print [[[probeset,firma_fold_change,normIntensityP,p_threshold]]]
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(firma_fold_change, geneid, normIntensityP)
excluded_probeset_db[probeset] = eed
print 'FIRMA analysis complete'
if export_NI_values == 'yes': data.close()
firma_hash.sort();
firma_hash.reverse()
print len(firma_hash), "Probesets with evidence of Alternative expression out of", len(excluded_probeset_db) + len(
firma_hash)
p_value_call = '';
permute_p_values = {};
summary_data_db['denominator_exp_events'] = denominator_probesets
return firma_hash, p_value_call, permute_p_values, excluded_probeset_db
def getFilteredFilename(filename):
if array_type == 'junction':
filename = string.replace(filename, '.txt', '-filtered.txt')
return filename
def getExonVersionFilename(filename):
original_filename = filename
if array_type == 'junction' or array_type == 'RNASeq':
if explicit_data_type != 'null':
filename = string.replace(filename, array_type, array_type + '/' + explicit_data_type)
### Make sure the file exists, otherwise, use the original
file_status = verifyFile(filename)
#print [[filename,file_status]]
if file_status != 'found': filename = original_filename
return filename
def importProbesetAligningDomains(exon_db, report_type):
filename = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_domain_aligning_probesets.txt'
filename = getFilteredFilename(filename)
probeset_aligning_db = importGenericDBList(filename)
filename = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_indirect_domain_aligning_probesets.txt'
filename = getFilteredFilename(filename)
probeset_indirect_aligning_db = importGenericDBList(filename)
if array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
new_exon_db = {};
splicing_call_db = {}
for probeset_pair in exon_db:
### For junction analyses exon_db is really regulated_exon_junction_db, containing the inclusion,exclusion probeset tuple and an object as values
ed = exon_db[probeset_pair];
geneid = ed.GeneID();
critical_exons = ed.CriticalExons()
for exon in critical_exons:
new_key = geneid + ':' + exon
try:
new_exon_db[new_key].append(probeset_pair)
except KeyError:
new_exon_db[new_key] = [probeset_pair]
try:
splicing_call_db[new_key].append(ed.SplicingCall())
except KeyError:
splicing_call_db[new_key] = [ed.SplicingCall()]
for key in new_exon_db:
probeset_pairs = new_exon_db[key];
probeset_pair = probeset_pairs[0] ### grab one of the probeset pairs
ed = exon_db[probeset_pair];
geneid = ed.GeneID()
jd = SimpleJunctionData(geneid, '', '', '',
probeset_pairs) ### use only those necessary fields for this function (probeset pairs will be called as CriticalExons)
splicing_call_db[key].sort();
splicing_call = splicing_call_db[key][-1];
jd.setSplicingCall(splicing_call) ### Bug from 1.15 to have key be new_key?
new_exon_db[key] = jd
exon_db = new_exon_db
gene_protein_ft_db = {};
domain_gene_count_db = {};
protein_functional_attribute_db = {};
probeset_aligning_db2 = {}
splicing_call_db = [];
new_exon_db = [] ### Clear memory
for probeset in exon_db:
#if probeset == '107650':
#if probeset in probeset_aligning_db: print probeset_aligning_db[probeset];kill
if probeset in probeset_aligning_db:
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else:
proceed = 'yes'
gene = exon_db[probeset].GeneID()
new_domain_list = [];
new_domain_list2 = []
if report_type == 'gene' and proceed == 'yes':
for domain in probeset_aligning_db[probeset]:
try:
domain_gene_count_db[domain].append(gene)
except KeyError:
domain_gene_count_db[domain] = [gene]
try:
gene_protein_ft_db[gene].append(domain)
except KeyError:
gene_protein_ft_db[gene] = [domain]
elif proceed == 'yes':
if array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
probeset_list = exon_db[probeset].CriticalExons()
else:
probeset_list = [probeset]
for id in probeset_list:
for domain in probeset_aligning_db[probeset]:
new_domain_list.append('(direct)' + domain)
new_domain_list2.append((domain, '+'))
new_domain_list = unique.unique(new_domain_list)
new_domain_list_str = string.join(new_domain_list, ', ')
gene_protein_ft_db[gene, id] = new_domain_list2
probeset_aligning_db2[id] = new_domain_list_str
#print exon_db['107650']
for probeset in exon_db:
if probeset in probeset_indirect_aligning_db:
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else:
proceed = 'yes'
gene = exon_db[probeset].GeneID()
new_domain_list = [];
new_domain_list2 = []
if report_type == 'gene' and proceed == 'yes':
for domain in probeset_indirect_aligning_db[probeset]:
try:
domain_gene_count_db[domain].append(gene)
except KeyError:
domain_gene_count_db[domain] = [gene]
try:
gene_protein_ft_db[gene].append(domain)
except KeyError:
gene_protein_ft_db[gene] = [domain]
elif proceed == 'yes':
if array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
probeset_list = exon_db[probeset].CriticalExons()
else:
probeset_list = [probeset]
for id in probeset_list:
for domain in probeset_indirect_aligning_db[probeset]:
new_domain_list.append('(indirect)' + domain)
new_domain_list2.append((domain, '-'))
new_domain_list = unique.unique(new_domain_list)
new_domain_list_str = string.join(new_domain_list, ', ')
gene_protein_ft_db[gene, id] = new_domain_list2
probeset_aligning_db2[id] = new_domain_list_str
domain_gene_count_db = eliminate_redundant_dict_values(domain_gene_count_db)
gene_protein_ft_db = eliminate_redundant_dict_values(gene_protein_ft_db)
if analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
clearObjectsFromMemory(exon_db);
exon_db = []
try:
clearObjectsFromMemory(new_exon_db)
except Exception:
null = []
probeset_indirect_aligning_db = [];
probeset_aligning_db = []
if report_type == 'perfect_match':
gene_protein_ft_db = [];
domain_gene_count_db = [];
protein_functional_attribute_db = []
return probeset_aligning_db2
elif report_type == 'probeset':
probeset_aligning_db2 = []
return gene_protein_ft_db, domain_gene_count_db, protein_functional_attribute_db
else:
probeset_aligning_db2 = [];
protein_functional_attribute_db = [];
probeset_aligning_db2 = []
len_gene_protein_ft_db = len(gene_protein_ft_db);
gene_protein_ft_db = []
return len_gene_protein_ft_db, domain_gene_count_db
def importProbesetProteinCompDomains(exon_db, report_type, comp_type):
filename = 'AltDatabase/' + species + '/' + array_type + '/probeset-domain-annotations-' + comp_type + '.txt'
if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type != 'null': filename = getFilteredFilename(filename)
filename = getExonVersionFilename(filename)
probeset_aligning_db = importGeneric(filename)
filename = 'AltDatabase/' + species + '/' + array_type + '/probeset-protein-annotations-' + comp_type + '.txt'
if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type != 'null': filename = getFilteredFilename(filename)
filename = getExonVersionFilename(filename)
gene_protein_ft_db = {};
domain_gene_count_db = {}
for probeset in exon_db:
initial_proceed = 'no';
original_probeset = probeset
if probeset in probeset_aligning_db:
initial_proceed = 'yes'
elif array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
### For junction analyses exon_db is really regulated_exon_junction_db, containing the inclusion,exclusion probeset tuple and an object as values
if '|' in probeset[0]: probeset1 = string.split(probeset[0], '|')[0]; probeset = probeset1, probeset[1]
try:
alternate_probeset_id = exon_db[probeset].InclusionLookup(); probeset = alternate_probeset_id, probeset[
1]
except Exception:
null = []
probeset_joined = string.join(probeset, '|')
#print [probeset_joined],[probeset]
if probeset_joined in probeset_aligning_db:
initial_proceed = 'yes'; probeset = probeset_joined
elif probeset[0] in probeset_aligning_db:
initial_proceed = 'yes'; probeset = probeset[0]
elif probeset[1] in probeset_aligning_db:
initial_proceed = 'yes'; probeset = probeset[1]
#else: for i in probeset_aligning_db: print [i];kill
if initial_proceed == 'yes':
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[original_probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else:
proceed = 'yes'
new_domain_list = []
gene = exon_db[original_probeset].GeneID()
if report_type == 'gene' and proceed == 'yes':
for domain_data in probeset_aligning_db[probeset]:
try:
domain, call = string.split(domain_data, '|')
except Exception:
values = string.split(domain_data, '|')
domain = values[0];
call = values[-1] ### occurs when a | exists in the annotations from UniProt
try:
domain_gene_count_db[domain].append(gene)
except KeyError:
domain_gene_count_db[domain] = [gene]
try:
gene_protein_ft_db[gene].append(domain)
except KeyError:
gene_protein_ft_db[gene] = [domain]
elif proceed == 'yes':
for domain_data in probeset_aligning_db[probeset]:
domain, call = string.split(domain_data, '|')
new_domain_list.append((domain, call))
#new_domain_list = string.join(new_domain_list,', ')
gene_protein_ft_db[gene, original_probeset] = new_domain_list
domain_gene_count_db = eliminate_redundant_dict_values(domain_gene_count_db)
probeset_aligning_db = [] ### Clear memory
probeset_aligning_protein_db = importGeneric(filename)
probeset_pairs = {} ### Store all possible probeset pairs as single probesets for protein-protein associations
for probeset in exon_db:
if len(probeset) == 2:
for p in probeset: probeset_pairs[p] = probeset
if report_type == 'probeset':
### Below code was re-written to be more memory efficient by not storing all data in probeset-domain-annotations-*comp*.txt via generic import
protein_functional_attribute_db = {};
probeset_protein_associations = {};
protein_db = {}
for probeset in exon_db:
initial_proceed = 'no';
original_probeset = probeset
if probeset in probeset_aligning_protein_db:
initial_proceed = 'yes'
elif array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if '|' in probeset[0]: probeset1 = string.split(probeset[0], '|')[0]; probeset = probeset1, probeset[1]
try:
alternate_probeset_id = exon_db[probeset].InclusionLookup(); probeset = alternate_probeset_id, \
probeset[1]
except Exception:
null = []
probeset_joined = string.join(probeset, '|')
#print [probeset_joined],[probeset]
if probeset_joined in probeset_aligning_protein_db:
initial_proceed = 'yes'; probeset = probeset_joined
elif probeset[0] in probeset_aligning_protein_db:
initial_proceed = 'yes'; probeset = probeset[0]
elif probeset[1] in probeset_aligning_protein_db:
initial_proceed = 'yes'; probeset = probeset[1]
#else: for i in probeset_aligning_db: print [i];kill
if initial_proceed == 'yes':
protein_data_list = probeset_aligning_protein_db[probeset]
new_protein_list = []
gene = exon_db[original_probeset].GeneID()
for protein_data in protein_data_list:
protein_info, call = string.split(protein_data, '|')
if 'AA:' in protein_info:
protein_info_r = string.replace(protein_info, ')', '*')
protein_info_r = string.replace(protein_info_r, '(', '*')
protein_info_r = string.split(protein_info_r, '*')
null_protein = protein_info_r[1];
hit_protein = protein_info_r[3]
probeset_protein_associations[original_probeset] = null_protein, hit_protein, call
protein_db[null_protein] = [];
protein_db[hit_protein] = []
new_protein_list.append((protein_info, call))
#new_protein_list = string.join(new_domain_list,', ')
protein_functional_attribute_db[gene, original_probeset] = new_protein_list
filename = 'AltDatabase/' + species + '/' + array_type + '/SEQUENCE-protein-dbase_' + comp_type + '.txt'
filename = getExonVersionFilename(filename)
protein_seq_db = importGenericFiltered(filename, protein_db)
for key in protein_functional_attribute_db:
gene, probeset = key
try:
null_protein, hit_protein, call = probeset_protein_associations[probeset]
null_seq = protein_seq_db[null_protein][0];
hit_seq = protein_seq_db[hit_protein][0]
seq_attr = 'sequence: ' + '(' + null_protein + ')' + null_seq + ' -> ' + '(' + hit_protein + ')' + hit_seq
protein_functional_attribute_db[key].append((seq_attr, call))
except KeyError:
null = []
protein_seq_db = [];
probeset_aligning_protein_db = []
return gene_protein_ft_db, domain_gene_count_db, protein_functional_attribute_db
else:
probeset_aligning_protein_db = [];
len_gene_protein_ft_db = len(gene_protein_ft_db);
gene_protein_ft_db = []
return len_gene_protein_ft_db, domain_gene_count_db
class SimpleJunctionData:
def __init__(self, geneid, probeset1, probeset2, probeset1_display, critical_exon_list):
self._geneid = geneid;
self._probeset1 = probeset1;
self._probeset2 = probeset2
self._probeset1_display = probeset1_display;
self._critical_exon_list = critical_exon_list
def GeneID(self): return self._geneid
def Probeset1(self): return self._probeset1
def Probeset2(self): return self._probeset2
def InclusionDisplay(self): return self._probeset1_display
def CriticalExons(self): return self._critical_exon_list
def setSplicingCall(self, splicing_call):
#self._splicing_call = EvidenceOfAltSplicing(slicing_annot)
self._splicing_call = splicing_call
def setSymbol(self, symbol): self.symbol = symbol
def Symbol(self): return self.symbol
def SplicingCall(self): return self._splicing_call
def setInclusionLookup(self, incl_junction_probeset): self.incl_junction_probeset = incl_junction_probeset
def InclusionLookup(self): return self.incl_junction_probeset
def formatJunctionData(probesets, affygene, critical_exon_list):
if '|' in probesets[0]: ### Only return the first inclusion probeset (agglomerated probesets)
incl_list = string.split(probesets[0], '|')
incl_probeset = incl_list[0];
excl_probeset = probesets[1]
else:
incl_probeset = probesets[0]; excl_probeset = probesets[1]
jd = SimpleJunctionData(affygene, incl_probeset, excl_probeset, probesets[0], critical_exon_list)
key = incl_probeset, excl_probeset
return key, jd
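### e.g. formatJunctionData(('PS1|PS2','PS3'), gene, exons) keys on ('PS1','PS3') while retaining
### 'PS1|PS2' as the inclusion display value (hypothetical probeset IDs).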
class JunctionExpressionData:
def __init__(self, baseline_norm_exp, exper_norm_exp, pval, ped):
self.baseline_norm_exp = baseline_norm_exp;
self.exper_norm_exp = exper_norm_exp;
self.pval = pval;
self.ped = ped
def ConNI(self):
ls = []
for i in self.logConNI():
ls.append(math.pow(2, i))
return ls
def ExpNI(self):
ls = []
for i in self.logExpNI():
ls.append(math.pow(2, i))
return ls
def ConNIAvg(self):
return math.pow(2, statistics.avg(self.logConNI()))
def ExpNIAvg(self):
return math.pow(2, statistics.avg(self.logExpNI()))
def logConNI(self):
return self.baseline_norm_exp
def logExpNI(self):
return self.exper_norm_exp
def Pval(self):
return self.pval
def ProbesetExprData(self):
return self.ped
def __repr__(self):
return str(self.ConNI()) + '|' + str(self.ExpNI())
def calculateAllASPIREScores(p1, p2):
b1o = p1.ConNIAvg();
b2o = p2.ConNIAvg()
e1o = p1.ExpNIAvg();
e2o = p2.ExpNIAvg();
original_score = statistics.aspire_stringent(b1o, e1o, b2o, e2o)
index = 0;
baseline_scores = [] ### Loop through each control ratio and compare to control ratio mean
for b1 in p1.ConNI():
b2 = p2.ConNI()[index]
score = statistics.aspire_stringent(b2, e2o, b1, e1o);
index += 1
baseline_scores.append(score)
index = 0;
exp_scores = [] ### Loop through each experimental ratio and compare to control ratio mean
for e1 in p1.ExpNI():
e2 = p2.ExpNI()[index]
score = statistics.aspire_stringent(b1o, e1, b2o, e2);
index += 1
exp_scores.append(score)
try:
aspireP = statistics.runComparisonStatistic(baseline_scores, exp_scores, probability_statistic)
except Exception:
aspireP = 'NA' ### Occurs when analyzing two groups with no variance
if aspireP == 1: aspireP = 'NA'
"""
if aspireP<0.05 and oscore>0.2 and statistics.avg(exp_scores)<0:
index=0
for e1 in p1.ExpNI():
e2 = p2.ExpNI()[index]
score = statistics.aspire_stringent(b1,e1,b2,e2)
print p1.ExpNI(), p2.ExpNI(); print e1, e2
print e1o,e2o; print b1, b2; print score, original_score
print exp_scores, statistics.avg(exp_scores); kill"""
return baseline_scores, exp_scores, aspireP
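### Summary of the above: per-sample ASPIRE scores are generated for each group (each control
### ratio paired with the experimental means and vice versa), and the two score distributions
### are compared via runComparisonStatistic to produce aspireP.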
def stringListConvert(ls):
ls2 = []
for i in ls: ls2.append(str(i))
return ls2
def analyzeJunctionSplicing(nonlog_NI_db):
group_sizes = [];
original_array_indices = permute_lists[
0] ###p[0] is the original organization of the group samples prior to permutation
for group in original_array_indices: group_sizes.append(len(group))
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing pattern)
if len(filtered_probeset_db) > 0:
temp_db = {}
for probeset in nonlog_NI_db: temp_db[probeset] = []
for probeset in temp_db:
try:
filtered_probeset_db[probeset]
except KeyError:
del nonlog_NI_db[probeset]
### Used to export the relative individual adjusted probeset fold changes used for splicing-index values
if export_NI_values == 'yes':
global NIdata_export
summary_output = root_dir + 'AltResults/RawSpliceData/' + species + '/' + analysis_method + '/' + dataset_name[
:-1] + '.txt'
NIdata_export = export.ExportFile(summary_output)
title = string.join(['inclusion-probeset', 'exclusion-probeset'] + original_array_names, '\t') + '\n';
NIdata_export.write(title)
### Calculate a probeset p-value adjusted for constitutive expression levels (taken from splicing index method)
xl = 0
probeset_normIntensity_db = {}
for probeset in array_raw_group_values:
ed = exon_db[probeset];
geneid = ed.GeneID();
xl += 1
#if geneid in alt_junction_db and geneid in original_avg_const_exp_db: ### Don't want this filter since it causes problems for Trans-splicing
group_index = 0;
si_interim_group_db = {};
ge_threshold_count = 0;
value_count = 0
### Prepare normalized expression lists for reciprocal-junction algorithms
if geneid in avg_const_exp_db:
for group_values in array_raw_group_values[probeset]:
value_index = 0;
ratio_hash = []
for value in group_values: ###Calculate normalized ratios for each condition and save raw values for later permutation
exp_val = value;
ge_val = avg_const_exp_db[geneid][value_count];
exp_ratio = exp_val - ge_val
ratio_hash.append(exp_ratio);
value_index += 1;
value_count += 1
si_interim_group_db[group_index] = ratio_hash
group_index += 1
group1_ratios = si_interim_group_db[0];
group2_ratios = si_interim_group_db[1]
### Calculate and store simple expression summary stats
data_list1 = array_raw_group_values[probeset][0];
data_list2 = array_raw_group_values[probeset][1]
baseline_exp = statistics.avg(data_list1);
experimental_exp = statistics.avg(data_list2);
fold_change = experimental_exp - baseline_exp
#group_name1 = array_group_list[0]; group_name2 = array_group_list[1]
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1, data_list2, probability_statistic)
except Exception:
ttest_exp_p = 'NA'
if ttest_exp_p == 1: ttest_exp_p = 'NA'
adj_fold = statistics.avg(group2_ratios) - statistics.avg(group1_ratios)
ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, '')
try:
try:
normIntensityP = statistics.runComparisonStatistic(group1_ratios, group2_ratios,
probability_statistic)
except Exception:
#print group1_ratios,group2_ratios,array_raw_group_values[probeset],avg_const_exp_db[geneid];kill
normIntensityP = 'NA' ###occurs for constitutive probesets
except Exception:
normIntensityP = 0
if normIntensityP == 1: normIntensityP = 'NA'
ji = JunctionExpressionData(group1_ratios, group2_ratios, normIntensityP, ped)
probeset_normIntensity_db[probeset] = ji ### store and access this below
#if probeset == 'G6899622@J916374@j_at': print normIntensityP,group1_ratios,group2_ratios;kill
###Concatenate the two raw expression groups into a single list for permutation analysis
ls_concatenated = []
for group in array_raw_group_values[probeset]:
for entry in group: ls_concatenated.append(entry)
if analysis_method == 'linearregres': ###Convert out of log space
ls_concatenated = statistics.log_fold_conversion_fraction(ls_concatenated)
array_raw_group_values[probeset] = ls_concatenated
s = 0;
t = 0;
y = '';
denominator_events = 0;
excluded_probeset_db = {}
splice_event_list = [];
splice_event_list_mx = [];
splice_event_list_non_mx = [];
event_mx_temp = [];
permute_p_values = {} #use this to exclude duplicate mx events
for affygene in alt_junction_db:
if affygene in original_avg_const_exp_db:
constit_exp1 = original_avg_const_exp_db[affygene][0]
constit_exp2 = original_avg_const_exp_db[affygene][1]
ge_fold = constit_exp2 - constit_exp1
for event in alt_junction_db[affygene]:
if array_type == 'AltMouse':
#event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
#critical_exon_db[affygene,tuple(critical_exons)] = [1,'E'+str(e1a),'E'+str(e2b)] --- affygene,tuple(event) == key, 1 indicates both are either up or down together
event_call = event[0][0] + '-' + event[1][0]
exon_set1 = event[0][1];
exon_set2 = event[1][1]
probeset1 = exon_dbase[affygene, exon_set1]
probeset2 = exon_dbase[affygene, exon_set2]
critical_exon_list = critical_exon_db[affygene, tuple(event)]
if array_type == 'junction' or array_type == 'RNASeq':
event_call = 'ei-ex' ### Below objects from JunctionArrayEnsemblRules - class JunctionInformation
probeset1 = event.InclusionProbeset();
probeset2 = event.ExclusionProbeset()
exon_set1 = event.InclusionJunction();
exon_set2 = event.ExclusionJunction()
try:
novel_event = event.NovelEvent()
except Exception:
novel_event = 'known'
critical_exon_list = [1, event.CriticalExonSets()]
key, jd = formatJunctionData([probeset1, probeset2], affygene, critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try:
jd.setSymbol(annotate_db[affygene].Symbol())
except Exception:
null = []
#if '|' in probeset1: print probeset1, key,jd.InclusionDisplay();kill
probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
#print probeset1,probeset2, critical_exon_list,event_call,exon_set1,exon_set2;kill
if probeset1 in nonlog_NI_db and probeset2 in nonlog_NI_db:
denominator_events += 1
try:
p1 = probeset_normIntensity_db[probeset1]; p2 = probeset_normIntensity_db[probeset2]
except Exception:
print probeset1, probeset2
p1 = probeset_normIntensity_db[probeset1]
p2 = probeset_normIntensity_db[probeset2]
#if '|' in probeset1: print
pp1 = p1.Pval();
pp2 = p2.Pval()
baseline_ratio1 = p1.ConNIAvg()
experimental_ratio1 = p1.ExpNIAvg()
baseline_ratio2 = p2.ConNIAvg()
experimental_ratio2 = p2.ExpNIAvg()
ped1 = p1.ProbesetExprData()
ped2 = p2.ProbesetExprData()
Rin = '';
Rex = ''
r = 0 ###Variable used to determine if we should take the absolute value of dI for mutually exclusive events
if event_call == 'ei-ex': #means probeset1 is an exon inclusion and probeset2 is an exon exclusion
Rin = baseline_ratio1 / experimental_ratio1 # Rin=A/C
Rex = baseline_ratio2 / experimental_ratio2 # Rex=B/D
I1 = baseline_ratio1 / (baseline_ratio1 + baseline_ratio2)
I2 = experimental_ratio1 / (experimental_ratio1 + experimental_ratio2)
###When Rex is larger, the exp_ratio for exclusion is decreased in comparison to baseline.
###Thus, increased inclusion (when Rin is small, inclusion is big)
if (Rin > 1 and Rex < 1):
y = 'downregulated'
elif (Rin < 1 and Rex > 1):
y = 'upregulated'
elif (Rex < Rin):
y = 'downregulated'
else:
y = 'upregulated'
temp_list = []
if event_call == 'mx-mx':
temp_list.append(exon_set1);
temp_list.append(exon_set2);
temp_list.sort()
if (affygene, temp_list) not in event_mx_temp: #use this logic to prevent mx entries being added more than once
event_mx_temp.append((affygene, temp_list))
###Arbitrarily choose which exon-set will be Rin or Rex; the choice doesn't matter for mutually exclusive events since the absolute dI is taken below
Rin = baseline_ratio1 / experimental_ratio1 # Rin=A/C
Rex = baseline_ratio2 / experimental_ratio2 # Rex=B/D
I1 = baseline_ratio1 / (baseline_ratio1 + baseline_ratio2)
I2 = experimental_ratio1 / (experimental_ratio1 + experimental_ratio2)
y = 'mutually-exclusive';
r = 1
if analysis_method == 'ASPIRE' and Rex != '':
#if affygene == 'ENSMUSG00000000126': print Rin, Rex, probeset1, probeset2
if (Rin > 1 and Rex < 1) or (Rin < 1 and Rex > 1):
s += 1
in1 = ((Rex - 1.0) * Rin) / (Rex - Rin);
in2 = (Rex - 1.0) / (Rex - Rin)
dI = ((in2 - in1) + (I2 - I1)) / 2.0 #modified to give proper exon inclusion
dI = dI * (-1) ### Reverse the fold to make equivalent to splicing-index and FIRMA scores
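### Worked example with hypothetical ratios: Rin=2.0, Rex=0.5 from normalized intensities
### (2,1) and (1,2) gives in1=0.667, in2=0.333, I1=0.667, I2=0.333, so
### dI = ((0.333-0.667)+(0.333-0.667))/2 = -0.333, reported as 0.333 after the sign reversal.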
try:
baseline_scores, exp_scores, aspireP = calculateAllASPIREScores(p1, p2)
except Exception:
baseline_scores = [0]; exp_scores = [dI]; aspireP = 0
if export_NI_values == 'yes':
baseline_scores = stringListConvert(baseline_scores);
exp_scores = stringListConvert(exp_scores)
ev = string.join([probeset1, probeset2] + baseline_scores + exp_scores, '\t') + '\n';
NIdata_export.write(ev)
if max_replicates > 2 or equal_replicates == 2:
permute_p_values[(probeset1, probeset2)] = [aspireP, 'NA', 'NA', 'NA']
if r == 1: dI = abs(dI) ###Occurs when event is mutually exclusive
#if abs(dI)>alt_exon_logfold_cutoff: print [dI],pp1,pp2,aspireP;kill
#print [affygene,dI,pp1,pp2,aspireP,event.CriticalExonSets(),probeset1,probeset2,alt_exon_logfold_cutoff,p_threshold]
if ((pp1 < p_threshold or pp2 < p_threshold) or pp1 == 1 or pp1 == 'NA') and abs(
dI) > alt_exon_logfold_cutoff: ###Require that the splice event have a constitutive corrected p less than the user defined threshold
ejd = ExonJunctionData(dI, probeset1, probeset2, pp1, pp2, y, event_call,
critical_exon_list, affygene, ped1, ped2)
"""if probeset1 == 'ENSMUSG00000033335:E16.1-E17.1' and probeset2 == 'ENSMUSG00000033335:E16.1-E19.1':
print [dI,pp1,pp2,p_threshold,alt_exon_logfold_cutoff]
print baseline_scores, exp_scores, [aspireP]#;sys.exit()"""
ejd.setConstitutiveExpression(constit_exp1);
ejd.setConstitutiveFold(ge_fold)
if perform_permutation_analysis == 'yes':
splice_event_list.append((dI, ejd))
elif aspireP < permute_p_threshold or aspireP == 'NA':
splice_event_list.append((dI, ejd))
#if abs(dI)>.2: print probeset1, probeset2, critical_exon_list, [exon_set1], [exon_set2]
#if dI>.2 and aspireP<0.05: print baseline_scores,exp_scores,aspireP, statistics.avg(exp_scores), dI
elif array_type == 'junction' or array_type == 'RNASeq':
excluded_probeset_db[affygene + ':' + event.CriticalExonSets()[
0]] = probeset1, affygene, dI, 'NA', aspireP
if array_type == 'RNASeq':
try:
ejd.setNovelEvent(novel_event)
except Exception:
None
if analysis_method == 'linearregres' and Rex != '':
s += 1
log_fold, linregressP, rsqrd_status = getLinearRegressionScores(probeset1, probeset2,
group_sizes)
log_fold = log_fold ### Note: unlike the splicing-index and FIRMA scores, no sign reversal is applied here
if max_replicates > 2 or equal_replicates == 2: permute_p_values[(probeset1, probeset2)] = [
linregressP, 'NA', 'NA', 'NA']
if rsqrd_status == 'proceed':
if ((pp1 < p_threshold or pp2 < p_threshold) or pp1 == 1 or pp1 == 'NA') and abs(
log_fold) > alt_exon_logfold_cutoff: ###Require that the splice event have a constitutive corrected p less than the user defined threshold
ejd = ExonJunctionData(log_fold, probeset1, probeset2, pp1, pp2, y, event_call,
critical_exon_list, affygene, ped1, ped2)
ejd.setConstitutiveExpression(constit_exp1);
ejd.setConstitutiveFold(ge_fold)
if perform_permutation_analysis == 'yes':
splice_event_list.append((log_fold, ejd))
elif linregressP < permute_p_threshold:
splice_event_list.append((log_fold, ejd))
#if probeset1 == 'G6990053@762121_762232_at' and probeset2 == 'G6990053@J926254@j_at':
#print event_call, critical_exon_list,affygene, Rin, Rex, y, temp_list;kill
elif array_type == 'junction' or array_type == 'RNASeq':
excluded_probeset_db[affygene + ':' + event.CriticalExonSets()[
0]] = probeset1, affygene, log_fold, 'NA', linregressP
if array_type == 'RNASeq':
try:
ejd.setNovelEvent(novel_event)
except Exception:
None
else:
t += 1
clearObjectsFromMemory(probeset_normIntensity_db)
probeset_normIntensity_db = {}; ### Potentially large memory object containing summary stats for all probesets
statistics.adjustPermuteStats(permute_p_values)
summary_data_db['denominator_exp_events'] = denominator_events
print "Number of exon-events analyzed:", s
print "Number of exon-events excluded:", t
return splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db
def maxReplicates():
replicates = 0;
greater_than_two = 0;
greater_than_one = 0;
group_sizes = []
for probeset in array_raw_group_values:
for group_values in array_raw_group_values[probeset]:
try:
replicates += len(group_values);
group_sizes.append(len(group_values))
if len(group_values) > 2:
greater_than_two += 1
elif len(group_values) > 1:
greater_than_one += 1
except Exception:
replicates += len(array_raw_group_values[probeset]); break
break
group_sizes = unique.unique(group_sizes)
if len(group_sizes) == 1:
equal_replicates = group_sizes[0]
else:
equal_replicates = 0
max_replicates = replicates / float(original_conditions)
if max_replicates < 2.01:
if greater_than_two > 0 and greater_than_one > 0: max_replicates = 3
return max_replicates, equal_replicates
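### e.g. with two groups of 3 and 5 samples: replicates=8 and original_conditions=2, so
### max_replicates=4.0 and equal_replicates=0 (since the group sizes differ).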
def furtherProcessJunctionScores(splice_event_list, probeset_comp_db, permute_p_values):
splice_event_list.sort();
splice_event_list.reverse()
print "filtered %s scores:" % analysis_method, len(splice_event_list)
if perform_permutation_analysis == 'yes':
###*********BEGIN PERMUTATION ANALYSIS*********
if max_replicates > 2 or equal_replicates == 2:
splice_event_list, p_value_call, permute_p_values = permuteSplicingScores(splice_event_list)
else:
print "WARNING...Not enough replicates to perform permutation analysis."
p_value_call = '';
permute_p_values = {}
else:
if max_replicates > 2 or equal_replicates == 2:
if probability_statistic == 'unpaired t-test':
p_value_call = analysis_method + '-OneWayAnova'
else:
p_value_call = analysis_method + '-' + probability_statistic
else:
if probability_statistic == 'unpaired t-test':
p_value_call = 'OneWayAnova';
permute_p_values = {}
else:
p_value_call = probability_statistic;
permute_p_values = {}
print len(splice_event_list), 'alternative events after subsequent filtering (optional)'
### Get ExonJunction annotations
junction_splicing_annot_db = getJunctionSplicingAnnotations(probeset_comp_db)
regulated_exon_junction_db = {};
new_splice_event_list = []
if filter_for_AS == 'yes': print "Filtering for evidence of Alternative Splicing"
for (fold, ejd) in splice_event_list:
proceed = 'no'
if filter_for_AS == 'yes':
try:
ja = junction_splicing_annot_db[ejd.Probeset1(), ejd.Probeset2()];
splicing_call = ja.SplicingCall()
if splicing_call == 1: proceed = 'yes'
except KeyError:
proceed = 'no'
else:
proceed = 'yes'
if proceed == 'yes':
key, jd = formatJunctionData([ejd.Probeset1(), ejd.Probeset2()], ejd.GeneID(), ejd.CriticalExons())
regulated_exon_junction_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
new_splice_event_list.append((fold, ejd))
### Add junction probeset lookup for reciprocal junctions composed of an exonid (not in protein database currently)
if array_type == 'RNASeq' and '-' not in key[0]: ### Thus, it is an exon compared to a junction
events = alt_junction_db[ejd.GeneID()]
for ji in events:
if (ji.InclusionProbeset(), ji.ExclusionProbeset()) == key:
jd.setInclusionLookup(
ji.InclusionLookup()) ### This is the source junction from which the exon ID comes from
probeset_comp_db[ji.InclusionLookup(), ji.ExclusionProbeset()] = jd
#print ji.InclusionProbeset(),ji.ExclusionProbeset(),' ',ji.InclusionLookup()
if filter_for_AS == 'yes': print len(
new_splice_event_list), "remaining after filtering for evidence of alternative splicing"
filtered_exon_db = {}
for junctions in probeset_comp_db:
rj = probeset_comp_db[
junctions] ### Add splicing annotations to the AltMouse junction DBs (needed for permutation analysis statistics and filtering)
try:
ja = junction_splicing_annot_db[junctions]; splicing_call = ja.SplicingCall(); rj.setSplicingCall(
ja.SplicingCall())
except KeyError:
rj.setSplicingCall(0)
if filter_for_AS == 'yes': filtered_exon_db[junctions] = rj
for junctions in regulated_exon_junction_db:
rj = regulated_exon_junction_db[junctions]
try:
ja = junction_splicing_annot_db[junctions]; rj.setSplicingCall(ja.SplicingCall())
except KeyError:
rj.setSplicingCall(0)
if filter_for_AS == 'yes': probeset_comp_db = filtered_exon_db
try:
clearObjectsFromMemory(alt_junction_db)
except Exception:
null = []
return new_splice_event_list, p_value_call, permute_p_values, probeset_comp_db, regulated_exon_junction_db
class SplicingScoreData:
def Method(self):
###e.g. ASPIRE
return self._method
def Score(self): return str(self._score)
def Probeset1(self): return self._probeset1
def Probeset2(self): return self._probeset2
def RegulationCall(self): return self._regulation_call
def GeneID(self): return self._geneid
def CriticalExons(self): return self._critical_exon_list[1]
def CriticalExonTuple(self): return self._critical_exon_list
def TTestNormalizedRatios(self): return self._normIntensityP
def TTestNormalizedRatios2(self): return self._normIntensityP2
def setConstitutiveFold(self, exp_log_ratio): self._exp_log_ratio = exp_log_ratio
def ConstitutiveFold(self): return str(self._exp_log_ratio)
def setConstitutiveExpression(self, const_baseline): self.const_baseline = const_baseline
def ConstitutiveExpression(self): return str(self.const_baseline)
def setProbesetExpressionData(self, ped): self.ped1 = ped
def ProbesetExprData1(self): return self.ped1
def ProbesetExprData2(self): return self.ped2
def setNovelEvent(self, novel_event): self._novel_event = novel_event
def NovelEvent(self): return self._novel_event
def EventCall(self):
###e.g. Exon inclusion (ei) Exon exclusion (ex), ei-ex, reported in that direction
return self._event_call
def Report(self):
output = self.Method() + '|' + self.GeneID() + '|' + string.join(self.CriticalExons(), '|')
return output
def __repr__(self): return self.Report()
class ExonJunctionData(SplicingScoreData):
def __init__(self, score, probeset1, probeset2, probeset1_p, probeset2_p, regulation_call, event_call,
critical_exon_list, affygene, ped1, ped2):
self._score = score;
self._probeset1 = probeset1;
self._probeset2 = probeset2;
self._regulation_call = regulation_call
self._event_call = event_call;
self._critical_exon_list = critical_exon_list;
self._geneid = affygene
self._method = analysis_method;
self._normIntensityP = probeset1_p;
self._normIntensityP2 = probeset2_p
self.ped1 = ped1;
self.ped2 = ped2
class ExonData(SplicingScoreData):
def __init__(self, splicing_index, probeset, critical_exon_list, geneid, group1_ratios, group2_ratios,
normIntensityP, opposite_SI_log_mean):
self._score = splicing_index;
self._probeset1 = probeset;
self._opposite_SI_log_mean = opposite_SI_log_mean
self._critical_exon_list = critical_exon_list;
self._geneid = geneid
self._baseline_ratio1 = group1_ratios;
self._experimental_ratio1 = group2_ratios
self._normIntensityP = normIntensityP
self._method = analysis_method;
self._event_call = 'exon-inclusion'
if splicing_index > 0:
regulation_call = 'downregulated' ###Since baseline is the numerator ratio
else:
regulation_call = 'upregulated'
self._regulation_call = regulation_call
def OppositeSIRatios(self):
return self._opposite_SI_log_mean
class ExcludedExonData(ExonData):
def __init__(self, splicing_index, geneid, normIntensityP):
self._score = splicing_index;
self._geneid = geneid;
self._normIntensityP = normIntensityP
def getAllPossibleLinearRegressionScores(probeset1, probeset2, positions, group_sizes):
### Get raw expression values for the two probesets
p1_exp = array_raw_group_values[probeset1]
p2_exp = array_raw_group_values[probeset2]
all_possible_scores = [];
index1 = 0 ### Perform all possible pairwise comparisons between groups (not sure how this will work for 10+ groups)
for (pos1a, pos2a) in positions:
index2 = 0
for (pos1b, pos2b) in positions:
if pos1a != pos1b:
p1_g1 = p1_exp[pos1a:pos2a];
p1_g2 = p1_exp[pos1b:pos2b]
p2_g1 = p2_exp[pos1a:pos2a];
p2_g2 = p2_exp[pos1b:pos2b]
#log_fold, linregressP, rsqrd = getAllLinearRegressionScores(probeset1,probeset2,p1_g1,p2_g1,p1_g2,p2_g2,len(group_sizes)) ### Used to calculate a pairwise group pvalue
log_fold, rsqrd = performLinearRegression(p1_g1, p2_g1, p1_g2, p2_g2)
if log_fold < 0:
i1, i2 = index2, index1 ### all scores should indicate upregulation
else:
i1, i2 = index1, index2
all_possible_scores.append((abs(log_fold), i1, i2))
index2 += 1
index1 += 1
all_possible_scores.sort()
try:
log_fold, index1, index2 = all_possible_scores[-1]
except Exception:
log_fold = 0; index1 = 0; index2 = 0
return log_fold, index1, index2
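### Note: 'positions' is presumably a list of per-group (start, stop) slice boundaries into
### the expression arrays (e.g., group_sizes [3,4] -> [(0,3),(3,7)]), so the nested loops
### above compare every ordered pair of groups and keep the largest absolute log-fold.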
def getLinearRegressionScores(probeset1, probeset2, group_sizes):
### Get raw expression values for the two probesets
p1_exp = array_raw_group_values[probeset1]
p2_exp = array_raw_group_values[probeset2]
try:
p1_g1 = p1_exp[:group_sizes[0]];
p1_g2 = p1_exp[group_sizes[0]:]
p2_g1 = p2_exp[:group_sizes[0]];
p2_g2 = p2_exp[group_sizes[0]:]
except Exception:
print probeset1, probeset2
print p1_exp
print p2_exp
print group_sizes
force_kill ### intentional NameError to halt execution after printing the diagnostics above
log_fold, linregressP, rsqrd = getAllLinearRegressionScores(probeset1, probeset2, p1_g1, p2_g1, p1_g2, p2_g2, 2)
return log_fold, linregressP, rsqrd
def getAllLinearRegressionScores(probeset1, probeset2, p1_g1, p2_g1, p1_g2, p2_g2, groups):
log_fold, rsqrd = performLinearRegression(p1_g1, p2_g1, p1_g2, p2_g2)
try:
### Repeat for each sample versus baselines to calculate a p-value
index = 0;
group1_scores = []
for p1_g1_sample in p1_g1:
p2_g1_sample = p2_g1[index]
log_f, rs = performLinearRegression(p1_g1, p2_g1, [p1_g1_sample], [p2_g1_sample])
group1_scores.append(log_f);
index += 1
index = 0;
group2_scores = []
for p1_g2_sample in p1_g2:
p2_g2_sample = p2_g2[index]
log_f, rs = performLinearRegression(p1_g1, p2_g1, [p1_g2_sample], [p2_g2_sample])
group2_scores.append(log_f);
index += 1
try:
linregressP = statistics.runComparisonStatistic(group1_scores, group2_scores, probability_statistic)
except Exception:
linregressP = 0;
group1_scores = [0];
group2_scores = [log_fold]
if linregressP == 1: linregressP = 0
except Exception:
linregressP = 0;
group1_scores = [0];
group2_scores = [log_fold]
if export_NI_values == 'yes' and groups == 2:
group1_scores = stringListConvert(group1_scores)
group2_scores = stringListConvert(group2_scores)
ev = string.join([probeset1, probeset2] + group1_scores + group2_scores, '\t') + '\n';
NIdata_export.write(ev)
return log_fold, linregressP, rsqrd
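### The per-sample loops above regress the full group-1 baseline against each individual
### sample to build two distributions of single-sample log-folds; the comparison statistic
### over those distributions (statistics.runComparisonStatistic) supplies linregressP,
### reported alongside the pooled group-versus-group log_fold.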
def performLinearRegression(p1_g1, p2_g1, p1_g2, p2_g2):
return_rsqrd = 'no'
if use_R == 'yes': ###Uses the RLM algorithm
#print "Performing Linear Regression analysis using rlm."
g1_slope = statistics.LinearRegression(p1_g1, p2_g1, return_rsqrd)
g2_slope = statistics.LinearRegression(p1_g2, p2_g2, return_rsqrd)
else: ###Uses a basic least squared method
#print "Performing Linear Regression analysis using python specific methods."
g1_slope = statistics.simpleLinRegress(p1_g1, p2_g1)
g2_slope = statistics.simpleLinRegress(p1_g2, p2_g2)
log_fold = statistics.convert_to_log_fold(g2_slope / g1_slope)
rsqrd = 'proceed'
#if g1_rsqrd > 0 and g2_rsqrd > 0: rsqrd = 'proceed'
#else: rsqrd = 'halt'
return log_fold, rsqrd
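### A minimal worked example of the slope-ratio score above, assuming
### statistics.convert_to_log_fold returns log2 of a non-log ratio (illustrative numbers):
###   g1_slope = 1.0 (baseline), g2_slope = 4.0 (experimental)
###   log_fold = convert_to_log_fold(4.0 / 1.0) -> 2.0, i.e. a 4-fold increase in slope.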
########### Permutation Analysis Functions ###########
def permuteLinearRegression(probeset1, probeset2, p):
p1_exp = array_raw_group_values[probeset1]
p2_exp = array_raw_group_values[probeset2]
p1_g1, p1_g2 = permute_samples(p1_exp, p)
p2_g1, p2_g2 = permute_samples(p2_exp, p)
return_rsqrd = 'no'
if use_R == 'yes': ###Uses the RLM algorithm
g1_slope = statistics.LinearRegression(p1_g1, p2_g1, return_rsqrd)
g2_slope = statistics.LinearRegression(p1_g2, p2_g2, return_rsqrd)
else: ###Uses a basic least squared method
g1_slope = statistics.simpleLinRegress(p1_g1, p2_g1)
g2_slope = statistics.simpleLinRegress(p1_g2, p2_g2)
log_fold = statistics.convert_to_log_fold(g2_slope / g1_slope)
return log_fold
def permuteSplicingScores(splice_event_list):
p_value_call = 'lowest_raw_p'
permute_p_values = {};
splice_event_list2 = []
if len(permute_lists) > 0:
#tuple_data in splice_event_list = dI,probeset1,probeset2,y,event_call,critical_exon_list
all_samples = [];
a = 0
for (score, x) in splice_event_list:
###NOTE: This reference dI differs slightly from the one calculated below, since the values are derived from raw relative ratios rather than the average
###Solution: Use the first calculated dI as the reference
score = score * (-1) ### Reverse the score to make equivalent to splicing-index and FIRMA scores
ref_splice_val = score;
probeset1 = x.Probeset1();
probeset2 = x.Probeset2();
affygene = x.GeneID()
y = 0;
p_splice_val_dist = [];
count = 0;
return_rsqrd = 'no'
for p in permute_lists: ###There are two lists in each entry
count += 1
permute = 'yes'
if analysis_method == 'ASPIRE':
p_splice_val = permute_ASPIRE_filtered(affygene, probeset1, probeset2, p, y, ref_splice_val, x)
elif analysis_method == 'linearregres':
slope_ratio = permuteLinearRegression(probeset1, probeset2, p)
p_splice_val = slope_ratio
if p_splice_val != 'null': p_splice_val_dist.append(p_splice_val)
y += 1
p_splice_val_dist.sort()
new_ref_splice_val = str(abs(ref_splice_val));
new_ref_splice_val = float(new_ref_splice_val[0:8]) #otherwise won't match up the scores correctly
if analysis_method == 'linearregres':
if ref_splice_val < 0:
p_splice_val_dist2 = []
for val in p_splice_val_dist: p_splice_val_dist2.append(-1 * val)
p_splice_val_dist = p_splice_val_dist2;
p_splice_val_dist.reverse()
p_val, pos_permute, total_permute, greater_than_true_permute = statistics.permute_p(p_splice_val_dist,
new_ref_splice_val,
len(permute_lists))
#print p_val,ref_splice_val, pos_permute, total_permute, greater_than_true_permute,p_splice_val_dist[-3:];kill
###When two groups are of equal size, there will be 2 pos_permutes rather than 1
if len(permute_lists[0][0]) == len(permute_lists[0][1]):
greater_than_true_permute = (pos_permute / 2) - 1 #the two groups are of equal size
else:
greater_than_true_permute = (pos_permute) - 1
if analysis_method == 'linearregres': greater_than_true_permute = (
pos_permute) - 1 ###since this is a one-sided test, unlike ASPIRE
###Below equation is fine if the population is large
permute_p_values[(probeset1, probeset2)] = [p_val, pos_permute, total_permute, greater_than_true_permute]
###Remove non-significant linear regression results
if analysis_method == 'linearregres':
if p_val <= permute_p_threshold or greater_than_true_permute < 2: splice_event_list2.append(
(score, x)) ###<= since many p=0.05
print "Number of permutation p filtered splice event:", len(splice_event_list2)
if len(permute_p_values) > 0: p_value_call = 'permuted_aspire_p-value'
if analysis_method == 'linearregres': splice_event_list = splice_event_list2
return splice_event_list, p_value_call, permute_p_values
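### The permutation p-value above is estimated empirically: statistics.permute_p presumably
### returns the fraction of permuted scores at least as extreme as the observed score
### (pos_permute/total_permute). Subtracting 1 from pos_permute removes the self-match
### (the true labeling is among the permutations); with equal group sizes each partition
### occurs twice, hence pos_permute/2 - 1.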
def permute_ASPIRE_filtered(affygene, probeset1, probeset2, p, y, ref_splice_val, x):
### Get raw expression values for each permuted group for the two probesets
b1, e1 = permute_dI(array_raw_group_values[probeset1], p)
try:
b2, e2 = permute_dI(array_raw_group_values[probeset2], p)
except IndexError:
print probeset2, array_raw_group_values[probeset2], p; kill
### Get the average constitutive expression values (averaged per-sample across probesets) for each permuted group
try:
bc, ec = permute_dI(avg_const_exp_db[affygene], p)
except IndexError:
print affygene, avg_const_exp_db[affygene], p; kill
if factor_out_expression_changes == 'no':
ec = bc
### Analyze the averaged ratios of junction expression relative to permuted constitutive expression
try:
p_splice_val = abs(
statistics.aspire_stringent(b1 / bc, e1 / ec, b2 / bc, e2 / ec)) ### This is the permuted ASPIRE score
except Exception:
p_splice_val = 0
#print p_splice_val, ref_splice_val, probeset1, probeset2, affygene; dog
if y == 0: ###The first permutation is always the real one
### Grab the absolute number with small number of decimal places
try:
new_ref_splice_val = str(p_splice_val);
new_ref_splice_val = float(new_ref_splice_val[0:8])
ref_splice_val = str(abs(ref_splice_val));
ref_splice_val = float(ref_splice_val[0:8]);
y += 1
except ValueError:
###Only get this error if your ref_splice_val is a null
print y, probeset1, probeset2;
print ref_splice_val, new_ref_splice_val, p
print b1 / bc, e1 / ec, b2 / bc, e2 / ec;
print (b1 / bc) / (e1 / ec), (b2 / bc) / (e2 / ec)
print x[7], x[8], x[9], x[10];
kill
return p_splice_val
def permute_samples(a, p):
baseline = [];
experimental = []
for p_index in p[0]:
baseline.append(a[p_index]) ###Append expression values for each permuted list
for p_index in p[1]:
experimental.append(a[p_index])
return baseline, experimental
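### Each permutation entry p is a pair of index lists: p[0] selects the permuted 'baseline'
### samples and p[1] the permuted 'experimental' samples. For example (illustrative indices),
### p = ([0, 2, 4], [1, 3, 5]) re-partitions six samples into two groups of three.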
def permute_dI(all_samples, p):
baseline, experimental = permute_samples(all_samples, p)
#if get_non_log_avg == 'no':
gb = statistics.avg(baseline);
ge = statistics.avg(experimental) ###Group avg baseline, group avg experimental value
gb = statistics.log_fold_conversion_fraction(gb);
ge = statistics.log_fold_conversion_fraction(ge)
#else:
#baseline = statistics.log_fold_conversion_fraction(baseline); experimental = statistics.log_fold_conversion_fraction(experimental)
#gb = statistics.avg(baseline); ge = statistics.avg(experimental) ###Group avg baseline, group avg experimental value
return gb, ge
def format_exon_functional_attributes(affygene, critical_probeset_list, functional_attribute_db, up_exon_list,
down_exon_list, protein_length_list):
### Add functional attributes
functional_attribute_list2 = []
new_functional_attribute_str = ''
new_seq_attribute_str = ''
new_functional_attribute_list = []
if array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null':
critical_probesets = critical_probeset_list[0]
else:
critical_probesets = tuple(critical_probeset_list)
key = affygene, critical_probesets
if key in functional_attribute_db:
###Grab exon IDs corresponding to the critical probesets
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
try:
critical_exons = regulated_exon_junction_db[critical_probesets].CriticalExons() ###For junction arrays
except Exception:
print key, functional_attribute_db[key];kill
else:
critical_exons = [exon_db[critical_probesets].ExonID()] ###For exon arrays
for exon in critical_exons:
for entry in functional_attribute_db[key]:
x = 0
functional_attribute = entry[0]
call = entry[1] # +, -, or ~
if ('AA:' in functional_attribute) or ('ref' in functional_attribute):
x = 1
if exon in up_exon_list:
### design logic to determine whether up or down regulation promotes the functional change (e.g. NMD)
if 'ref' in functional_attribute:
new_functional_attribute = '(~)' + functional_attribute
data_tuple = new_functional_attribute, exon
elif call == '+' or call == '~':
new_functional_attribute = '(+)' + functional_attribute
data_tuple = new_functional_attribute, exon
elif call == '-':
new_functional_attribute = '(-)' + functional_attribute
data_tuple = new_functional_attribute, exon
if 'AA:' in functional_attribute and '?' not in functional_attribute:
functional_attribute_temp = functional_attribute[3:]
if call == '+' or call == '~':
val1, val2 = string.split(functional_attribute_temp, '->')
else:
val2, val1 = string.split(functional_attribute_temp, '->')
val1, null = string.split(val1, '(')
val2, null = string.split(val2, '(')
protein_length_list.append([val1, val2])
elif exon in down_exon_list:
if 'ref' in functional_attribute:
new_functional_attribute = '(~)' + functional_attribute
data_tuple = new_functional_attribute, exon
elif call == '+' or call == '~':
new_functional_attribute = '(-)' + functional_attribute
data_tuple = new_functional_attribute, exon
elif call == '-':
new_functional_attribute = '(+)' + functional_attribute
data_tuple = new_functional_attribute, exon
if 'AA:' in functional_attribute and '?' not in functional_attribute:
functional_attribute_temp = functional_attribute[3:]
if call == '+' or call == '~':
val2, val1 = string.split(functional_attribute_temp, '->')
else:
val1, val2 = string.split(functional_attribute_temp, '->')
val1, null = string.split(val1, '(')
val2, null = string.split(val2, '(')
protein_length_list.append([val1, val2])
if x == 0 or (exclude_protein_details != 'yes'):
try:
new_functional_attribute_list.append(new_functional_attribute)
except UnboundLocalError:
print entry
print up_exon_list, down_exon_list
print exon, critical_exons
print critical_probesets, (key, affygene, critical_probesets)
for i in functional_attribute_db:
print i, functional_attribute_db[i];
kill
###remove protein sequence prediction_data
if 'sequence' not in data_tuple[0]:
if x == 0 or exclude_protein_details == 'no':
functional_attribute_list2.append(data_tuple)
###Get rid of duplicates, but maintain non-alphabetical order
new_functional_attribute_list2 = []
for entry in new_functional_attribute_list:
if entry not in new_functional_attribute_list2:
new_functional_attribute_list2.append(entry)
new_functional_attribute_list = new_functional_attribute_list2
#new_functional_attribute_list = unique.unique(new_functional_attribute_list)
#new_functional_attribute_list.sort()
for entry in new_functional_attribute_list:
if 'sequence' in entry:
new_seq_attribute_str = new_seq_attribute_str + entry + ','
else:
new_functional_attribute_str = new_functional_attribute_str + entry + ','
new_seq_attribute_str = new_seq_attribute_str[0:-1]
new_functional_attribute_str = new_functional_attribute_str[0:-1]
return new_functional_attribute_str, functional_attribute_list2, new_seq_attribute_str, protein_length_list
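### Worked example of the attribute formatting above (illustrative values): for an exon in
### up_exon_list with the entry ('AA:120(aa)->95(aa)', '+'), the reported attribute becomes
### '(+)AA:120(aa)->95(aa)' and ['120', '95'] is appended to protein_length_list.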
def grab_summary_dataset_annotations(functional_attribute_db, comparison_db, include_truncation_results_specifically):
###If a second filtering database present, filter the 1st database based on protein length changes
fa_db = {};
cp_db = {} ###index the geneids for efficient recall in the next segment of code
for (affygene, annotation) in functional_attribute_db:
try:
fa_db[affygene].append(annotation)
except KeyError:
fa_db[affygene] = [annotation]
for (affygene, annotation) in comparison_db:
try:
cp_db[affygene].append(annotation)
except KeyError:
cp_db[affygene] = [annotation]
functional_attribute_db_exclude = {}
for affygene in fa_db:
if affygene in cp_db:
for annotation2 in cp_db[affygene]:
if ('trunc' in annotation2) or ('frag' in annotation2) or ('NMDs' in annotation2):
try:
functional_attribute_db_exclude[affygene].append(annotation2)
except KeyError:
functional_attribute_db_exclude[affygene] = [annotation2]
functional_annotation_db = {}
for (affygene, annotation) in functional_attribute_db:
### if we wish to filter the 1st database based on protein length changes
if affygene not in functional_attribute_db_exclude:
try:
functional_annotation_db[annotation] += 1
except KeyError:
functional_annotation_db[annotation] = 1
elif include_truncation_results_specifically == 'yes':
for annotation_val in functional_attribute_db_exclude[affygene]:
try:
functional_annotation_db[annotation_val] += 1
except KeyError:
functional_annotation_db[annotation_val] = 1
annotation_list = []
annotation_list_ranked = []
for annotation in functional_annotation_db:
if 'micro' not in annotation:
count = functional_annotation_db[annotation]
annotation_list.append((annotation, count))
annotation_list_ranked.append((count, annotation))
annotation_list_ranked.sort();
annotation_list_ranked.reverse()
return annotation_list, annotation_list_ranked
def reorganize_attribute_entries(attribute_db1, build_attribute_direction_databases):
attribute_db2 = {};
inclusion_attributes_hit_count = {};
exclusion_attributes_hit_count = {}
genes_with_inclusion_attributes = {};
genes_with_exclusion_attributes = {};
###This database has unique gene, attribute information. No attribute will now be represented more than once per gene
for key in attribute_db1:
###Make gene the key and attribute (functional elements or protein information), along with the associated exons the values
affygene = key[0];
exon_attribute = key[1];
exon_list = attribute_db1[key]
exon_list = unique.unique(exon_list);
exon_list.sort()
attribute_exon_info = exon_attribute, exon_list #e.g. 5'UTR, [E1,E2,E3]
try:
attribute_db2[affygene].append(attribute_exon_info)
except KeyError:
attribute_db2[affygene] = [attribute_exon_info]
###Separate out attribute data by direction for over-representation analysis
if build_attribute_direction_databases == 'yes':
direction = exon_attribute[1:2];
unique_gene_attribute = exon_attribute[3:]
if direction == '+':
try:
inclusion_attributes_hit_count[unique_gene_attribute].append(affygene)
except KeyError:
inclusion_attributes_hit_count[unique_gene_attribute] = [affygene]
genes_with_inclusion_attributes[affygene] = []
if direction == '-':
try:
exclusion_attributes_hit_count[unique_gene_attribute].append(affygene)
except KeyError:
exclusion_attributes_hit_count[unique_gene_attribute] = [affygene]
genes_with_exclusion_attributes[affygene] = []
inclusion_attributes_hit_count = eliminate_redundant_dict_values(inclusion_attributes_hit_count)
exclusion_attributes_hit_count = eliminate_redundant_dict_values(exclusion_attributes_hit_count)
"""for key in inclusion_attributes_hit_count:
inclusion_attributes_hit_count[key] = len(inclusion_attributes_hit_count[key])
for key in exclusion_attributes_hit_count:
exclusion_attributes_hit_count[key] = len(exclusion_attributes_hit_count[key])"""
if build_attribute_direction_databases == 'yes':
return attribute_db2, inclusion_attributes_hit_count, genes_with_inclusion_attributes, exclusion_attributes_hit_count, genes_with_exclusion_attributes
else:
return attribute_db2
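### Keys of attribute_db1 are (gene, exon_attribute) pairs in which exon_attribute carries a
### direction prefix, e.g. '(+)NMD' -> direction '+' (exon_attribute[1:2]) and attribute
### 'NMD' (exon_attribute[3:]), allowing separate inclusion/exclusion hit-count databases.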
########### Misc. Functions ###########
def eliminate_redundant_dict_values(database):
db1 = {}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
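### Example, assuming unique.unique() simply de-duplicates a list:
###   eliminate_redundant_dict_values({'GeneX': ['E2', 'E1', 'E2']}) -> {'GeneX': ['E1', 'E2']}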
def add_a_space(string):
if len(string) < 1:
string = ' '
return string
def convertToLog2(data_list):
return map(lambda x: math.log(float(x), 2), data_list)
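### Example: convertToLog2(['2', '8', '0.5']) -> [1.0, 3.0, -1.0]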
def addGlobalFudgeFactor(data_list, data_type):
new_list = []
if data_type == 'log':
for item in data_list:
new_item = statistics.log_fold_conversion_fraction(item)
new_list.append(float(new_item) + global_addition_factor)
new_list = convertToLog2(new_list)
else:
for item in data_list: new_list.append(float(item) + global_addition_factor)
return new_list
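### For 'log' input, each value is first un-logged (statistics.log_fold_conversion_fraction),
### offset by the module-level global_addition_factor, then re-logged via convertToLog2;
### non-log input is offset directly (e.g., with global_addition_factor = 1, 3.0 -> 4.0).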
def copyDirectoryPDFs(root_dir, AS='AS'):
directories = ['AltResults/AlternativeOutputDirectoryDescription.pdf',
'AltResultsDirectoryDescription.pdf',
'ClusteringDirectoryDescription.pdf',
'ExpressionInputDirectoryDescription.pdf',
'ExpressionOutputDirectoryDescription.pdf',
'GO-Elite/GO-Elite_resultsDirectoryDescription.pdf',
'GO-EliteDirectoryDescription.pdf',
'RootDirectoryDescription.pdf']
import shutil
for dir in directories:
file = string.split(dir, '/')[-1]
proceed = True
if 'AltResult' in dir and AS != 'AS': proceed = False
if proceed:
try:
shutil.copyfile(filepath('Documentation/DirectoryDescription/' + file), filepath(root_dir + dir))
except Exception:
pass
def restrictProbesets(dataset_name):
### Take a file with probesets and only perform the splicing-analysis on these (e.g. those already identified from a previous run with a specific pattern)
### Allows for a proper denominator when calculating z-scores for microRNA and protein-domain ORA
probeset_list_filename = import_dir = '/AltDatabaseNoVersion/filtering';
filtered_probeset_db = {}
if array_type == 'RNASeq':
id_name = 'exon/junction IDs'
else:
id_name = 'array IDs'
try:
dir_list = read_directory(import_dir)
fn_dir = filepath(import_dir[1:])
except Exception:
dir_list = []; fn_dir = ''
if len(dir_list) > 0:
for file in dir_list:
if file[:-4] in dataset_name:
fn = fn_dir + '/' + file;
fn = string.replace(fn, 'AltDatabase', 'AltDatabaseNoVersion')
filtered_probeset_db = importGeneric(fn)
print len(filtered_probeset_db), id_name, "will be used to restrict analysis..."
return filtered_probeset_db
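### A filtering file applies when its name minus the extension is a substring of the dataset
### name: e.g., a hypothetical 'Hs_RNASeq_test.txt' in /AltDatabaseNoVersion/filtering would
### restrict a dataset named 'Hs_RNASeq_test-' (via the file[:-4] in dataset_name test above).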
def RunAltAnalyze():
#print altanalyze_files
#print '!!!!!starting to run alt-exon analysis'
#returnLargeGlobalVars()
global annotate_db;
annotate_db = {};
global splice_event_list;
splice_event_list = [];
residuals_dirlist = []
global dataset_name;
global constitutive_probeset_db;
global exon_db;
dir_list2 = [];
import_dir2 = ''
if array_type == 'AltMouse':
import_dir = root_dir + 'AltExpression/' + array_type
elif array_type == 'exon':
import_dir = root_dir + 'AltExpression/ExonArray/' + species + '/'
elif array_type == 'gene':
import_dir = root_dir + 'AltExpression/GeneArray/' + species + '/'
elif array_type == 'junction':
import_dir = root_dir + 'AltExpression/JunctionArray/' + species + '/'
else:
import_dir = root_dir + 'AltExpression/' + array_type + '/' + species + '/'
#if analysis_method == 'ASPIRE' or analysis_method == 'linearregres' or analysis_method == 'splicing-index':
if array_type != 'AltMouse':
gene_annotation_file = "AltDatabase/ensembl/" + species + "/" + species + "_Ensembl-annotations.txt"
else:
gene_annotation_file = "AltDatabase/" + species + "/" + array_type + "/" + array_type + "_gene_annotations.txt"
annotate_db = ExonAnalyze_module.import_annotations(gene_annotation_file, array_type)
###Import probe-level associations
exon_db = {};
filtered_arrayids = {};
filter_status = 'no'
try:
constitutive_probeset_db, exon_db, genes_being_analyzed = importSplicingAnnotationDatabase(
probeset_annotations_file, array_type, filtered_arrayids, filter_status)
except IOError:
print_out = 'The annotation database: \n' + probeset_annotations_file + '\nwas not found. Ensure this file was not deleted and that the correct species has been selected.'
try:
UI.WarningWindow(print_out, 'Exit'); print print_out
except Exception:
print print_out
print traceback.format_exc()
badExit()
run = 0
### Occurs when analyzing multiple conditions rather than performing a simple pair-wise comparison
if run_from_scratch == 'Annotate External Results':
import_dir = root_dir
elif analyze_all_conditions == 'all groups':
import_dir = string.replace(import_dir, 'AltExpression', 'AltExpression/FullDatasets')
if array_type == 'AltMouse':
import_dir = string.replace(import_dir, 'FullDatasets/AltMouse', 'FullDatasets/AltMouse/Mm')
elif analyze_all_conditions == 'both':
import_dir2 = string.replace(import_dir, 'AltExpression', 'AltExpression/FullDatasets')
if array_type == 'AltMouse':
import_dir2 = string.replace(import_dir2, 'FullDatasets/AltMouse', 'FullDatasets/AltMouse/Mm')
try:
dir_list2 = read_directory(
import_dir2) #send a sub_directory to a function to identify all files in a directory
except Exception:
try:
if array_type == 'exon':
array_type_dir = 'ExonArray'
elif array_type == 'gene':
array_type_dir = 'GeneArray'
elif array_type == 'junction':
array_type_dir = 'JunctionArray'
else:
array_type_dir = array_type
import_dir2 = string.replace(import_dir2, 'AltExpression/' + array_type_dir + '/' + species + '/', '')
import_dir2 = string.replace(import_dir2, 'AltExpression/' + array_type_dir + '/', '');
dir_list2 = read_directory(import_dir2)
except Exception:
print_out = 'The expression files were not found. Please make\nsure you selected the correct species and array type.\n\nselected species: ' + species + '\nselected array type: ' + array_type + '\nselected directory: ' + import_dir2
try:
UI.WarningWindow(print_out, 'Exit'); print print_out
except Exception:
print print_out
print traceback.format_exc()
badExit()
try:
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
except Exception:
try:
if array_type == 'exon':
array_type_dir = 'ExonArray'
elif array_type == 'gene':
array_type_dir = 'GeneArray'
elif array_type == 'junction':
array_type_dir = 'JunctionArray'
else:
array_type_dir = array_type
import_dir = string.replace(import_dir, 'AltExpression/' + array_type_dir + '/' + species + '/', '')
import_dir = string.replace(import_dir, 'AltExpression/' + array_type_dir + '/', '');
try:
dir_list = read_directory(import_dir)
except Exception:
import_dir = root_dir
dir_list = read_directory(
root_dir) ### Occurs when reading in an AltAnalyze filtered file under certain conditions
except Exception:
print_out = 'The expression files were not found. Please make\nsure you selected the correct species and array type.\n\nselected species: ' + species + '\nselected array type: ' + array_type + '\nselected directory: ' + import_dir
try:
UI.WarningWindow(print_out, 'Exit')
except Exception:
print print_out
print traceback.format_exc()
badExit()
dir_list += dir_list2
### Capture the corresponding files in the residual dir to make sure these files exist for all comparisons - won't if FIRMA was run on some files
if analysis_method == 'FIRMA':
try:
residual_dir = root_dir + 'AltExpression/FIRMA/residuals/' + array_type + '/' + species + '/'
residuals_dirlist = read_directory(residual_dir)
except Exception:
null = []
try:
residual_dir = root_dir + 'AltExpression/FIRMA/FullDatasets/' + array_type + '/' + species + '/'
residuals_dirlist += read_directory(residual_dir)
except Exception:
null = []
dir_list_verified = []
for file in residuals_dirlist:
for filename in dir_list:
if file[:-4] in filename: dir_list_verified.append(filename)
dir_list = unique.unique(dir_list_verified)
junction_biotype = 'no'
if array_type == 'RNASeq':
### Check to see if user data includes junctions or just exons
for probeset in exon_db:
if '-' in probeset: junction_biotype = 'yes'; break
if junction_biotype == 'no' and analysis_method != 'splicing-index' and array_type == 'RNASeq':
dir_list = [] ### DON'T RUN ALTANALYZE WHEN JUST ANALYZING EXON DATA
print 'No junction data to summarize... proceeding with exon analysis\n'
elif len(dir_list) == 0:
print_out = 'No expression files available in the input directory:\n' + root_dir
try:
UI.WarningWindow(print_out, 'Exit'); print print_out
except Exception:
print print_out
badExit()
dir_list = filterAltExpressionFiles(dir_list,
altanalyze_files) ### Looks to see if the AltExpression files are for this run or from an older run
for altanalyze_input in dir_list: #loop through each file in the directory to output results
###Import probe-level associations
if 'cel_files' in altanalyze_input:
print_out = 'The AltExpression directory containing the necessary import file(s) is missing. Please verify the correct parameters and input directory were selected. If this error persists, contact us.'
try:
UI.WarningWindow(print_out, 'Exit'); print print_out
except Exception:
print print_out
badExit()
if run > 0: ### Only re-set these databases after the run when batch analysing multiple files
exon_db = {};
filtered_arrayids = {};
filter_status = 'no' ###Use this as a means to save memory (import multiple times - only storing different types relevant information)
constitutive_probeset_db, exon_db, genes_being_analyzed = importSplicingAnnotationDatabase(
probeset_annotations_file, array_type, filtered_arrayids, filter_status)
if altanalyze_input in dir_list2:
dataset_dir = import_dir2 + '/' + altanalyze_input ### Then not a pairwise comparison
else:
dataset_dir = import_dir + '/' + altanalyze_input
dataset_name = altanalyze_input[:-4] + '-'
print "Beginning to process", dataset_name[0:-1]
### If the user wants to restrict the analysis to preselected probesets (e.g., limma or FIRMA analysis selected)
global filtered_probeset_db;
filtered_probeset_db = {}
try:
filtered_probeset_db = restrictProbesets(dataset_name)
except Exception:
null = []
if run_from_scratch != 'Annotate External Results':
###Import expression data and stats and filter the expression data based on fold and p-value OR expression threshold
try:
conditions, adj_fold_dbase, nonlog_NI_db, dataset_name, gene_expression_diff_db, midas_db, ex_db, si_db = performExpressionAnalysis(
dataset_dir, constitutive_probeset_db, exon_db, annotate_db, dataset_name)
except IOError:
#except Exception,exception:
#print exception
print traceback.format_exc()
print_out = 'The AltAnalyze filtered expression file "' + dataset_name + '" is not properly formatted. Review formatting requirements if this file was created by another application.'
try:
UI.WarningWindow(print_out, 'Exit'); print print_out
except Exception:
print print_out
badExit()
else:
conditions = 0;
adj_fold_dbase = {};
nonlog_NI_db = {};
gene_expression_diff_db = {};
ex_db = {};
si_db = {}
defineEmptyExpressionVars(exon_db);
adj_fold_dbase = original_fold_dbase
###Run Analysis
summary_results_db, summary_results_db2, aspire_output, aspire_output_gene, number_events_analyzed = splicingAnalysisAlgorithms(
nonlog_NI_db, adj_fold_dbase, dataset_name, gene_expression_diff_db, exon_db, ex_db, si_db, dataset_dir)
aspire_output_list.append(aspire_output);
aspire_output_gene_list.append(aspire_output_gene)
try:
clearObjectsFromMemory(exon_db); clearObjectsFromMemory(
constitutive_probeset_db); constitutive_probeset_db = []
except Exception:
null = []
try:
clearObjectsFromMemory(last_exon_region_db);last_exon_region_db = []
except Exception:
null = []
try:
clearObjectsFromMemory(adj_fold_dbase);adj_fold_dbase = []; clearObjectsFromMemory(
nonlog_NI_db);nonlog_NI_db = []
except Exception:
null = []
try:
clearObjectsFromMemory(gene_expression_diff_db);gene_expression_diff_db = []; clearObjectsFromMemory(
midas_db);midas_db = []
except Exception:
null = []
try:
clearObjectsFromMemory(ex_db);ex_db = []; clearObjectsFromMemory(si_db);si_db = []
except Exception:
null = []
try:
run += 1
except Exception:
run = 1
if run > 0: ###run = 0 if no filtered expression data present
try:
return summary_results_db, aspire_output_gene_list, number_events_analyzed
except Exception:
print_out = 'AltAnalyze was unable to find an expression dataset to analyze in:\n' + import_dir + '\nor\n' + import_dir2 + '\nPlease re-run and select a valid input directory.'
try:
UI.WarningWindow(print_out, 'Exit'); print print_out
except Exception:
print print_out
badExit()
else:
try:
clearObjectsFromMemory(exon_db); clearObjectsFromMemory(
constitutive_probeset_db); constitutive_probeset_db = []
except Exception:
null = []
try:
clearObjectsFromMemory(last_exon_region_db);last_exon_region_db = []
except Exception:
null = []
return None
def filterAltExpressionFiles(dir_list, current_files):
dir_list2 = []
try:
if len(current_files) == 0: current_files = dir_list ###if no filenames input
for altanalzye_input in dir_list: #loop through each file in the directory to output results
if altanalzye_input in current_files:
dir_list2.append(altanalzye_input)
dir_list = dir_list2
except Exception:
dir_list = dir_list
return dir_list
def defineEmptyExpressionVars(exon_db):
global fold_dbase;
fold_dbase = {};
global original_fold_dbase;
global critical_exon_db;
critical_exon_db = {}
global midas_db;
midas_db = {};
global max_replicates;
global equal_replicates;
max_replicates = 0;
equal_replicates = 0
for probeset in exon_db: fold_dbase[probeset] = '', ''
original_fold_dbase = fold_dbase
def universalPrintFunction(print_items):
log_report = open(log_file, 'a')
for item in print_items:
if commandLineMode == 'no': ### Command-line mode has its own log file write method (Logger)
log_report.write(item + '\n')
else:
print item
log_report.close()
class StatusWindow:
def __init__(self, root, expr_var, alt_var, goelite_var, additional_var, exp_file_location_db):
root.title('AltAnalyze version 2.0.9.3 beta')
statusVar = StringVar() ### Class method for Tkinter. Description: "Value holder for strings variables."
self.root = root
height = 450;
width = 500
if os.name != 'nt': height = 500; width = 600
self.sf = PmwFreeze.ScrolledFrame(root,
labelpos='n', label_text='Results Status Window',
usehullsize=1, hull_width=width, hull_height=height)
self.sf.pack(padx=5, pady=1, fill='both', expand=1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(), tag_text='Output')
group.pack(fill='both', expand=1, padx=10, pady=0)
Label(group.interior(), width=190, height=552, justify=LEFT, bg='black', fg='white', anchor=NW, padx=5, pady=5,
textvariable=statusVar).pack(fill=X, expand=Y)
status = StringVarFile(statusVar, root) ### Likely captures the stdout
sys.stdout = status
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset];
fl.setSTDOUT(sys.stdout)
root.after(100, AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db, root))
try:
root.protocol("WM_DELETE_WINDOW", self.deleteWindow)
root.mainloop()
except Exception:
pass
def deleteWindow(self):
try:
self.root.destroy()
except Exception:
pass
def quit(self):
try:
self.root.quit()
self.root.destroy()
except Exception:
pass
sys.exit()
def exportComparisonSummary(dataset_name, summary_data_dbase, return_type):
log_report = open(log_file, 'a')
result_list = []
for key in summary_data_dbase:
if key != 'QC': ### The value is a list of strings
summary_data_dbase[key] = str(summary_data_dbase[key])
d = 'Dataset name: ' + dataset_name[:-1];
result_list.append(d + '\n')
d = summary_data_dbase['gene_assayed'] + ':\tAll genes examined';
result_list.append(d)
d = summary_data_dbase['denominator_exp_genes'] + ':\tExpressed genes examined for AS';
result_list.append(d)
if explicit_data_type == 'exon-only':
d = summary_data_dbase['alt_events'] + ':\tAlternatively regulated probesets';
result_list.append(d)
d = summary_data_dbase['denominator_exp_events'] + ':\tExpressed probesets examined';
result_list.append(d)
elif (array_type == 'AltMouse' or array_type == 'junction' or array_type == 'RNASeq') and (
explicit_data_type == 'null' or return_type == 'print'):
d = summary_data_dbase['alt_events'] + ':\tAlternatively regulated junction-pairs';
result_list.append(d)
d = summary_data_dbase['denominator_exp_events'] + ':\tExpressed junction-pairs examined';
result_list.append(d)
else:
d = summary_data_dbase['alt_events'] + ':\tAlternatively regulated probesets';
result_list.append(d)
d = summary_data_dbase['denominator_exp_events'] + ':\tExpressed probesets examined';
result_list.append(d)
d = summary_data_dbase['alt_genes'] + ':\tAlternatively regulated genes (ARGs)';
result_list.append(d)
d = summary_data_dbase['direct_domain_genes'] + ':\tARGs - overlapping with domain/motifs';
result_list.append(d)
d = summary_data_dbase['miRNA_gene_hits'] + ':\tARGs - overlapping with microRNA binding sites';
result_list.append(d)
result_list2 = []
for d in result_list:
if explicit_data_type == 'exon-only':
d = string.replace(d, 'probeset', 'exon')
elif array_type == 'RNASeq':
d = string.replace(d, 'probeset', 'junction')
result_list2.append(d)
result_list = result_list2
if return_type == 'log':
for d in result_list: log_report.write(d + '\n')
log_report.write('\n')
log_report.close()
return result_list
class SummaryResultsWindow:
def __init__(self, tl, analysis_type, output_dir, dataset_name, output_type, summary_data_dbase):
def showLink(event):
try:
idx = int(event.widget.tag_names(CURRENT)[1]) ### This is just the index provided below (e.g., str(0))
#print [self.LINKS[idx]]
if 'http://' in self.LINKS[idx]:
webbrowser.open(self.LINKS[idx])
elif self.LINKS[idx][-1] == '/':
self.openSuppliedDirectory(self.LINKS[idx])
else:
### Instead of using this option to open a hyperlink (which is what it should do), we can open another Tk window
try:
self.viewPNGFile(self.LINKS[idx]) ### ImageTK PNG viewer
except Exception:
try:
self.ShowImageMPL(self.LINKS[idx]) ### MatPlotLib-based display
except Exception:
self.openPNGImage(self.LINKS[idx]) ### Native OS PNG viewer
#self.DisplayPlots(self.LINKS[idx]) ### GIF-based display
except Exception:
null = [] ### anomalous error
self.emergency_exit = False
self.LINKS = []
self.tl = tl
self.tl.title('AltAnalyze version 2.0.9 beta')
self.analysis_type = analysis_type
filename = 'Config/icon.gif'
fn = filepath(filename);
img = PhotoImage(file=fn)
can = Canvas(tl);
can.pack(side='top');
can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
use_scroll = 'yes'
try:
runGOElite = run_GOElite
except Exception:
runGOElite = 'decide_later'
if 'QC' in summary_data_dbase:
graphic_links = summary_data_dbase['QC'] ### contains hyperlinks to QC and Clustering plots
if len(graphic_links) == 0: del summary_data_dbase['QC'] ### This can be added if an analysis fails
else:
graphic_links = []
label_text_str = 'AltAnalyze Result Summary';
height = 150;
width = 500
if analysis_type == 'AS' or 'QC' in summary_data_dbase: height = 330
self.sf = PmwFreeze.ScrolledFrame(tl,
labelpos='n', label_text=label_text_str,
usehullsize=1, hull_width=width, hull_height=height)
self.sf.pack(padx=5, pady=1, fill='both', expand=1)
self.frame = self.sf.interior()
txt = Text(self.frame, bg='gray', width=150, height=80)
txt.pack(expand=True, fill="both")
#txt.insert(END, 'Primary Analysis Finished....\n')
txt.insert(END, 'Results saved to:\n' + output_dir + '\n')
f = Font(family="System", size=12, weight="bold")
txt.tag_config("font", font=f)
i = 0
copyDirectoryPDFs(output_dir, AS=analysis_type)
if analysis_type == 'AS':
txt.insert(END, '\n')
result_list = exportComparisonSummary(dataset_name, summary_data_dbase, 'print')
for d in result_list: txt.insert(END, d + '\n')
if 'QC' in summary_data_dbase and len(graphic_links) > 0:
txt.insert(END, '\nQC and Expression Clustering Plots', "font")
txt.insert(END, '\n\n 1) ')
for (name, file_dir) in graphic_links:
txt.insert(END, name, ('link', str(i)))
if len(graphic_links) > (i + 1):
txt.insert(END, '\n %s) ' % str(i + 2))
self.LINKS.append(file_dir)
i += 1
txt.insert(END, '\n\nView all primary plots in the folder ')
txt.insert(END, 'DataPlots', ('link', str(i)));
i += 1
self.LINKS.append(output_dir + 'DataPlots/')
else:
url = 'http://code.google.com/p/altanalyze/'
self.LINKS = (url, '')
txt.insert(END, '\nFor more information see the ')
txt.insert(END, "AltAnalyze Online Help", ('link', str(0)))
txt.insert(END, '\n\n')
if runGOElite == 'run-immediately':
txt.insert(END, '\n\nView all pathway enrichment results in the folder ')
txt.insert(END, 'GO-Elite', ('link', str(i)));
i += 1
self.LINKS.append(output_dir + 'GO-Elite/')
if analysis_type == 'AS':
txt.insert(END, '\n\nView all splicing plots in the folder ')
txt.insert(END, 'ExonPlots', ('link', str(i)));
i += 1
self.LINKS.append(output_dir + 'ExonPlots/')
txt.tag_config('link', foreground="blue", underline=1)
txt.tag_bind('link', '<Button-1>', showLink)
txt.insert(END, '\n\n')
open_results_folder = Button(tl, text='Results Folder', command=self.openDirectory)
open_results_folder.pack(side='left', padx=5, pady=5);
if analysis_type == 'AS':
self.dg_url = 'http://www.altanalyze.org/domaingraph.htm'
dg_pdf_file = 'Documentation/domain_graph.pdf';
dg_pdf_file = filepath(dg_pdf_file);
self.dg_pdf_file = dg_pdf_file
text_button = Button(tl, text='Start DomainGraph in Cytoscape', command=self.SelectCytoscapeTopLevel)
text_button.pack(side='right', padx=5, pady=5)
self.output_dir = output_dir + "AltResults"
self.whatNext_url = 'http://code.google.com/p/altanalyze/wiki/AnalyzingASResults' #http://www.altanalyze.org/what_next_altexon.htm'
whatNext_pdf = 'Documentation/what_next_alt_exon.pdf';
whatNext_pdf = filepath(whatNext_pdf);
self.whatNext_pdf = whatNext_pdf
if output_type == 'parent': self.output_dir = output_dir ###Used for fake datasets
else:
if pathway_permutations == 'NA':
self.output_dir = output_dir + "ExpressionOutput"
else:
self.output_dir = output_dir
self.whatNext_url = 'http://code.google.com/p/altanalyze/wiki/AnalyzingGEResults' #'http://www.altanalyze.org/what_next_expression.htm'
whatNext_pdf = 'Documentation/what_next_GE.pdf';
whatNext_pdf = filepath(whatNext_pdf);
self.whatNext_pdf = whatNext_pdf
what_next = Button(tl, text='What Next?', command=self.whatNextlinkout)
what_next.pack(side='right', padx=5, pady=5)
quit_buttonTL = Button(tl, text='Close View', command=self.close)
quit_buttonTL.pack(side='right', padx=5, pady=5)
continue_to_next_win = Button(text='Continue', command=self.continue_win)
continue_to_next_win.pack(side='right', padx=10, pady=10)
quit_button = Button(root, text='Quit', command=self.quit)
quit_button.pack(side='right', padx=5, pady=5)
button_text = 'Help';
help_url = 'http://www.altanalyze.org/help_main.htm';
self.help_url = filepath(help_url)
pdf_help_file = 'Documentation/AltAnalyze-Manual.pdf';
pdf_help_file = filepath(pdf_help_file);
self.pdf_help_file = pdf_help_file
help_button = Button(root, text=button_text, command=self.Helplinkout)
help_button.pack(side='left', padx=5, pady=5)
if self.emergency_exit == False:
self.tl.protocol("WM_DELETE_WINDOW", self.tldeleteWindow)
self.tl.mainloop() ###Needed to show graphic
else:
""" This shouldn't have to be called, but is when the topLevel window isn't closed first
specifically if a PNG file is opened. the sys.exitfunc() should work but doesn't.
work on this more later """
#AltAnalyzeSetup('no')
try:
self._tls.quit(); self._tls.destroy()
except Exception:
None
try:
self._tlx.quit(); self._tlx.destroy()
except Exception:
None
try:
self._tlx.quit(); self._tlx.destroy()
except Exception:
None
try:
self.tl.quit(); self.tl.destroy()
except Exception:
None
try:
root.quit(); root.destroy()
except Exception:
None
UI.getUpdatedParameters(array_type, species, 'Process Expression file', output_dir)
sys.exit() ### required when opening PNG files on Windows to continue (not sure why)
#sys.exitfunc()
def tldeleteWindow(self):
try:
self.tl.quit(); self.tl.destroy()
except Exception:
self.tl.destroy()
def deleteTLWindow(self):
self.emergency_exit = True
try:
self._tls.quit(); self._tls.destroy()
except Exception:
None
try:
self._tlx.quit(); self._tlx.destroy()
except Exception:
None
self.tl.quit()
self.tl.destroy()
sys.exitfunc()
def deleteWindow(self):
self.emergency_exit = True
try:
self._tls.quit(); self._tls.destroy()
except Exception:
None
try:
self._tlx.quit(); self._tlx.destroy()
except Exception:
None
try:
self.tl.quit()
self.tl.destroy()
except Exception:
None
sys.exitfunc()
def continue_win(self):
self.emergency_exit = True
try:
self._tls.quit(); self._tls.destroy()
except Exception:
None
try:
self._tlx.quit(); self._tlx.destroy()
except Exception:
None
try:
self.tl.quit(); self.tl.destroy()
except Exception:
pass
root.quit()
root.destroy()
try:
self.tl.grid_forget()
except Exception:
None
try:
root.grid_forget()
except Exception:
None
sys.exitfunc()
def openDirectory(self):
if os.name == 'nt':
try:
os.startfile('"' + self.output_dir + '"')
except Exception:
os.system('open "' + self.output_dir + '"')
elif 'darwin' in sys.platform:
os.system('open "' + self.output_dir + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + self.output_dir + '/"')
def openSuppliedDirectory(self, dir):
if os.name == 'nt':
try:
os.startfile('"' + self.output_dir + '"')
except Exception:
os.system('open "' + dir + '"')
elif 'darwin' in sys.platform:
os.system('open "' + dir + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + dir + '/"')
def DGlinkout(self):
try:
altanalyze_path = filepath('') ### Find AltAnalye's path
altanalyze_path = altanalyze_path[:-1]
except Exception:
null = []
if os.name == 'nt':
parent_dir = 'C:/Program Files';
application_dir = 'Cytoscape_v';
application_name = 'Cytoscape.exe'
elif 'darwin' in sys.platform:
parent_dir = '/Applications';
application_dir = 'Cytoscape_v';
application_name = 'Cytoscape.app'
elif 'linux' in sys.platform:
parent_dir = '/opt';
application_dir = 'Cytoscape_v';
application_name = 'Cytoscape'
try:
openCytoscape(altanalyze_path, application_dir, application_name)
except Exception:
null = []
try:
self._tls.destroy()
except Exception:
None
try: ###Remove this cytoscape as the default
file_location_defaults = UI.importDefaultFileLocations()
del file_location_defaults['CytoscapeDir']
UI.exportDefaultFileLocations(file_location_defaults)
except Exception:
null = []
self.GetHelpTopLevel(self.dg_url, self.dg_pdf_file)
def Helplinkout(self):
self.GetHelpTopLevel(self.help_url, self.pdf_help_file)
def whatNextlinkout(self):
self.GetHelpTopLevel(self.whatNext_url, self.whatNext_pdf)
def ShowImageMPL(self, file_location):
""" Visualization method using MatPlotLib """
try:
import matplotlib
import matplotlib.pyplot as pylab
except Exception:
#print 'Graphical output mode disabled (requires matplotlib, numpy and scipy)'
None
fig = pylab.figure()
pylab.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.00) ### Fill the plot area left to right
ax = fig.add_subplot(111)
ax.set_xticks([]) ### Hides ticks
ax.set_yticks([])
img = pylab.imread(file_location)
imgplot = pylab.imshow(img)
pylab.show()
def viewPNGFile(self, png_file_dir):
""" View PNG file within a PMW Tkinter frame """
import ImageTk
tlx = Toplevel();
self._tlx = tlx
sf = PmwFreeze.ScrolledFrame(tlx, labelpos='n', label_text='',
usehullsize=1, hull_width=800, hull_height=550)
sf.pack(padx=0, pady=0, fill='both', expand=1)
frame = sf.interior()
tlx.title(png_file_dir)
img = ImageTk.PhotoImage(file=png_file_dir)
can = Canvas(frame)
can.pack(fill=BOTH, padx=0, pady=0)
w = img.width()
h = img.height()
can.config(width=w, height=h)
can.create_image(2, 2, image=img, anchor=NW)
tlx.mainloop()
def openPNGImage(self, png_file_dir):
if os.name == 'nt':
try:
os.startfile('"' + png_file_dir + '"')
except Exception:
os.system('open "' + png_file_dir + '"')
elif 'darwin' in sys.platform:
os.system('open "' + png_file_dir + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + png_file_dir + '"')
def DisplayPlots(self, file_location):
""" Native Tkinter method - Displays a gif file in a standard TopLevel window (nothing fancy) """
tls = Toplevel();
self._tls = tls;
nulls = '\t\t\t\t';
tls.title('AltAnalyze Plot Visualization')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos='n', label_text='', usehullsize=1, hull_width=520, hull_height=500)
self.sf.pack(padx=5, pady=1, fill='both', expand=1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(), tag_text=file_location)
group.pack(fill='both', expand=1, padx=10, pady=0)
img = PhotoImage(file=filepath(file_location))
can = Canvas(group.interior());
can.pack(side='left', padx=10, pady=20);
can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
tls.mainloop()
def GetHelpTopLevel(self, url, pdf_file):
try:
config_db = UI.importConfigFile()
ask_for_help = config_db['help'] ### hide_selection_option
except Exception:
ask_for_help = 'null'; config_db = {}
self.pdf_file = pdf_file;
self.url = url
if ask_for_help == 'null':
message = '';
self.message = message;
self.online_help = 'Online Documentation';
self.pdf_help = 'Local PDF File'
tls = Toplevel();
self._tls = tls;
nulls = '\t\t\t\t';
tls.title('Please select one of the options')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos='n', label_text='', usehullsize=1, hull_width=320,
hull_height=200)
self.sf.pack(padx=5, pady=1, fill='both', expand=1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(), tag_text='Options')
group.pack(fill='both', expand=1, padx=10, pady=0)
filename = 'Config/icon.gif';
fn = filepath(filename);
img = PhotoImage(file=fn)
can = Canvas(group.interior());
can.pack(side='left', padx=10, pady=20);
can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
l1 = Label(group.interior(), text=nulls);
l1.pack(side='bottom')
text_button2 = Button(group.interior(), text=self.online_help, command=self.openOnlineHelp);
text_button2.pack(side='top', padx=5, pady=5)
try:
text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(
side='top', padx=5, pady=5)
except Exception:
text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(
side='top', padx=5, pady=5)
text_button3 = Button(group.interior(), text='No Thanks', command=self.skipHelp);
text_button3.pack(side='top', padx=5, pady=5)
c = Checkbutton(group.interior(), text="Apply these settings each time", command=self.setHelpConfig);
c.pack(side='bottom', padx=5, pady=0)
tls.mainloop()
try:
tls.destroy()
except Exception:
None
else:
file_location_defaults = UI.importDefaultFileLocations()
try:
help_choice = file_location_defaults['HelpChoice'].Location()
if help_choice == 'PDF':
self.openPDFHelp()
elif help_choice == 'http':
self.openOnlineHelp()
else:
self.skipHelp()
except Exception:
self.openPDFHelp() ### Open PDF if there's a problem
def SelectCytoscapeTopLevel(self):
try:
config_db = UI.importConfigFile()
cytoscape_type = config_db['cytoscape'] ### hide_selection_option
except Exception:
cytoscape_type = 'null'; config_db = {}
if cytoscape_type == 'null':
message = '';
self.message = message
tls = Toplevel();
self._tls = tls;
nulls = '\t\t\t\t';
tls.title('Cytoscape Automatic Start Options')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos='n', label_text='', usehullsize=1, hull_width=420,
hull_height=200)
self.sf.pack(padx=5, pady=1, fill='both', expand=1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(), tag_text='Options')
group.pack(fill='both', expand=1, padx=10, pady=0)
filename = 'Config/cyto-logo-smaller.gif';
fn = filepath(filename);
img = PhotoImage(file=fn)
can = Canvas(group.interior());
can.pack(side='left', padx=10, pady=5);
can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
#"""
self.local_cytoscape = 'AltAnalyze Bundled Version';
self.custom_cytoscape = 'Previously Installed Version'
l1 = Label(group.interior(), text=nulls);
l1.pack(side='bottom')
l3 = Label(group.interior(), text='Select version of Cytoscape to open:');
l3.pack(side='top', pady=5)
"""
self.local_cytoscape = ' No '; self.custom_cytoscape = ' Yes '
l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
l2 = Label(group.interior(), text='Note: Cytoscape can take up-to a minute to initalize', fg="red"); l2.pack(side = 'top', padx = 5, pady = 0)
"""
text_button2 = Button(group.interior(), text=self.local_cytoscape, command=self.DGlinkout);
text_button2.pack(padx=5, pady=5)
try:
text_button = Button(group.interior(), text=self.custom_cytoscape,
command=self.getPath); text_button.pack(padx=5, pady=5)
except Exception:
text_button = Button(group.interior(), text=self.custom_cytoscape,
command=self.getPath); text_button.pack(padx=5, pady=5)
l2 = Label(group.interior(), text='Note: Cytoscape can take up to a minute to initialize', fg="blue");
l2.pack(side='bottom', padx=5, pady=0)
c = Checkbutton(group.interior(), text="Apply these settings each time and don't show again",
command=self.setCytoscapeConfig);
c.pack(side='bottom', padx=5, pady=0)
#c2 = Checkbutton(group.interior(), text = "Open PDF of DomainGraph help rather than online help", command=self.setCytoscapeConfig); c2.pack(side = 'bottom', padx = 5, pady = 0)
tls.mainloop()
try:
tls.destroy()
except Exception:
                pass
else:
file_location_defaults = UI.importDefaultFileLocations()
try:
cytoscape_app_dir = file_location_defaults['CytoscapeDir'].Location(); openFile(cytoscape_app_dir)
except Exception:
try:
altanalyze_path = filepath(''); altanalyze_path = altanalyze_path[:-1]
except Exception:
altanalyze_path = ''
application_dir = 'Cytoscape_v'
if os.name == 'nt':
application_name = 'Cytoscape.exe'
elif 'darwin' in sys.platform:
application_name = 'Cytoscape.app'
elif 'linux' in sys.platform:
application_name = 'Cytoscape'
try:
openCytoscape(altanalyze_path, application_dir, application_name)
except Exception:
                pass
def setCytoscapeConfig(self):
config_db = {};
config_db['cytoscape'] = 'hide_selection_option'
UI.exportConfigFile(config_db)
def setHelpConfig(self):
config_db = {};
config_db['help'] = 'hide_selection_option'
UI.exportConfigFile(config_db)
def getPath(self):
file_location_defaults = UI.importDefaultFileLocations()
if os.name == 'nt':
parent_dir = 'C:/Program Files'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.exe'
elif 'darwin' in sys.platform:
parent_dir = '/Applications'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.app'
elif 'linux' in sys.platform:
parent_dir = '/opt'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape'
try:
self.default_dir = file_location_defaults['CytoscapeDir'].Location()
self.default_dir = string.replace(self.default_dir, '//', '/')
self.default_dir = string.replace(self.default_dir, '\\', '/')
self.default_dir = string.join(string.split(self.default_dir, '/')[:-1], '/')
except Exception:
dir = FindDir(parent_dir, application_dir);
dir = filepath(parent_dir + '/' + dir)
self.default_dir = filepath(parent_dir)
try:
dirPath = tkFileDialog.askdirectory(parent=self._tls, initialdir=self.default_dir)
except Exception:
self.default_dir = ''
try:
dirPath = tkFileDialog.askdirectory(parent=self._tls, initialdir=self.default_dir)
except Exception:
try:
dirPath = tkFileDialog.askdirectory(parent=self._tls)
except Exception:
dirPath = ''
try:
#print [dirPath],application_name
app_dir = dirPath + '/' + application_name
if 'linux' in sys.platform:
try:
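                    ### NOTE: cytoscape_dir is not defined in this scope (dirPath may have been intended); a NameError here is silently caught below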
createCytoscapeDesktop(cytoscape_dir)
except Exception:
                    pass
dir_list = unique.read_directory('/usr/bin/') ### Check to see that JAVA is installed
                if 'java' not in dir_list: print 'Java not referenced in "/usr/bin/". If not installed,\nplease install and re-try opening Cytoscape'
try:
jar_path = dirPath + '/cytoscape.jar'
main_path = dirPath + '/cytoscape.CyMain'
plugins_path = dirPath + '/plugins'
os.system(
'java -Dswing.aatext=true -Xss5M -Xmx512M -jar ' + jar_path + ' ' + main_path + ' -p ' + plugins_path + ' &')
print 'Cytoscape jar opened:', jar_path
except Exception:
print 'OS command to open Java failed.'
try:
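                    ### NOTE: app_dir2 is not defined here; the except clause below falls back to app_dir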
openFile(app_dir2); print 'Cytoscape opened:', app_dir2
except Exception:
openFile(app_dir)
else:
openFile(app_dir)
try:
file_location_defaults['CytoscapeDir'].SetLocation(app_dir)
except Exception:
fl = UI.FileLocationData('', app_dir, 'all')
file_location_defaults['CytoscapeDir'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
except Exception:
            pass
try:
self._tls.destroy()
except Exception:
            pass
self.GetHelpTopLevel(self.dg_url, self.dg_pdf_file)
def openOnlineHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:
file_location_defaults['HelpChoice'].SetLocation('http')
except Exception:
fl = UI.FileLocationData('', 'http', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
webbrowser.open(self.url)
#except Exception: null=[]
try:
self._tls.destroy()
except Exception:
            pass
def skipHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:
file_location_defaults['HelpChoice'].SetLocation('skip')
except Exception:
fl = UI.FileLocationData('', 'skip', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
try:
self._tls.destroy()
except Exception:
            pass
def openPDFHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:
file_location_defaults['HelpChoice'].SetLocation('PDF')
except Exception:
fl = UI.FileLocationData('', 'PDF', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
if os.name == 'nt':
try:
os.startfile('"' + self.pdf_file + '"')
except Exception:
os.system('open "' + self.pdf_file + '"')
elif 'darwin' in sys.platform:
os.system('open "' + self.pdf_file + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + self.pdf_file + '"')
try:
self._tls.destroy()
except Exception:
            pass
def quit(self):
root.quit()
root.destroy()
sys.exit()
def close(self):
#self.tl.quit() #### This was causing multiple errors in 2.0.7 - evaluate more!
self.tl.destroy()
class StringVarFile:
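    """File-like object that tees print output to the Tkinter status window (via a StringVar) and appends each message to the AltAnalyze log file."""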
def __init__(self, stringVar, window):
self.__newline = 0;
self.__stringvar = stringVar;
self.__window = window
def write(self, s):
try:
log_report = open(log_file, 'a')
log_report.write(s);
log_report.close() ### Variable to record each print statement
new = self.__stringvar.get()
for c in s:
#if c == '\n': self.__newline = 1
if c == '\k':
                    self.__newline = 1  ### This should not be found and thus results in a continuous feed rather than replacing a single line
else:
if self.__newline: new = ""; self.__newline = 0
new = new + c
self.set(new)
except Exception:
pass
def set(self, s):
self.__stringvar.set(s); self.__window.update()
def get(self):
return self.__stringvar.get()
def flush(self):
pass
def timestamp():
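    """Return a date-time stamp such as '20140321-093245' (today's date plus the HHMMSS field of time.ctime())."""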
import datetime
    today = str(datetime.date.today());
    today = string.split(today, '-');
    today = today[0] + today[1] + today[2]
    time_stamp = string.replace(time.ctime(), ':', '')
    time_stamp = string.replace(time_stamp, '  ', ' ')  ### time.ctime() pads single-digit days with two spaces
    time_stamp = string.split(time_stamp, ' ')  ### Use a time-stamp as the output dir (minus the day)
    time_stamp = today + '-' + time_stamp[3]
return time_stamp
def callWXPython():
import wx
import AltAnalyzeViewer
app = wx.App(False)
AltAnalyzeViewer.remoteViewer(app)
def AltAnalyzeSetup(skip_intro):
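    """Gather user parameters (via the GUI or the remote results viewer), set up the run log and shared globals, then launch the analysis."""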
global apt_location;
global root_dir;
global log_file;
global summary_data_db;
summary_data_db = {};
reload(UI)
global probability_statistic;
global commandLineMode;
commandLineMode = 'no'
if 'remoteViewer' == skip_intro:
if os.name == 'nt':
callWXPython()
elif os.name == 'ntX':
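            ### os.name never equals 'ntX', so this branch appears intentionally disabled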
package_path = filepath('python')
win_package_path = string.replace(package_path, 'python', 'AltAnalyzeViewer.exe')
import subprocess
subprocess.call([win_package_path]);
sys.exit()
elif os.name == 'posix':
package_path = filepath('python')
#mac_package_path = string.replace(package_path,'python','AltAnalyze.app/Contents/MacOS/python')
#os.system(mac_package_path+' RemoteViewer.py');sys.exit()
mac_package_path = string.replace(package_path, 'python',
'AltAnalyzeViewer.app/Contents/MacOS/AltAnalyzeViewer')
import subprocess
subprocess.call([mac_package_path]);
sys.exit()
"""
import threading
import wx
app = wx.PySimpleApp()
t = threading.Thread(target=callWXPython)
t.setDaemon(1)
t.start()
s = 1
queue = mlp.Queue()
proc = mlp.Process(target=callWXPython) ### passing sys.stdout unfortunately doesn't work to pass the Tk string
proc.start()
sys.exit()
"""
reload(UI)
expr_var, alt_var, additional_var, goelite_var, exp_file_location_db = UI.getUserParameters(skip_intro, Multi=mlp)
"""except Exception:
if 'SystemExit' not in str(traceback.format_exc()):
expr_var, alt_var, additional_var, goelite_var, exp_file_location_db = UI.getUserParameters('yes')
else: sys.exit()"""
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
apt_location = fl.APTLocation()
root_dir = fl.RootDir()
try:
probability_statistic = fl.ProbabilityStatistic()
except Exception:
probability_statistic = 'unpaired t-test'
time_stamp = timestamp()
log_file = filepath(root_dir + 'AltAnalyze_report-' + time_stamp + '.log')
log_report = open(log_file, 'w');
log_report.close()
if use_Tkinter == 'yes' and debug_mode == 'no':
try:
global root;
root = Tk()
StatusWindow(root, expr_var, alt_var, goelite_var, additional_var, exp_file_location_db)
root.destroy()
except Exception, exception:
try:
print traceback.format_exc()
badExit()
except Exception:
sys.exit()
else:
AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db, '')
def badExit():
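    """Report an unexpected fatal error (opening the log file and a warning window when in GUI mode) and exit AltAnalyze."""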
print "\n...exiting AltAnalyze due to unexpected error"
try:
time_stamp = timestamp()
print_out = "Unknown error encountered during data processing.\nPlease see logfile in:\n\n" + log_file + "\nand report to genmapp@gladstone.ucsf.edu."
try:
if len(log_file) > 0:
if commandLineMode == 'no':
if os.name == 'nt':
try:
os.startfile('"' + log_file + '"')
except Exception:
os.system('open "' + log_file + '"')
elif 'darwin' in sys.platform:
os.system('open "' + log_file + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + log_file + '"')
if commandLineMode == 'no':
try:
UI.WarningWindow(print_out, 'Error Encountered!'); root.destroy()
except Exception:
print print_out
except Exception:
sys.exit()
except Exception:
sys.exit()
sys.exit()
def AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db, root):
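    """Primary analysis workflow: expression building, alternative exon analysis, GO-Elite enrichment and results visualization."""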
### Hard-coded defaults
w = 'Agilent';
x = 'Affymetrix';
y = 'Ensembl';
z = 'any';
data_source = y;
constitutive_source = z;
    manufacturer = x  ### The constitutive source is only really used if Ensembl; otherwise Affymetrix is used (even if default)
### Get default options for ExpressionBuilder and AltAnalyze
start_time = time.time()
test_goelite = 'no';
    test_results_panel = 'no'
global species;
global array_type;
global expression_data_format;
global use_R;
use_R = 'no'
global analysis_method;
global p_threshold;
global filter_probeset_types
global permute_p_threshold;
global perform_permutation_analysis;
global export_NI_values
global run_MiDAS;
global analyze_functional_attributes;
global microRNA_prediction_method
global calculate_normIntensity_p;
global pathway_permutations;
global avg_all_for_ss;
global analyze_all_conditions
global remove_intronic_junctions
global agglomerate_inclusion_probesets;
global expression_threshold;
global factor_out_expression_changes
global only_include_constitutive_containing_genes;
global remove_transcriptional_regulated_genes;
global add_exons_to_annotations
global exclude_protein_details;
global filter_for_AS;
global use_direct_domain_alignments_only;
global run_from_scratch
global explicit_data_type;
explicit_data_type = 'null'
global altanalyze_files;
altanalyze_files = []
species, array_type, manufacturer, constitutive_source, dabg_p, raw_expression_threshold, avg_all_for_ss, expression_data_format, include_raw_data, run_from_scratch, perform_alt_analysis = expr_var
analysis_method, p_threshold, filter_probeset_types, alt_exon_fold_variable, gene_expression_cutoff, remove_intronic_junctions, permute_p_threshold, perform_permutation_analysis, export_NI_values, analyze_all_conditions = alt_var
calculate_normIntensity_p, run_MiDAS, use_direct_domain_alignments_only, microRNA_prediction_method, filter_for_AS, additional_algorithms = additional_var
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, pathway_permutations, mod, returnPathways = goelite_var
original_remove_intronic_junctions = remove_intronic_junctions
if run_from_scratch == 'Annotate External Results': analysis_method = 'external'
if returnPathways == 'no' or returnPathways == 'None':
returnPathways = None
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
try:
exon_exp_threshold = fl.ExonExpThreshold()
except Exception:
exon_exp_threshold = 'NA'
try:
gene_exp_threshold = fl.GeneExpThreshold()
except Exception:
gene_exp_threshold = 'NA'
try:
exon_rpkm_threshold = fl.ExonRPKMThreshold()
except Exception:
exon_rpkm_threshold = 'NA'
try:
rpkm_threshold = fl.RPKMThreshold() ### Gene-Level
except Exception:
rpkm_threshold = 'NA'
fl.setJunctionExpThreshold(
raw_expression_threshold) ### For RNA-Seq, this specifically applies to exon-junctions
try:
predictGroups = fl.predictGroups()
except Exception:
predictGroups = False
try:
if fl.excludeLowExpressionExons():
excludeLowExpExons = 'yes'
else:
excludeLowExpExons = 'no'
except Exception:
excludeLowExpExons = 'no'
if test_goelite == 'yes': ### It can be difficult to get error warnings from GO-Elite, unless run here
results_dir = filepath(fl.RootDir())
elite_input_dirs = ['AltExonConfirmed', 'AltExon', 'regulated', 'upregulated',
'downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir + 'GO-Elite/' + elite_dir, results_dir + 'GO-Elite/denominator', results_dir + 'GO-Elite/' + elite_dir
variables = species, mod, pathway_permutations, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, returnPathways, file_dirs, root
GO_Elite.remoteAnalysis(variables, 'non-UI', Multi=mlp)
global perform_element_permutation_analysis;
global permutations
perform_element_permutation_analysis = 'yes';
permutations = 2000
analyze_functional_attributes = 'yes' ### Do this by default (shouldn't substantially increase runtime)
if run_from_scratch != 'Annotate External Results' and (array_type != "3'array" and array_type != 'RNASeq'):
if run_from_scratch != 'Process AltAnalyze filtered':
try:
raw_expression_threshold = float(raw_expression_threshold)
except Exception:
raw_expression_threshold = 1
if raw_expression_threshold < 1:
raw_expression_threshold = 1
print "Expression threshold < 1, forcing to be a minimum of 1."
try:
dabg_p = float(dabg_p)
except Exception:
dabg_p = 0
if dabg_p == 0 or dabg_p > 1:
print "Invalid dabg-p value threshold entered,(", dabg_p, ") setting to default of 0.05"
dabg_p = 0.05
if use_direct_domain_alignments_only == 'direct-alignment': use_direct_domain_alignments_only = 'yes'
if run_from_scratch == 'Process CEL files': expression_data_format = 'log'
print "Beginning AltAnalyze Analysis... Format:", expression_data_format
if array_type == 'RNASeq':
id_name = 'exon/junction IDs'
else:
id_name = 'array IDs'
print_items = []; #print [permute_p_threshold]; sys.exit()
print_items.append("AltAnalyze version 2.0.9 - Expression Analysis Parameters Being Used...")
print_items.append('\t' + 'database' + ': ' + unique.getCurrentGeneDatabaseVersion())
print_items.append('\t' + 'species' + ': ' + species)
print_items.append('\t' + 'method' + ': ' + array_type)
print_items.append('\t' + 'manufacturer' + ': ' + manufacturer)
print_items.append('\t' + 'probability_statistic' + ': ' + probability_statistic)
print_items.append('\t' + 'constitutive_source' + ': ' + constitutive_source)
print_items.append('\t' + 'dabg_p' + ': ' + str(dabg_p))
if array_type == 'RNASeq':
print_items.append('\t' + 'junction expression threshold' + ': ' + str(raw_expression_threshold))
print_items.append('\t' + 'exon_exp_threshold' + ': ' + str(exon_exp_threshold))
print_items.append('\t' + 'gene_exp_threshold' + ': ' + str(gene_exp_threshold))
print_items.append('\t' + 'exon_rpkm_threshold' + ': ' + str(exon_rpkm_threshold))
print_items.append('\t' + 'gene_rpkm_threshold' + ': ' + str(rpkm_threshold))
print_items.append('\t' + 'exclude low expressing exons for RPKM' + ': ' + excludeLowExpExons)
else:
print_items.append('\t' + 'raw_expression_threshold' + ': ' + str(raw_expression_threshold))
print_items.append('\t' + 'avg_all_for_ss' + ': ' + avg_all_for_ss)
print_items.append('\t' + 'expression_data_format' + ': ' + expression_data_format)
print_items.append('\t' + 'include_raw_data' + ': ' + include_raw_data)
print_items.append('\t' + 'run_from_scratch' + ': ' + run_from_scratch)
print_items.append('\t' + 'perform_alt_analysis' + ': ' + perform_alt_analysis)
if avg_all_for_ss == 'yes':
cs_type = 'core'
else:
cs_type = 'constitutive'
print_items.append('\t' + 'calculate_gene_expression_using' + ': ' + cs_type)
print_items.append("Alternative Exon Analysis Parameters Being Used...")
print_items.append('\t' + 'analysis_method' + ': ' + analysis_method)
print_items.append('\t' + 'p_threshold' + ': ' + str(p_threshold))
print_items.append('\t' + 'filter_data_types' + ': ' + filter_probeset_types)
print_items.append('\t' + 'alt_exon_fold_variable' + ': ' + str(alt_exon_fold_variable))
print_items.append('\t' + 'gene_expression_cutoff' + ': ' + str(gene_expression_cutoff))
print_items.append('\t' + 'remove_intronic_junctions' + ': ' + remove_intronic_junctions)
print_items.append('\t' + 'avg_all_for_ss' + ': ' + avg_all_for_ss)
print_items.append('\t' + 'permute_p_threshold' + ': ' + str(permute_p_threshold))
print_items.append('\t' + 'perform_permutation_analysis' + ': ' + perform_permutation_analysis)
print_items.append('\t' + 'export_NI_values' + ': ' + export_NI_values)
print_items.append('\t' + 'run_MiDAS' + ': ' + run_MiDAS)
print_items.append('\t' + 'use_direct_domain_alignments_only' + ': ' + use_direct_domain_alignments_only)
print_items.append('\t' + 'microRNA_prediction_method' + ': ' + microRNA_prediction_method)
print_items.append('\t' + 'analyze_all_conditions' + ': ' + analyze_all_conditions)
print_items.append('\t' + 'filter_for_AS' + ': ' + filter_for_AS)
if pathway_permutations == 'NA':
run_GOElite = 'decide_later'
else:
run_GOElite = 'run-immediately'
print_items.append('\t' + 'run_GOElite' + ': ' + run_GOElite)
universalPrintFunction(print_items)
if commandLineMode == 'yes': print 'Running command line mode:', commandLineMode
summary_data_db['gene_assayed'] = 0
summary_data_db['denominator_exp_genes'] = 0
summary_data_db['alt_events'] = 0
summary_data_db['denominator_exp_events'] = 0
summary_data_db['alt_genes'] = 0
summary_data_db['direct_domain_genes'] = 0
summary_data_db['miRNA_gene_denom'] = 0
summary_data_db['miRNA_gene_hits'] = 0
    if test_results_panel == 'yes':  ### Debug shortcut: display the summary results window immediately
graphic_links = []
graphic_links.append(['test', 'Config/AltAnalyze_structure-RNASeq.jpg'])
summary_data_db['QC'] = graphic_links
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
dataset = 'test';
results_dir = ''
print "Analysis Complete\n";
if root != '' and root != None:
UI.InfoWindow(print_out, 'Analysis Completed!')
tl = Toplevel();
SummaryResultsWindow(tl, 'GE', results_dir, dataset, 'parent', summary_data_db)
root.destroy();
sys.exit()
global export_go_annotations;
global aspire_output_list;
global aspire_output_gene_list
global filter_probesets_by;
global global_addition_factor;
global onlyAnalyzeJunctions
global log_fold_cutoff;
global aspire_cutoff;
global annotation_system;
global alt_exon_logfold_cutoff
"""dabg_p = 0.75; data_type = 'expression' ###used for expression analysis when dealing with AltMouse arrays
a = "3'array"; b = "exon"; c = "AltMouse"; e = "custom"; array_type = c
l = 'log'; n = 'non-log'; expression_data_format = l
hs = 'Hs'; mm = 'Mm'; dr = 'Dr'; rn = 'Rn'; species = mm
include_raw_data = 'yes'; expression_threshold = 70 ### Based on suggestion from BMC Genomics. 2006 Dec 27;7:325. PMID: 17192196, for hu-exon 1.0 st array
avg_all_for_ss = 'no' ###Default is 'no' since we don't want all probes averaged for the exon arrays"""
###### Run ExpressionBuilder ######
"""ExpressionBuilder is used to:
(1) extract out gene expression values, provide gene annotations, and calculate summary gene statistics
(2) filter probesets based DABG p-values and export to pair-wise comparison files
(3) build array annotations files matched to gene structure features (e.g. exons, introns) using chromosomal coordinates
options 1-2 are executed in remoteExpressionBuilder and option 3 is by running ExonArrayEnsembl rules"""
try:
additional_algorithm = additional_algorithms.Algorithm()
additional_score = additional_algorithms.Score()
except Exception:
additional_algorithm = 'null'; additional_score = 'null'
if analysis_method == 'FIRMA':
analyze_metaprobesets = 'yes'
elif additional_algorithm == 'FIRMA':
analyze_metaprobesets = 'yes'
else:
analyze_metaprobesets = 'no'
### Check to see if this is a real or FAKE (used for demonstration purposes) dataset
if run_from_scratch == 'Process CEL files' or 'Feature Extraction' in run_from_scratch:
for dataset in exp_file_location_db:
if run_from_scratch == 'Process CEL files':
fl = exp_file_location_db[dataset]
pgf_file = fl.InputCDFFile()
results_dir = filepath(fl.RootDir())
if '_demo' in pgf_file: ### Thus we are running demo CEL files and want to quit immediately
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
try:
print "Analysis Complete\n";
if root != '' and root != None:
UI.InfoWindow(print_out, 'Analysis Completed!')
tl = Toplevel();
SummaryResultsWindow(tl, 'AS', results_dir, dataset, 'parent', summary_data_db)
except Exception:
                        pass
skip_intro = 'yes'
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
reload(UI)
UI.getUpdatedParameters(array_type, species, run_from_scratch, results_dir)
try:
AltAnalyzeSetup('no')
except Exception:
sys.exit()
if 'CEL files' in run_from_scratch:
import APT
try:
try:
APT.probesetSummarize(exp_file_location_db, analyze_metaprobesets, filter_probeset_types, species,
root)
if analyze_metaprobesets == 'yes':
analyze_metaprobesets = 'no' ### Re-run the APT analysis to obtain probeset rather than gene-level results (only the residuals are needed from a metaprobeset run)
APT.probesetSummarize(exp_file_location_db, analyze_metaprobesets, filter_probeset_types,
species, root)
except Exception:
import platform
print "Trying to change APT binary access privileges"
for dataset in exp_file_location_db: ### Instance of the Class ExpressionFileLocationData
fl = exp_file_location_db[dataset];
apt_dir = fl.APTLocation()
if '/bin' in apt_dir:
apt_file = apt_dir + '/apt-probeset-summarize' ### if the user selects an APT directory
elif os.name == 'nt':
apt_file = apt_dir + '/PC/' + platform.architecture()[0] + '/apt-probeset-summarize.exe'
elif 'darwin' in sys.platform:
apt_file = apt_dir + '/Mac/apt-probeset-summarize'
elif 'linux' in sys.platform:
if '32bit' in platform.architecture():
apt_file = apt_dir + '/Linux/32bit/apt-probeset-summarize'
elif '64bit' in platform.architecture():
apt_file = apt_dir + '/Linux/64bit/apt-probeset-summarize'
apt_file = filepath(apt_file)
os.chmod(apt_file, 0777)
midas_dir = string.replace(apt_file, 'apt-probeset-summarize', 'apt-midas')
os.chmod(midas_dir, 0777)
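                ### NOTE: this retry passes analysis_method where the calls above pass analyze_metaprobesets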
APT.probesetSummarize(exp_file_location_db, analysis_method, filter_probeset_types, species, root)
except Exception:
                print_out = 'AltAnalyze encountered an unexpected error while running Affymetrix\n'
                print_out += 'Power Tools (APT). Additional information may be found in the directory\n'
                print_out += '"ExpressionInput/APT" in the output directory. You may also encounter issues\n'
                print_out += 'if you are logged into an account with restricted privileges.\n\n'
print_out += 'If this issue can not be resolved, contact AltAnalyze help or run RMA outside\n'
print_out += 'of AltAnalyze and import the results using the analysis option "expression file".\n'
print traceback.format_exc()
try:
UI.WarningWindow(print_out, 'Exit')
root.destroy();
sys.exit()
except Exception:
print print_out;
sys.exit()
elif 'Feature Extraction' in run_from_scratch:
import ProcessAgilentArrays
try:
ProcessAgilentArrays.agilentSummarize(exp_file_location_db)
except Exception:
print_out = 'Agilent array import and processing failed... see error log for details...'
print traceback.format_exc()
try:
UI.WarningWindow(print_out, 'Exit')
root.destroy();
sys.exit()
except Exception:
print print_out;
sys.exit()
reload(ProcessAgilentArrays)
if run_from_scratch == 'Process RNA-seq reads' or run_from_scratch == 'buildExonExportFiles':
import RNASeq;
reload(RNASeq);
import RNASeq
for dataset in exp_file_location_db: fl = exp_file_location_db[dataset]
### The below function aligns splice-junction coordinates to Ensembl exons from BED Files and
### exports AltAnalyze specific databases that are unique to this dataset to the output directory
try:
fastq_folder = fl.RunKallisto()
except Exception:
            print traceback.format_exc()
            fastq_folder = ''  ### assumed fallback: prevents a NameError below when RunKallisto() is unavailable
if len(fastq_folder) > 0:
try:
RNASeq.runKallisto(species, dataset, root_dir, fastq_folder, returnSampleNames=False)
biotypes = 'ran'
except Exception:
biotypes = 'failed'
else:
analyzeBAMs = False;
bedFilesPresent = False
dir_list = unique.read_directory(fl.BEDFileDir())
for file in dir_list:
if '.bam' in string.lower(file):
analyzeBAMs = True
if '.bed' in string.lower(file):
bedFilesPresent = True
if analyzeBAMs and bedFilesPresent == False:
import multiBAMtoBED
bam_dir = fl.BEDFileDir()
refExonCoordinateFile = filepath('AltDatabase/ensembl/' + species + '/' + species + '_Ensembl_exon.txt')
                outputExonCoordinateRefBEDfile = bam_dir + '/BedRef/' + species + '_' + string.replace(dataset, 'exp.', '')
analysisType = ['exon', 'junction', 'reference']
#analysisType = ['junction']
multiBAMtoBED.parallelBAMProcessing(bam_dir, refExonCoordinateFile, outputExonCoordinateRefBEDfile,
analysisType=analysisType, useMultiProcessing=fl.multiThreading(),
MLP=mlp, root=root)
biotypes = RNASeq.alignExonsAndJunctionsToEnsembl(species, exp_file_location_db, dataset, Multi=mlp)
if biotypes == 'failed':
print_out = 'No valid chromosomal positions in the input BED or BioScope files. Exiting AltAnalyze.'
#print traceback.format_exc()
try:
UI.WarningWindow(print_out, 'Exit')
root.destroy();
sys.exit()
except Exception:
print print_out;
sys.exit()
#print '!!!!!back inside AltAnalyze'
#returnLargeGlobalVars()
reload(RNASeq)
#print '!!!!!again'
#returnLargeGlobalVars()
if root_dir in biotypes:
print_out = 'Exon-level BED coordinate predictions exported to:\n' + biotypes
print_out += '\n\nAfter obtaining exon expression estimates, rename exon BED files to\n'
print_out += 'match the junction name (e.g., Sample1__exon.bed and Sample1__junction.bed)\n'
print_out += 'and re-run AltAnalyze (see tutorials at http://altanalyze.org for help).'
UI.InfoWindow(print_out, 'Export Complete')
try:
root.destroy(); sys.exit()
except Exception:
sys.exit()
if predictGroups == True:
expFile = fl.ExpFile()
if array_type == 'RNASeq':
exp_threshold = 100;
rpkm_threshold = 10
else:
exp_threshold = 200;
rpkm_threshold = 8
RNASeq.singleCellRNASeqWorkflow(species, array_type, expFile, mlp, exp_threshold=exp_threshold,
rpkm_threshold=rpkm_threshold)
goelite_run = False
if run_from_scratch == 'Process Expression file' or run_from_scratch == 'Process CEL files' or run_from_scratch == 'Process RNA-seq reads' or 'Feature Extraction' in run_from_scratch:
if fl.NormMatrix() == 'quantile' and 'Feature Extraction' not in run_from_scratch:
import NormalizeDataset
try:
NormalizeDataset.normalizeDataset(fl.ExpFile())
except Exception:
print "Normalization failed for unknown reasons..."
#"""
status = ExpressionBuilder.remoteExpressionBuilder(species, array_type,
dabg_p, raw_expression_threshold, avg_all_for_ss,
expression_data_format,
manufacturer, constitutive_source, data_source,
include_raw_data,
perform_alt_analysis, ge_fold_cutoffs, ge_pvalue_cutoffs,
ge_ptype,
exp_file_location_db, root)
reload(ExpressionBuilder) ### Clears Memory
#"""
graphics = []
if fl.MarkerFinder() == 'yes':
            ### Identify putative condition-specific marker genes
import markerFinder
fl.setOutputDir(root_dir) ### This needs to be set here
exp_file = fl.ExpFile()
if array_type != "3'array": exp_file = string.replace(exp_file, '.txt', '-steady-state.txt')
markerFinder_inputs = [exp_file, fl.DatasetFile()] ### Output a replicate and non-replicate version
markerFinder_inputs = [exp_file] ### Only considers the replicate and not mean analysis (recommended)
for input_exp_file in markerFinder_inputs:
                ### This applies to an ExpressionOutput DATASET file composed of gene expression values (averages already present)
try:
output_dir = markerFinder.getAverageExpressionValues(input_exp_file,
array_type) ### Either way, make an average annotated file from the DATASET file
except Exception:
print "Unknown MarkerFinder failure (possible filename issue or data incompatibility)..."
print traceback.format_exc()
continue
if 'DATASET' in input_exp_file:
group_exp_file = string.replace(input_exp_file, 'DATASET', 'AVERAGE')
else:
group_exp_file = (input_exp_file, output_dir) ### still analyze the primary sample
compendiumType = 'protein_coding'
if expression_data_format == 'non-log':
logTransform = True
else:
logTransform = False
try:
markerFinder.analyzeData(group_exp_file, species, array_type, compendiumType,
AdditionalParameters=fl, logTransform=logTransform)
except Exception:
                    pass
### Generate heatmaps (unclustered - order by markerFinder)
try:
graphics = markerFinder.generateMarkerHeatMaps(fl, array_type, graphics=graphics)
except Exception:
print traceback.format_exc()
remove_intronic_junctions = original_remove_intronic_junctions ### This var gets reset when running FilterDABG
try:
            summary_data_db['QC'] = fl.GraphicLinks() + graphics  ### provides links for displaying QC and clustering plots
except Exception:
            pass  ### Visualization support through matplotlib either not present or visualization options excluded
#print '!!!!!finished expression builder'
#returnLargeGlobalVars()
expression_data_format = 'log' ### This variable is set from non-log in FilterDABG when present (version 1.16)
try:
parent_dir = fl.RootDir() + '/GO-Elite/regulated/'
dir_list = read_directory(parent_dir)
for file in dir_list:
input_file_dir = parent_dir + '/' + file
inputType = 'IDs'
interactionDirs = ['WikiPathways', 'KEGG', 'BioGRID', 'TFTargets']
output_dir = parent_dir
degrees = 'direct'
input_exp_file = input_file_dir
gsp = UI.GeneSelectionParameters(species, array_type, manufacturer)
gsp.setGeneSet('None Selected')
gsp.setPathwaySelect('')
gsp.setGeneSelection('')
gsp.setOntologyID('')
gsp.setIncludeExpIDs(True)
UI.networkBuilder(input_file_dir, inputType, output_dir, interactionDirs, degrees, input_exp_file, gsp,
'')
except Exception:
print traceback.format_exc()
if status == 'stop':
### See if the array and species are compatible with GO-Elite analysis
system_codes = UI.getSystemInfo()
go_elite_analysis_supported = 'yes'
species_names = UI.getSpeciesInfo()
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset];
results_dir = filepath(fl.RootDir())
### Perform GO-Elite Analysis
if pathway_permutations != 'NA':
try:
print '\nBeginning to run GO-Elite analysis on alternative exon results'
elite_input_dirs = ['AltExonConfirmed', 'AltExon', 'regulated', 'upregulated',
'downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir + 'GO-Elite/' + elite_dir, results_dir + 'GO-Elite/denominator', results_dir + 'GO-Elite/' + elite_dir
input_dir = results_dir + 'GO-Elite/' + elite_dir
variables = species, mod, pathway_permutations, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, returnPathways, file_dirs, root
try:
input_files = read_directory(input_dir) ### Are there any files to analyze?
except Exception:
input_files = []
if len(input_files) > 0:
try:
GO_Elite.remoteAnalysis(variables, 'non-UI', Multi=mlp); goelite_run = True
except Exception, e:
print e
print "GO-Elite analysis failed"
try:
GO_Elite.moveMAPPFinderFiles(file_dirs[0])
except Exception:
print 'Input GO-Elite files could NOT be moved.'
try:
GO_Elite.moveMAPPFinderFiles(file_dirs[1])
except Exception:
print 'Input GO-Elite files could NOT be moved.'
except Exception:
pass
if goelite_run == False:
                print 'No GO-Elite input files to analyze (check your criteria).'
print_out = 'Analysis complete. Gene expression\nsummary exported to "ExpressionOutput".'
try:
if use_Tkinter == 'yes':
print "Analysis Complete\n";
UI.InfoWindow(print_out, 'Analysis Completed!')
tl = Toplevel();
SummaryResultsWindow(tl, 'GE', results_dir, dataset, 'parent', summary_data_db)
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
if go_elite_analysis_supported == 'yes':
UI.getUpdatedParameters(array_type, species, run_from_scratch, file_dirs)
try:
AltAnalyzeSetup('no')
except Exception:
print traceback.format_exc()
sys.exit()
else:
print '\n' + print_out; sys.exit()
except Exception:
#print 'Failed to report status through GUI.'
sys.exit()
else:
altanalyze_files = status[1] ### These files are the comparison files to analyze
elif run_from_scratch == 'update DBs':
        pass  ### Add link to new module here (possibly)
#updateDBs(species,array_type)
sys.exit()
if perform_alt_analysis != 'expression': ###Thus perform_alt_analysis = 'both' or 'alt' (default when skipping expression summary step)
###### Run AltAnalyze ######
global dataset_name;
global summary_results_db;
global summary_results_db2
summary_results_db = {};
summary_results_db2 = {};
aspire_output_list = [];
aspire_output_gene_list = []
onlyAnalyzeJunctions = 'no';
agglomerate_inclusion_probesets = 'no';
filter_probesets_by = 'NA'
if array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if filter_probeset_types == 'junctions-only':
onlyAnalyzeJunctions = 'yes'
elif filter_probeset_types == 'combined-junctions':
agglomerate_inclusion_probesets = 'yes'; onlyAnalyzeJunctions = 'yes'
elif filter_probeset_types == 'exons-only':
analysis_method = 'splicing-index'; filter_probesets_by = 'exon'
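            ### 'and' binds tighter than 'or' below, so any RNASeq analysis sets filter_probesets_by to 'all' regardless of filter_probeset_types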
if filter_probeset_types == 'combined-junctions' and array_type == 'junction' or array_type == 'RNASeq': filter_probesets_by = 'all'
else:
filter_probesets_by = filter_probeset_types
c = 'Ensembl';
d = 'Entrez Gene'
annotation_system = c
expression_threshold = 0 ###This is different than the raw_expression_threshold (probably shouldn't filter so set to 0)
if analysis_method == 'linearregres-rlm': analysis_method = 'linearregres';use_R = 'yes'
if gene_expression_cutoff < 1:
gene_expression_cutoff = 2 ### A number less than one is invalid
print "WARNING!!!! Invalid gene expression fold cutoff entered,\nusing the default value of 2, must be greater than 1."
log_fold_cutoff = math.log(float(gene_expression_cutoff), 2)
if analysis_method != 'ASPIRE' and analysis_method != 'none':
if p_threshold <= 0 or p_threshold > 1:
                p_threshold = 0.05  ### Must be greater than 0 and no more than 1
print "WARNING!!!! Invalid alternative exon p-value threshold entered,\nusing the default value of 0.05."
if alt_exon_fold_variable < 1:
alt_exon_fold_variable = 1 ### A number less than one is invalid
print "WARNING!!!! Invalid alternative exon fold cutoff entered,\nusing the default value of 2, must be greater than 1."
try:
alt_exon_logfold_cutoff = math.log(float(alt_exon_fold_variable), 2)
except Exception:
alt_exon_logfold_cutoff = 1
else:
alt_exon_logfold_cutoff = float(alt_exon_fold_variable)
global_addition_factor = 0
export_junction_comparisons = 'no' ### No longer accessed in this module - only in update mode through a different module
factor_out_expression_changes = 'yes' ### Use 'no' if data is normalized already or no expression normalization for ASPIRE desired
only_include_constitutive_containing_genes = 'yes'
remove_transcriptional_regulated_genes = 'yes'
add_exons_to_annotations = 'no'
exclude_protein_details = 'no'
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method: annotation_system = d
if 'linear' in analysis_method: analysis_method = 'linearregres'
if 'aspire' in analysis_method: analysis_method = 'ASPIRE'
if array_type == 'AltMouse': species = 'Mm'
#if export_NI_values == 'yes': remove_transcriptional_regulated_genes = 'no'
###Saves run-time while testing the software (global variable stored)
#import_dir = '/AltDatabase/affymetrix/'+species
#dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
### Get Ensembl-GO and pathway annotations from GO-Elite files
universalPrintFunction(["Importing GO-Elite pathway/GO annotations"])
global go_annotations;
go_annotations = {}
import BuildAffymetrixAssociations
go_annotations = BuildAffymetrixAssociations.getEnsemblAnnotationsFromGOElite(species)
global probeset_annotations_file
if array_type == 'RNASeq':
probeset_annotations_file = root_dir + 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_junctions.txt'
elif array_type == 'AltMouse':
probeset_annotations_file = 'AltDatabase/' + species + '/' + array_type + '/' + 'MASTER-probeset-transcript.txt'
else:
probeset_annotations_file = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_probesets.txt'
#"""
if analysis_method != 'none':
            analysis_summary = RunAltAnalyze()  ### Only run if an analysis method is specified (only available for RNA-Seq and junction analyses)
else:
analysis_summary = None
if analysis_summary != None:
summary_results_db, aspire_output_gene_list, number_events_analyzed = analysis_summary
summary_data_db2 = copy.deepcopy(summary_data_db)
        for i in summary_data_db2:
            del summary_data_db[i]  ### Resetting the variable would violate its global declaration, so delete keys in place instead
#universalPrintFunction(['Alternative Exon Results for Junction Comparisons:'])
#for i in summary_data_db: universalPrintFunction([i+' '+ str(summary_data_db[i])])
exportSummaryResults(summary_results_db, analysis_method, aspire_output_list, aspire_output_gene_list,
annotate_db, array_type, number_events_analyzed, root_dir)
else:
### Occurs for RNASeq when no junctions are present
summary_data_db2 = {}
if array_type == 'junction' or array_type == 'RNASeq':
            # Re-analyze junction array data separately for individual probesets rather than reciprocal junctions
if array_type == 'junction':
explicit_data_type = 'exon'
elif array_type == 'RNASeq':
explicit_data_type = 'junction'
else:
report_single_probeset_results = 'no'
### Obtain exon analysis defaults
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults('exon',
species)
analysis_method, null, filter_probeset_types, null, null, alt_exon_fold_variable, null, null, null, null, null, null, null, calculate_normIntensity_p, null = alt_exon_defaults
filter_probesets_by = filter_probeset_types
if additional_algorithm == 'splicing-index' or additional_algorithm == 'FIRMA':
analysis_method = additional_algorithm
#print [analysis_method], [filter_probeset_types], [p_threshold], [alt_exon_fold_variable]
try:
alt_exon_logfold_cutoff = math.log(float(additional_score), 2)
except Exception:
alt_exon_logfold_cutoff = 1
agglomerate_inclusion_probesets = 'no'
try:
summary_results_db, aspire_output_gene_list, number_events_analyzed = RunAltAnalyze()
exportSummaryResults(summary_results_db, analysis_method, aspire_output_list,
aspire_output_gene_list, annotate_db, 'exon', number_events_analyzed, root_dir)
if len(summary_data_db2) == 0: summary_data_db2 = summary_data_db; explicit_data_type = 'exon-only'
#universalPrintFunction(['Alternative Exon Results for Individual Probeset Analyses:'])
#for i in summary_data_db: universalPrintFunction([i+' '+ str(summary_data_db[i])])
except Exception:
print traceback.format_exc()
#"""
### Perform dPSI Analysis
try:
if 'counts.' in fl.CountsFile():
pass
else:
dir_list = read_directory(fl.RootDir() + 'ExpressionInput')
for file in dir_list:
if 'exp.' in file and 'steady-state' not in file:
fl.setExpFile(fl.RootDir() + 'ExpressionInput/' + file)
#print [fl.RootDir()+'ExpressionInput/'+file]
except Exception:
search_dir = fl.RootDir() + '/ExpressionInput'
files = unique.read_directory(fl.RootDir() + '/ExpressionInput')
for file in files:
if 'exp.' in file and 'steady-state.txt' not in file:
fl.setExpFile(search_dir + '/' + file)
try:
#"""
try:
graphic_links2, cluster_input_file = ExpressionBuilder.unbiasedComparisonSpliceProfiles(fl.RootDir(),
species,
array_type,
expFile=fl.CountsFile(),
min_events=0,
med_events=1)
except Exception:
pass
#"""
inputpsi = fl.RootDir() + 'AltResults/AlternativeOutput/' + species + '_' + array_type + '_top_alt_junctions-PSI-clust.txt'
### Calculate ANOVA p-value stats based on groups
matrix, compared_groups, original_data = statistics.matrixImport(inputpsi)
matrix_pvalues = statistics.runANOVA(inputpsi, matrix, compared_groups)
anovaFilteredDir = statistics.returnANOVAFiltered(inputpsi, original_data, matrix_pvalues)
graphic_link1 = ExpressionBuilder.exportHeatmap(anovaFilteredDir)
try:
summary_data_db2['QC'] += graphic_link1
except Exception:
summary_data_db2['QC'] = graphic_link1
except Exception:
print traceback.format_exc()
import RNASeq
try:
graphic_link = RNASeq.compareExonAndJunctionResults(species, array_type, summary_results_db, root_dir)
try:
summary_data_db2['QC'] += graphic_link
except Exception:
summary_data_db2['QC'] = graphic_link
except Exception:
print traceback.format_exc()
#"""
### Export the top 15 spliced genes
try:
altresult_dir = fl.RootDir() + '/AltResults/'
splicing_results_root = altresult_dir + '/Clustering/'
dir_list = read_directory(splicing_results_root)
gene_string = ''
altanalyze_results_folder = altresult_dir + '/RawSpliceData/' + species
### Lookup the raw expression dir
expression_results_folder = string.replace(altresult_dir, 'AltResults', 'ExpressionInput')
expression_dir = UI.getValidExpFile(expression_results_folder)
try:
altresult_dir = UI.getValidSplicingScoreFile(altanalyze_results_folder)
except Exception, e:
print traceback.format_exc()
for file in dir_list:
if 'AltExonConfirmed' in file:
gene_dir = splicing_results_root + '/' + file
genes = UI.importGeneList(gene_dir, limit=50) ### list of gene IDs or symbols
gene_string = gene_string + ',' + genes
print 'Imported genes from', file, '\n'
show_introns = False
analysisType = 'plot'
for file in dir_list:
if 'Combined-junction-exon-evidence' in file and 'top' not in file:
gene_dir = splicing_results_root + '/' + file
try:
isoform_dir = UI.exportJunctionList(gene_dir, limit=50) ### list of gene IDs or symbols
except Exception:
print traceback.format_exc()
UI.altExonViewer(species, array_type, expression_dir, gene_string, show_introns, analysisType, None);
print 'completed'
UI.altExonViewer(species, array_type, altresult_dir, gene_string, show_introns, analysisType, None);
print 'completed'
except Exception:
print traceback.format_exc()
try:
top_PSI_junction = inputpsi[:-4] + '-ANOVA.txt'
isoform_dir2 = UI.exportJunctionList(top_PSI_junction, limit=50) ### list of gene IDs or symbols
except Exception:
print traceback.format_exc()
try:
analyzeBAMs = False
dir_list = unique.read_directory(fl.RootDir())
for file in dir_list:
if '.bam' in string.lower(file):
analyzeBAMs = True
if analyzeBAMs:
### Create sashimi plot index
import SashimiIndex
SashimiIndex.remoteIndexing(species, fl)
import SashimiPlot
print 'Exporting Sashimi Plots for the top-predicted splicing events... be patient'
try:
SashimiPlot.remoteSashimiPlot(species, fl, fl.RootDir(),
isoform_dir) ### assuming the bam files are in the root-dir
except Exception:
pass
print 'completed'
SashimiPlot.remoteSashimiPlot(species, fl, fl.RootDir(),
isoform_dir2) ### assuming the bam files are in the root-dir
print 'completed'
else:
print 'No BAM files present in the root directory... skipping SashimiPlot analysis...'
except Exception:
print traceback.format_exc()
try:
clearObjectsFromMemory(exon_db);
clearObjectsFromMemory(constitutive_probeset_db)
clearObjectsFromMemory(go_annotations);
clearObjectsFromMemory(original_microRNA_z_score_data)
clearObjectsFromMemory(last_exon_region_db)
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
except Exception:
        pass
#print '!!!!!finished'
#returnLargeGlobalVars()
end_time = time.time();
time_diff = int(end_time - start_time)
universalPrintFunction(["Analyses finished in %d seconds" % time_diff])
#universalPrintFunction(["Hit Enter/Return to exit AltAnalyze"])
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset];
results_dir = filepath(fl.RootDir())
### Perform GO-Elite Analysis
if pathway_permutations != 'NA':
goelite_run = False
print '\nBeginning to run GO-Elite analysis on alternative exon results'
elite_input_dirs = ['AltExonConfirmed', 'AltExon', 'regulated', 'upregulated',
'downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir + 'GO-Elite/' + elite_dir, results_dir + 'GO-Elite/denominator', results_dir + 'GO-Elite/' + elite_dir
input_dir = results_dir + 'GO-Elite/' + elite_dir
try:
input_files = read_directory(input_dir) ### Are there any files to analyze?
except Exception:
input_files = []
if len(input_files) > 0:
variables = species, mod, pathway_permutations, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, returnPathways, file_dirs, root
try:
GO_Elite.remoteAnalysis(variables, 'non-UI', Multi=mlp); goelite_run = True
except Exception, e:
print e
print "GO-Elite analysis failed"
try:
GO_Elite.moveMAPPFinderFiles(file_dirs[0])
except Exception:
print 'Input GO-Elite files could NOT be moved.'
try:
GO_Elite.moveMAPPFinderFiles(file_dirs[1])
except Exception:
print 'Input GO-Elite files could NOT be moved.'
if goelite_run == False:
                print 'No GO-Elite input files to analyze (check your criteria).'
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
try:
if root != '' and root != None:
print "Analysis Complete\n";
UI.InfoWindow(print_out, 'Analysis Completed!')
tl = Toplevel();
SummaryResultsWindow(tl, 'AS', results_dir, dataset_name, 'specific', summary_data_db2)
except Exception:
print traceback.format_exc()
pass #print 'Failed to open GUI.'
skip_intro = 'yes'
if root != '' and root != None:
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
try:
UI.getUpdatedParameters(array_type, species, run_from_scratch, file_dirs)
except Exception:
pass
try:
AltAnalyzeSetup('no')
except Exception:
sys.exit()
def exportSummaryResults(summary_results_db, analysis_method, aspire_output_list, aspire_output_gene_list, annotate_db,
array_type, number_events_analyzed, root_dir):
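    """Summarize alternative exon results through ResultsExport_module (overview plus probeset- and gene-level comparison files)."""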
try:
ResultsExport_module.outputSummaryResults(summary_results_db, '', analysis_method, root_dir)
#ResultsExport_module.outputSummaryResults(summary_results_db2,'-uniprot_attributes',analysis_method)
ResultsExport_module.compareAltAnalyzeResults(aspire_output_list, annotate_db, number_events_analyzed, 'no',
analysis_method, array_type, root_dir)
ResultsExport_module.compareAltAnalyzeResults(aspire_output_gene_list, annotate_db, '', 'yes', analysis_method,
array_type, root_dir)
except UnboundLocalError:
print "...No results to summarize" ###Occurs if there is a problem parsing these files
def checkGOEliteProbesets(fn, species):
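    """Return 'yes' if any probeset in the Affymetrix CSV annotation file fn is present in the GO-Elite Ensembl/EntrezGene-to-Affymetrix association files for this species."""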
### Get all probesets in GO-Elite files
mod_source = 'Ensembl' + '-' + 'Affymetrix'
import gene_associations
try:
ensembl_to_probeset_id = gene_associations.getGeneToUid(species, mod_source)
except Exception:
ensembl_to_probeset_id = {}
mod_source = 'EntrezGene' + '-' + 'Affymetrix'
try:
entrez_to_probeset_id = gene_associations.getGeneToUid(species, mod_source)
except Exception:
entrez_to_probeset_id = {}
probeset_db = {}
for gene in ensembl_to_probeset_id:
for probeset in ensembl_to_probeset_id[gene]: probeset_db[probeset] = []
for gene in entrez_to_probeset_id:
for probeset in entrez_to_probeset_id[gene]: probeset_db[probeset] = []
###Import an Affymetrix array annotation file (from http://www.affymetrix.com) and parse out annotations
csv_probesets = {};
x = 0;
y = 0
fn = filepath(fn);
status = 'no'
for line in open(fn, 'r').readlines():
probeset_data = string.replace(line, '\n', '') #remove endline
probeset_data = string.replace(probeset_data, '---', '')
affy_data = string.split(probeset_data[1:-1], '","')
if x == 0 and line[0] != '#':
x = 1;
affy_headers = affy_data
for header in affy_headers:
y = 0
while y < len(affy_headers):
if 'Probe Set ID' in affy_headers[y] or 'probeset_id' in affy_headers[y]: ps = y
y += 1
elif x == 1:
try:
probeset = affy_data[ps]; csv_probesets[probeset] = []
except Exception:
                pass
for probeset in csv_probesets:
if probeset in probeset_db: status = 'yes';break
return status
class SpeciesData:
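    """Container for one species: code (e.g., 'Hs'), full name, compatible ID systems and NCBI taxonomy ID."""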
def __init__(self, abrev, species, systems, taxid):
self._abrev = abrev;
self._species = species;
self._systems = systems;
self._taxid = taxid
def SpeciesCode(self): return self._abrev
def SpeciesName(self): return self._species
def Systems(self): return self._systems
def TaxID(self): return self._taxid
    def __repr__(self): return self.SpeciesCode() + '|' + self.SpeciesName()
def getSpeciesInfo():
### Used by AltAnalyze
    species_codes = UI.importSpeciesInfo();
    species_names = {}
for species_full in species_codes:
sc = species_codes[species_full];
abrev = sc.SpeciesCode()
species_names[abrev] = species_full
return species_codes, species_names
def importGOEliteSpeciesInfo():
filename = 'Config/goelite_species.txt';
x = 0
fn = filepath(filename);
species_codes = {}
for line in open(fn, 'rU').readlines():
data = cleanUpLine(line)
abrev, species, taxid, compatible_mods = string.split(data, '\t')
if x == 0:
x = 1
else:
compatible_mods = string.split(compatible_mods, '|')
sd = SpeciesData(abrev, species, compatible_mods, taxid)
species_codes[species] = sd
return species_codes
def exportGOEliteSpeciesInfo(species_codes):
fn = filepath('Config/goelite_species.txt');
data = open(fn, 'w');
x = 0
header = string.join(['species_code', 'species_name', 'tax_id', 'compatible_algorithms'], '\t') + '\n'
data.write(header)
for species in species_codes:
if 'other' not in species and 'all-' not in species:
sd = species_codes[species]
mods = string.join(sd.Systems(), '|')
values = [sd.SpeciesCode(), sd.SpeciesName(), sd.TaxID(), mods]
values = string.join(values, '\t') + '\n'
data.write(values)
data.close()
def TimeStamp():
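    """Return the current date as a YYYYMMDD string (e.g., '20140321')."""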
time_stamp = time.localtime()
year = str(time_stamp[0]);
month = str(time_stamp[1]);
day = str(time_stamp[2])
if len(month) < 2: month = '0' + month
if len(day) < 2: day = '0' + day
return year + month + day
def verifyFile(filename):
status = 'not found'
try:
fn = filepath(filename)
for line in open(fn, 'rU').xreadlines(): status = 'found';break
except Exception:
status = 'not found'
return status
def verifyFileLength(filename):
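    """Return the number of lines in filename, counting no more than the first 10 (0 if the file cannot be read)."""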
count = 0
try:
fn = filepath(filename)
for line in open(fn, 'rU').xreadlines():
count += 1
if count > 9: break
except Exception:
null = []
return count
def verifyGroupFileFormat(filename):
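    """Return True if any line in the file splits into exactly three tab-delimited fields (the expected groups-file format)."""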
correct_format = False
try:
fn = filepath(filename)
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
if len(string.split(data, '\t')) == 3:
correct_format = True
break
except Exception:
correct_format = False
return correct_format
def displayHelp():
fn = filepath('Documentation/commandline.txt')
print '\n################################################\nAltAnalyze Command-Line Help'
for line in open(fn, 'rU').readlines():
print cleanUpLine(line)
print '\n################################################ - END HELP'
sys.exit()
def searchDirectory(directory, var):
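    """Return the path of the first file in directory containing var, relative to the current gene database version directory."""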
directory = unique.filepath(directory)
files = unique.read_directory(directory)
version = unique.getCurrentGeneDatabaseVersion()
for file in files:
if var in file:
location = string.split(directory + '/' + file, version)[1][1:]
return [location]
###### Command Line Functions (AKA Headless Mode) ######
def commandLineRun():
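    """Parse command-line flags with getopt and run the requested AltAnalyze workflows headlessly (see the example invocations below)."""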
print 'Running commandline options'
import getopt
#/hd3/home/nsalomonis/normalization/mir1 - boxer
#python AltAnalyze.py --species Mm --arraytype "3'array" --celdir "C:/CEL" --output "C:/CEL" --expname miR1_column --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --species Hs --arraytype "3'array" --FEdir "C:/FEfiles" --output "C:/FEfiles" --channel_to_extract "green/red ratio" --expname cancer --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --celdir "C:/CEL" --output "C:/CEL" --expname miR1_column
#open ./AltAnalyze.app --celdir "/Users/nsalomonis/Desktop" --output "/Users/nsalomonis/Desktop" --expname test
#python AltAnalyze.py --species Mm --arraytype "3'array" --expdir "C:/CEL/ExpressionInput/exp.miR1_column.txt" --output "C:/CEL" --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --species Mm --platform RNASeq --bedDir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles" --groupdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/ExpressionInput/groups.test.txt" --compdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/ExpressionInput/comps.test.txt" --output "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles" --expname "test"
#python AltAnalyze.py --species Mm --platform RNASeq --filterdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/" --output "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles"
#python AltAnalyze.py --expdir "/Users/nsalomonis/Desktop/Nathan/ExpressionInput/exp.test.txt" --exonMapFile "/Users/nsalomonis/Desktop/Nathan/hgu133_probe.txt" --species Hs --platform "3'array" --output "/Users/nsalomonis/Desktop/Nathan"
global apt_location;
global root_dir;
global probability_statistic;
global log_file;
global summary_data_db;
summary_data_db = {}
###required
marker_finder = 'no'
manufacturer = 'Affymetrix'
constitutive_source = 'Ensembl'
ensembl_version = 'current'
species_code = None
species = None
main_input_folder = None
output_dir = None
array_type = None
input_annotation_file = None
groups_file = None
comps_file = None
input_cdf_file = None
exp_name = None
run_GOElite = 'yes'
visualize_qc_results = 'yes'
run_lineage_profiler = 'yes'
input_exp_file = ''
cel_file_dir = ''
input_stats_file = ''
input_filtered_dir = ''
external_annotation_dir = ''
xhyb_remove = 'no'
update_method = []
update_dbs = 'no'
analyze_all_conditions = 'no'
return_all = 'no'
additional_array_types = []
remove_intronic_junctions = 'no'
ignore_built_species = 'no'
build_exon_bedfile = 'no'
compendiumType = 'protein_coding'
probability_statistic = 'unpaired t-test'
specific_array_type = None
additional_resources = [None]
wpid = None
mod = 'Ensembl'
transpose = False
input_file_dir = None
denom_file_dir = None
image_export = []
    selected_species = ['Hs', 'Mm', 'Rn']  ### Species for which additional array types are currently supported
selected_platforms = ['AltMouse', 'exon', 'gene', 'junction']
returnPathways = 'no'
compendiumPlatform = 'gene'
exonMapFile = None
    platformType = None  ### This option is used to store the original platform type
perform_alt_analysis = 'no'
mappedExonAnalysis = False ### Map the original IDs to the RNA-Seq exon database (when True)
microRNA_prediction_method = None
pipelineAnalysis = True
OntologyID = ''
PathwaySelection = ''
GeneSetSelection = ''
interactionDirs = []
inputType = 'ID list'
Genes = ''
degrees = 'direct'
includeExpIDs = True
update_interactions = False
data_type = 'raw expression'
batch_effects = 'no'
channel_to_extract = None
normalization = False
justShowTheseIDs = ''
display = False
accessoryAnalysis = ''
modelSize = None
geneModel = False
run_from_scratch = None
systemToUse = None ### For other IDs
custom_reference = False
multiThreading = True
genesToReport = 60
correlateAll = True
expression_data_format = 'log'
runICGS = False
IDtype = None
runKallisto = False
original_arguments = sys.argv
arguments = []
for arg in original_arguments:
arg = string.replace(arg, '\xe2\x80\x9c', '') ### These are non-standard forward quotes
arg = string.replace(arg, '\xe2\x80\x9d', '') ### These are non-standard reverse quotes
arg = string.replace(arg, '\xe2\x80\x93', '-') ### These are non-standard dashes
arg = string.replace(arg, '\x96', '-') ### These are non-standard dashes
arg = string.replace(arg, '\x93', '') ### These are non-standard forward quotes
arg = string.replace(arg, '\x94', '') ### These are non-standard reverse quotes
arguments.append(arg)
print '\nArguments input:', arguments, '\n'
if '--help' in arguments[1:] or '--h' in arguments[1:]:
try:
displayHelp() ### Print out a help file and quit
except Exception:
print 'See: http://www.altanalyze.org for documentation and command-line help';sys.exit()
if 'AltAnalyze' in arguments[1]:
        arguments = arguments[1:]  ### Occurs on Ubuntu when the AltAnalyze location is added to sys.argv (exclude it since it carries no argument)
try:
options, remainder = getopt.getopt(arguments[1:], '', ['species=', 'mod=', 'elitepval=', 'elitepermut=',
'method=', 'zscore=', 'pval=', 'num=',
'runGOElite=', 'denom=', 'output=', 'arraytype=',
'celdir=', 'expdir=', 'statdir=',
'filterdir=', 'cdfdir=', 'csvdir=', 'expname=',
'dabgp=', 'rawexp=', 'avgallss=', 'logexp=',
'inclraw=', 'runalt=', 'altmethod=', 'altp=',
'probetype=', 'altscore=', 'GEcutoff=',
'exportnormexp=', 'calcNIp=', 'runMiDAS=',
'GEelitepval=', 'mirmethod=', 'ASfilter=',
'vendor=', 'GEelitefold=', 'update=', 'version=',
'analyzeAllGroups=', 'GEeliteptype=', 'force=',
'resources_to_analyze=', 'dataToAnalyze=', 'returnAll=',
'groupdir=', 'compdir=', 'annotatedir=',
'additionalScore=', 'additionalAlgorithm=',
'noxhyb=', 'platform=', 'bedDir=',
'altpermutep=', 'altpermute=',
'removeIntronOnlyJunctions=',
'normCounts=', 'buildExonExportFile=', 'groupStat=',
'compendiumPlatform=', 'rpkm=', 'exonExp=',
'specificArray=',
'ignoreBuiltSpecies=', 'ORAstat=', 'outputQCPlots=',
'runLineageProfiler=', 'input=', 'image=', 'wpid=',
'additional=', 'row_method=', 'column_method=',
'row_metric=', 'column_metric=', 'color_gradient=',
'transpose=', 'returnPathways=', 'compendiumType=',
'exonMapFile=', 'geneExp=', 'labels=', 'contrast=',
'plotType=', 'geneRPKM=', 'exonRPKM=',
'runMarkerFinder=',
'update_interactions=', 'includeExpIDs=', 'degrees=',
'genes=', 'inputType=', 'interactionDirs=',
'GeneSetSelection=',
'PathwaySelection=', 'OntologyID=', 'dataType=',
'combat=',
'channelToExtract=', 'showIntrons=', 'display=', 'join=',
'uniqueOnly=', 'accessoryAnalysis=', 'inputIDType=',
'outputIDType=',
'FEdir=', 'AltResultsDir=', 'geneFileDir=',
'modelSize=', 'geneModel=', 'reference=',
'multiThreading=', 'multiProcessing=', 'genesToReport=',
'correlateAll=', 'normalization=', 'justShowTheseIDs=',
'direction=', 'analysisType=', 'algorithm=', 'rho=',
'clusterGOElite=', 'geneSetName=', 'runICGS=', 'IDtype=',
'CountsCutoff=', 'FoldDiff=', 'SamplesDiffering=',
'removeOutliers=',
'featurestoEvaluate=', 'restrictBy=',
'ExpressionCutoff=',
'excludeCellCycle=', 'runKallisto=', 'fastq_dir=',
'FDR='])
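### Note: each long option ends in '=' because getopt.getopt() treats a trailing '='
### as "this flag requires an argument" (e.g., --species Hs); duplicates were removed
### and a missing comma after 'removeOutliers=' was restored above.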
except Exception:
print traceback.format_exc()
print "There is an error in the supplied command-line arguments (each flag requires an argument)";
sys.exit()
for opt, arg in options:
#print [opt, arg]
if opt == '--species':
species = arg
elif opt == '--arraytype':
if array_type != None:
additional_array_types.append(arg)
else:
array_type = arg; platform = array_type
if specific_array_type == None: specific_array_type = platform
elif opt == '--exonMapFile':
perform_alt_analysis = 'yes' ### Perform alternative exon analysis
exonMapFile = arg
elif opt == '--specificArray':
specific_array_type = arg ### e.g., hGlue
elif opt == '--celdir':
cel_file_dir = arg
elif opt == '--bedDir':
cel_file_dir = arg
elif opt == '--FEdir':
cel_file_dir = arg
elif opt == '--expdir':
input_exp_file = arg
elif opt == '--statdir':
input_stats_file = arg
elif opt == '--filterdir':
input_filtered_dir = arg
elif opt == '--groupdir':
groups_file = arg
elif opt == '--compdir':
comps_file = arg
elif opt == '--cdfdir':
input_cdf_file = arg
elif opt == '--csvdir':
input_annotation_file = arg
elif opt == '--expname':
exp_name = arg
elif opt == '--output':
output_dir = arg
elif opt == '--vendor':
manufacturer = arg
elif opt == '--runICGS':
runICGS = True
elif opt == '--IDtype':
IDtype = arg
elif opt == '--ignoreBuiltSpecies':
ignore_built_species = arg
elif opt == '--platform':
if array_type != None:
additional_array_types.append(arg)
else:
array_type = arg; platform = array_type
if specific_array_type == None: specific_array_type = platform
elif opt == '--update':
update_dbs = 'yes'; update_method.append(arg)
elif opt == '--version':
ensembl_version = arg
elif opt == '--compendiumPlatform':
compendiumPlatform = arg ### platform for which the LineageProfiler compendium is built on
elif opt == '--force':
force = arg
elif opt == '--input':
input_file_dir = arg; pipelineAnalysis = False ### If this option is entered, only perform the indicated analysis
elif opt == '--image':
image_export.append(arg)
elif opt == '--wpid':
wpid = arg
elif opt == '--mod':
mod = arg
elif opt == '--runKallisto':
if arg == 'yes' or string.lower(arg) == 'true':
runKallisto = True
elif opt == '--fastq_dir':
input_fastq_dir = arg
elif opt == '--additional':
if additional_resources[0] == None:
additional_resources = []
additional_resources.append(arg)
else:
additional_resources.append(arg)
elif opt == '--transpose':
if arg == 'True': transpose = True
elif opt == '--runLineageProfiler': ###Variable declared here and later (independent analysis here or pipelined with other analyses later)
run_lineage_profiler = arg
elif opt == '--compendiumType': ### protein-coding, ncRNA, or exon
compendiumType = arg
elif opt == '--denom':
denom_file_dir = arg ### Indicates that GO-Elite is run independent from AltAnalyze itself
elif opt == '--accessoryAnalysis':
accessoryAnalysis = arg
elif opt == '--channelToExtract':
channel_to_extract = arg
elif opt == '--genesToReport':
genesToReport = int(arg)
elif opt == '--correlateAll':
correlateAll = True
elif opt == '--direction':
direction = arg
elif opt == '--logexp':
expression_data_format = arg
elif opt == '--geneRPKM':
rpkm_threshold = arg
elif opt == '--multiThreading' or opt == '--multiProcessing':
multiThreading = arg
if multiThreading == 'yes':
multiThreading = True
elif 'rue' in multiThreading:
multiThreading = True
else:
multiThreading = False
if 'other' in manufacturer or 'Other' in manufacturer:
### For other IDs
systemToUse = array_type
if array_type == None:
print 'Please indicate an ID type with --platform when setting vendor equal to "Other IDs"';
sys.exit()
array_type = "3'array"
if array_type == 'RNASeq': manufacturer = array_type
if platformType == None: platformType = array_type
if perform_alt_analysis == 'yes':
if platform == "3'array":
mappedExonAnalysis = True
cel_file_dir = input_exp_file
exp_name = export.findFilename(input_exp_file)
exp_name = string.replace(exp_name, '.txt', '')
exp_name = string.replace(exp_name, 'exp.', '')
input_exp_file = ''
### To perform alternative exon analyses for platforms without a dedicated database, appropriate mapping info or array type data must be supplied
### (will need to perform downstream testing for unsupported Affymetrix exon, gene and junction arrays)
if exonMapFile == None and specific_array_type == None and cel_file_dir == '':
print_out = "\nUnable to run!!! Please designate either a specific platform (e.g., --specificArray hgU133_2), select CEL files, or an "
print_out += "exon-level mapping file location (--exonMapFile C:/mapping.txt) to perform alternative exon analyses for this platform."
### Will need to check here to see if the platform is supported (local or online files) OR wait until an error is encountered later
######## Perform analyses independent from AltAnalyze database centric analyses that require additional parameters
if len(image_export) > 0 or len(accessoryAnalysis) > 0 or runICGS:
if runICGS:
#python AltAnalyze.py --runICGS yes --expdir "/Users/saljh8/Desktop/demo/Myoblast/ExpressionInput/exp.myoblast.txt" --platform "3'array" --species Hs --GeneSetSelection BioMarkers --PathwaySelection Heart --column_method hopach --rho 0.4 --ExpressionCutoff 200 --justShowTheseIDs "NKX2-5 T TBX5" --FoldDiff 10 --SamplesDiffering 3 --excludeCellCycle conservative
try:
species = species
except Exception:
print 'Please designate a species before continuing (e.g., --species Hs)'
try:
array_type = array_type
except Exception:
print 'Please designate a platform before continuing (e.g., --platform RNASeq)'
if len(cel_file_dir) > 0:
values = species, exp_file_location_db, dataset, mlp_instance
StatusWindow(values, 'preProcessRNASeq') ### proceed to run the full discovery analysis here!!!
else:
if len(input_exp_file) > 0:
pass
else:
print 'Please indicate a source folder or expression file (e.g., --expdir /dataset/singleCells.txt)'
if array_type == 'Other' or 'Other' in array_type:
if ':' in array_type:
array_type, IDtype = string.split(array_type, ':')
array_type = "3'array"
if IDtype == None: IDtype = manufacturer
row_method = 'weighted'
column_method = 'average'
row_metric = 'cosine'
column_metric = 'cosine'
color_gradient = 'yellow_black_blue'
contrast = 3
vendor = manufacturer
GeneSelection = ''
PathwaySelection = ''
GeneSetSelection = 'None Selected'
excludeCellCycle = True
rho_cutoff = 0.4
restrictBy = 'protein_coding'
featurestoEvaluate = 'Genes'
ExpressionCutoff = 1
CountsCutoff = 1
FoldDiff = 2
SamplesDiffering = 3
JustShowTheseIDs = ''
removeOutliers = False
PathwaySelection = []
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--row_method':
row_method = arg
if row_method == 'None': row_method = None
elif opt == '--column_method':
column_method = arg
if column_method == 'None': column_method = None
elif opt == '--row_metric':
row_metric = arg
elif opt == '--column_metric':
column_metric = arg
elif opt == '--color_gradient':
color_gradient = arg
elif opt == '--GeneSetSelection':
GeneSetSelection = arg
elif opt == '--PathwaySelection':
PathwaySelection.append(arg)
elif opt == '--genes':
GeneSelection = arg
elif opt == '--ExpressionCutoff':
ExpressionCutoff = arg
elif opt == '--normalization':
normalization = arg
elif opt == '--justShowTheseIDs':
justShowTheseIDs = arg
elif opt == '--rho':
rho_cutoff = float(arg)
elif opt == '--clusterGOElite':
clusterGOElite = float(arg)
elif opt == '--CountsCutoff':
CountsCutoff = int(float(arg))
elif opt == '--FoldDiff':
FoldDiff = int(float(arg))
elif opt == '--SamplesDiffering':
SamplesDiffering = int(float(arg))
elif opt == '--removeOutliers':
removeOutliers = arg
elif opt == '--featurestoEvaluate':
featurestoEvaluate = arg
elif opt == '--restrictBy':
restrictBy = arg
elif opt == '--excludeCellCycle':
excludeCellCycle = arg
if excludeCellCycle == 'False' or excludeCellCycle == 'no':
excludeCellCycle = False
elif excludeCellCycle == 'True' or excludeCellCycle == 'yes' or excludeCellCycle == 'conservative':
excludeCellCycle = True
elif opt == '--contrast':
try:
contrast = float(arg)
except Exception:
print '--contrast not a valid float';sys.exit()
elif opt == '--vendor':
vendor = arg
elif opt == '--display':
if arg == 'yes':
display = True
elif arg == 'True':
display = True
else:
display = False
if len(PathwaySelection) == 0: PathwaySelection = ''
if len(GeneSetSelection) > 0 or GeneSelection != '':
gsp = UI.GeneSelectionParameters(species, array_type, vendor)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
gsp.setJustShowTheseIDs(justShowTheseIDs) ### pass the --justShowTheseIDs value (the capitalized placeholder variable was never updated)
gsp.setNormalize('median')
gsp.setSampleDiscoveryParameters(ExpressionCutoff, CountsCutoff, FoldDiff, SamplesDiffering,
removeOutliers, featurestoEvaluate, restrictBy, excludeCellCycle,
column_metric, column_method, rho_cutoff)
import RNASeq
mlp_instance = mlp
if cel_file_dir != '':
expFile = cel_file_dir + '/ExpressionInput/' + 'exp.' + exp_name + '.txt'
elif input_exp_file != '':
if 'ExpressionInput' in input_exp_file:
expFile = input_exp_file
else:
### Copy over expression file to ExpressionInput
expdir2 = string.replace(input_exp_file, 'exp.', '')
root_dir = export.findParentDir(input_exp_file) ### expFile is not yet defined in this branch
expFile = root_dir + '/ExpressionInput/exp.' + export.findFilename(expdir2)
export.copyFile(input_exp_file, expFile)
global log_file
root_dir = export.findParentDir(expFile)
root_dir = string.replace(root_dir, '/ExpressionInput', '')
time_stamp = timestamp()
log_file = filepath(root_dir + 'AltAnalyze_report-' + time_stamp + '.log')
log_report = open(log_file, 'w');
log_report.close()
sys.stdout = Logger('')
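### AltAnalyze '-steady-state.txt' files appear to hold gene-level summarized expression
### (constitutive exon/junction averages); prefer that file when present.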
count = verifyFileLength(expFile[:-4] + '-steady-state.txt')
if count > 1:
expFile = expFile[:-4] + '-steady-state.txt'
elif array_type == 'RNASeq':
### Indicates that the steady-state file doesn't exist. The exp. file may exist but could be junction-only, so re-build from BED files here
values = species, exp_file_location_db, dataset, mlp_instance
StatusWindow(values, 'preProcessRNASeq') ### proceed to run the full discovery analysis here!!!
expFile = expFile[:-4] + '-steady-state.txt'
print [excludeCellCycle]
UI.RemotePredictSampleExpGroups(expFile, mlp_instance, gsp, (
species, array_type)) ### proceed to run the full discovery analysis here!!!
sys.exit()
if 'WikiPathways' in image_export:
#python AltAnalyze.py --input /Users/test/input/criterion1.txt --image WikiPathways --mod Ensembl --species Hs --wpid WP536
if wpid == None:
print 'Please provide a valid WikiPathways ID (e.g., WP1234)';
sys.exit()
if species == None:
print 'Please provide a valid species ID for an installed database (to install: --update Official --species Hs --version EnsMart62Plus)';
sys.exit()
if input_file_dir == None:
print 'Please provide a valid file location for your input IDs (also needs to include system code and value column)';
sys.exit()
import WikiPathways_webservice
try:
print 'Attempting to output a WikiPathways colored image from user data'
print 'mod:', mod
print 'species_code:', species
print 'wpid:', wpid
print 'input GO-Elite ID file:', input_file_dir
graphic_link = WikiPathways_webservice.visualizePathwayAssociations(input_file_dir, species, mod, wpid)
except Exception, e:
if 'force_no_matching_error' in traceback.format_exc():
print '\nUnable to run!!! None of the input IDs mapped to this pathway\n'
elif 'IndexError' in traceback.format_exc():
print '\nUnable to run!!! Input ID file does not have at least 3 columns, with the second column being system code\n'
elif 'ValueError' in traceback.format_exc():
print '\nUnable to run!!! Input ID file error. Please check that you do not have extra rows with no data\n'
elif 'source_data' in traceback.format_exc():
print '\nUnable to run!!! Input ID file does not contain a valid system code\n'
elif 'goelite' in traceback.format_exc():
print '\nUnable to run!!! A valid species database needs to first be installed. For example, run:'
print 'python AltAnalyze.py --update Official --species Hs --version EnsMart65\n'
else:
print traceback.format_exc()
print '\nError generating the pathway "%s"' % wpid, '\n'
try:
printout = 'Finished exporting visualized pathway to:', graphic_link['WP']
print printout, '\n'
except Exception:
None
sys.exit()
if 'MergeFiles' in accessoryAnalysis:
#python AltAnalyze.py --accessoryAnalysis MergeFiles --input "C:\file1.txt" --input "C:\file2.txt" --output "C:\tables"
files_to_merge = []
join_option = 'Intersection'
uniqueOnly = False
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--input': files_to_merge.append(arg)
if opt == '--join': join_option = arg
if opt == '--uniqueOnly': uniqueOnly = arg
if len(files_to_merge) < 2:
print 'Please designate two or more files to merge (--input)';
sys.exit()
UI.MergeFiles(files_to_merge, join_option, uniqueOnly, output_dir, None)
sys.exit()
if 'IDTranslation' in accessoryAnalysis:
#python AltAnalyze.py --accessoryAnalysis IDTranslation --inputIDType Symbol --outputIDType RefSeq --input "C:\file1.txt" --species Hs
inputIDType = None
outputIDType = None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--inputIDType': inputIDType = arg
if opt == '--outputIDType': outputIDType = arg
if inputIDType == None or outputIDType == None:
print 'Please designate an input ID type and an output ID type (--inputIDType Ensembl --outputIDType Symbol)';
sys.exit()
if species == None:
print "Please enter a valide species (--species)";
sys.exit()
UI.IDconverter(input_file_dir, species, inputIDType, outputIDType, None)
sys.exit()
if 'hierarchical' in image_export:
#python AltAnalyze.py --input "/Users/test/pluri.txt" --image hierarchical --row_method average --column_method single --row_metric cosine --column_metric euclidean --color_gradient red_white_blue --transpose False --PathwaySelection Apoptosis:WP254 --GeneSetSelection WikiPathways --species Hs --platform exon --display false
if input_file_dir == None:
print 'Please provide a valid file location for your input data matrix (must have an annotation row and an annotation column)';
sys.exit()
row_method = 'weighted'
column_method = 'average'
row_metric = 'cosine'
column_metric = 'cosine'
color_gradient = 'red_black_sky'
contrast = 2.5
vendor = 'Affymetrix'
GeneSelection = ''
PathwaySelection = ''
GeneSetSelection = 'None Selected'
rho = None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--row_method':
row_method = arg
if row_method == 'None': row_method = None
elif opt == '--column_method':
column_method = arg
if column_method == 'None': column_method = None
elif opt == '--row_metric':
row_metric = arg
elif opt == '--column_metric':
column_metric = arg
elif opt == '--color_gradient':
color_gradient = arg
elif opt == '--GeneSetSelection':
GeneSetSelection = arg
elif opt == '--PathwaySelection':
PathwaySelection = arg
elif opt == '--genes':
GeneSelection = arg
elif opt == '--OntologyID':
OntologyID = arg
elif opt == '--normalization':
normalization = arg
elif opt == '--justShowTheseIDs':
justShowTheseIDs = arg
elif opt == '--rho':
rho = arg
elif opt == '--clusterGOElite':
clusterGOElite = arg
elif opt == '--contrast':
try:
contrast = float(arg)
except Exception:
print '--contrast not a valid float';sys.exit()
elif opt == '--vendor':
vendor = arg
elif opt == '--display':
if arg == 'yes':
display = True
elif arg == 'True':
display = True
else:
display = False
if len(GeneSetSelection) > 0 or GeneSelection != '':
gsp = UI.GeneSelectionParameters(species, array_type, vendor)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
gsp.setOntologyID(OntologyID)
gsp.setTranspose(transpose)
gsp.setNormalize(normalization)
gsp.setJustShowTheseIDs(justShowTheseIDs)
try:
gsp.setClusterGOElite(clusterGOElite)
except Exception:
pass
if rho != None:
try:
float(rho)
gsp.setRhoCutoff(rho)
except Exception:
print 'Must enter a valid Pearson correlation cutoff (float)'
transpose = gsp ### this allows methods that don't transmit this object to also work
if row_method == 'no': row_method = None
if column_method == 'no': column_method = None
if len(GeneSetSelection) > 0:
if species == None:
print "Please enter a valide species (--species)";
sys.exit()
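### Cluster every file if a directory was supplied; otherwise fall back to treating
### the input path as a single expression matrix.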
try:
files = unique.read_directory(input_file_dir + '/')
dir = input_file_dir
for file in files:
filename = dir + '/' + file
UI.createHeatMap(filename, row_method, row_metric, column_method, column_metric, color_gradient,
transpose, contrast, None, display=display)
except Exception:
UI.createHeatMap(input_file_dir, row_method, row_metric, column_method, column_metric, color_gradient,
transpose, contrast, None, display=display)
#import clustering; clustering.outputClusters([input_file_dir],[])
sys.exit()
if 'PCA' in image_export:
#AltAnalyze.py --input "/Users/nsalomonis/Desktop/folds.txt" --image PCA --plotType 3D --display True --labels yes
#--algorithm "t-SNE"
include_labels = 'yes'
plotType = '2D'
pca_algorithm = 'SVD'
zscore = False ### default when --zscore is not supplied (assumed; the original left this uninitialized)
geneSetName = None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--labels':
include_labels = arg
if include_labels == 'True':
include_labels = 'yes'
if opt == '--plotType': plotType = arg
if opt == '--algorithm': pca_algorithm = arg
if opt == '--geneSetName': geneSetName = arg
if opt == '--zscore':
if arg == 'yes' or arg == 'True' or arg == 'true':
zscore = True
else:
zscore = False
if opt == '--display':
if arg == 'yes' or arg == 'True' or arg == 'true':
display = True
if input_file_dir == None:
print 'Please provide a valid file location for your input data matrix (must have an annotation row and an annotation column)';
sys.exit()
UI.performPCA(input_file_dir, include_labels, pca_algorithm, transpose, None, plotType=plotType,
display=display, geneSetName=geneSetName, species=species, zscore=zscore)
sys.exit()
if 'VennDiagram' in image_export:
# AltAnalyze.py --image "VennDiagram" --input "C:\file1.txt" --input "C:\file2.txt" --output "C:\graphs"
files_to_merge = []
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--input': files_to_merge.append(arg)
if opt == '--display':
if arg == 'yes' or arg == 'True' or arg == 'true':
display = True
if len(files_to_merge) < 2:
print 'Please designate two or more files to compare (--input)';
sys.exit()
UI.vennDiagram(files_to_merge, output_dir, None, display=display)
sys.exit()
if 'AltExonViewer' in image_export:
#python AltAnalyze.py --image AltExonViewer --AltResultsDir "C:\CP-hESC" --genes "ANXA7 FYN TCF3 NAV2 ETS2 MYLK ATP2A2" --species Hs --platform exon --dataType "splicing-index"
genes = []
show_introns = 'no'
geneFileDir = ''
analysisType = 'plot'
altresult_dir = None ### initialized so the None check below cannot raise a NameError
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--genes':
genes = arg
elif opt == '--dataType':
data_type = arg
elif opt == '--showIntrons':
show_introns = arg
elif opt == '--AltResultsDir':
altresult_dir = arg
elif opt == '--geneFileDir':
geneFileDir = arg
elif opt == '--analysisType':
analysisType = arg
if altresult_dir == None:
print 'Please include the location of the AltResults directory (--AltResultsDir)';
sys.exit()
if len(genes) == 0 and len(geneFileDir) == 0:
print "Please indicate the genes (--genes) or gene file location (--geneFileDir) for AltExonViewer";
sys.exit()
if species == None:
print "Please enter a valide species (--species)";
sys.exit()
if array_type == None:
print "Please enter a valide platform (--platform)";
sys.exit()
if 'AltResults' not in altresult_dir:
altresult_dir += '/AltResults/'
if 'Sashimi' in analysisType:
altresult_dir = string.split(altresult_dir, 'AltResults')[0]
genes = geneFileDir
geneFileDir = ''
elif 'raw' in data_type: ### Switch directories if expression
altanalyze_results_folder = string.replace(altresult_dir, 'AltResults', 'ExpressionInput')
altresult_dir = UI.getValidExpFile(altanalyze_results_folder)
if len(altresult_dir) == 0:
print 'No valid expression input file (e.g., exp.MyExperiment.txt) found in', altanalyze_results_folder;
sys.exit()
else:
altanalyze_results_folder = altresult_dir + '/RawSpliceData/' + species
try:
altresult_dir = UI.getValidSplicingScoreFile(altanalyze_results_folder)
except Exception, e:
print "No files found in: " + altanalyze_results_folder;
sys.exit()
if len(geneFileDir) > 0:
try:
genes = UI.importGeneList(geneFileDir) ### list of gene IDs or symbols
except Exception:
### Can occur if a directory of files is selected
try:
files = unique.read_directory(geneFileDir + '/')
gene_string = ''
for file in files:
if '.txt' in file:
filename = geneFileDir + '/' + file
genes = UI.importGeneList(filename) ### list of gene IDs or symbols
gene_string = gene_string + ',' + genes
print 'Imported genes from', file, '\n'
#print [altresult_dir];sys.exit()
UI.altExonViewer(species, platform, altresult_dir, gene_string, show_introns, analysisType,
False)
except Exception:
pass
sys.exit()
if len(genes) == 0:
print 'Please list one or more genes (--genes "ANXA7 FYN TCF3 NAV2 ETS2 MYLK ATP2A2")';
sys.exit()
try:
UI.altExonViewer(species, platform, altresult_dir, genes, show_introns, analysisType, False)
except Exception:
print traceback.format_exc()
sys.exit()
if 'network' in image_export:
#AltAnalyze.py --image network --species Hs --output "C:\GSE9440_RAW" --PathwaySelection Apoptosis:WP254 --GeneSetSelection WikiPathways
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--update_interactions':
update_interactions = arg
elif opt == '--includeExpIDs':
includeExpIDs = arg
elif opt == '--degrees':
degrees = arg
elif opt == '--genes':
Genes = arg
inputType = 'IDs'
elif opt == '--inputType':
inputType = arg
elif opt == '--interactionDirs':
interactionDirs.append(arg)
elif opt == '--GeneSetSelection':
GeneSetSelection = arg
elif opt == '--PathwaySelection':
PathwaySelection = arg
elif opt == '--OntologyID':
OntologyID = arg
elif opt == '--display':
display = arg
if update_interactions == 'yes':
update_interactions = True
else:
update_interactions = False
if input_file_dir == None:
pass
elif len(input_file_dir) == 0:
input_file_dir = None
if len(input_exp_file) == 0: input_exp_file = None
if len(interactionDirs) == 0: interactionDirs = ['WikiPathways']
if interactionDirs == ['all']:
interactionDirs = ['WikiPathways', 'KEGG', 'BioGRID', 'TFTargets', 'common-microRNATargets',
'all-microRNATargets', 'common-DrugBank', 'all-DrugBank']
if interactionDirs == ['main']:
interactionDirs = ['WikiPathways', 'KEGG', 'BioGRID', 'TFTargets']
if interactionDirs == ['confident']:
interactionDirs = ['WikiPathways', 'KEGG', 'TFTargets']
if len(Genes) == 0: Genes = None
if output_dir == None:
pass
elif len(output_dir) == 0:
output_dir = None
if GeneSetSelection == 'None Selected': GeneSetSelection = None
if includeExpIDs == 'yes':
includeExpIDs = True
else:
includeExpIDs = False
gsp = UI.GeneSelectionParameters(species, array_type, manufacturer)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(Genes)
gsp.setOntologyID(OntologyID)
gsp.setIncludeExpIDs(includeExpIDs)
root = ''
if species == None:
print 'Please designate a species (--species).';
sys.exit()
if output_dir == None:
print 'Please designate an output directory (--output)';
sys.exit()
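### Accept either a single .txt/.sif input file or a directory of them; when a
### directory is given, attempt to build one network per file.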
if input_file_dir != None:
if '.txt' in input_file_dir or '.sif' in input_file_dir:
UI.networkBuilder(input_file_dir, inputType, output_dir, interactionDirs, degrees, input_exp_file,
gsp, root)
else:
parent_dir = input_file_dir
dir_list = read_directory(parent_dir)
for file in dir_list:
input_file_dir = parent_dir + '/' + file
try:
UI.networkBuilder(input_file_dir, inputType, output_dir, interactionDirs, degrees,
input_exp_file, gsp, root)
except Exception:
print file, 'failed to produce network'
else:
UI.networkBuilder(None, inputType, output_dir, interactionDirs, degrees, input_exp_file, gsp, root)
sys.exit()
########## Begin database dependent AltAnalyze workflows
if ensembl_version != 'current' and 'markers' not in update_method:
dbversion = string.replace(ensembl_version, 'EnsMart', '')
UI.exportDBversion('EnsMart' + dbversion)
gene_database = unique.getCurrentGeneDatabaseVersion()
print 'Current database version:', gene_database
if array_type == None and update_dbs != 'yes' and denom_file_dir == None:
print "Please specify an array or data type (e.g., RNASeq, exon, gene, junction, AltMouse, 3'array).";
sys.exit()
if 'archive' in update_method:
### Move previously packaged species database archives into an archive subdirectory
print 'Archiving databases', ensembl_version
try:
archive_dir = 'ArchiveDBs/EnsMart' + ensembl_version + '/archive'; export.createDirPath(
filepath(archive_dir))
except Exception:
null = [] ### directory already exists
dirs = unique.read_directory('/ArchiveDBs/EnsMart' + ensembl_version)
print len(dirs), dirs
import shutil
for species_dir in dirs:
try:
#print '/ArchiveDBs/EnsMart'+ensembl_version+'/'+species_dir+'/'+species_dir+'_RNASeq.zip'
src = filepath(
'ArchiveDBs/EnsMart' + ensembl_version + '/' + species_dir + '/' + species_dir + '_RNASeq.zip')
dstn = filepath('ArchiveDBs/EnsMart' + ensembl_version + '/archive/' + species_dir + '_RNASeq.zip')
#export.copyFile(src, dstn)
shutil.move(src, dstn)
try:
srcj = string.replace(src, 'RNASeq.', 'junction.');
dstnj = string.replace(dstn, 'RNASeq.', 'junction.')
shutil.move(srcj, dstnj)
except Exception:
null = []
try:
src = string.replace(src, '_RNASeq.', '.');
dstn = string.replace(dstn, '_RNASeq.', '.')
shutil.move(src, dstn)
except Exception:
null = []
except Exception:
null = []
sys.exit()
if update_dbs == 'yes' and 'Official' not in update_method:
if 'cleanup' in update_method:
existing_species_dirs = unique.read_directory('/AltDatabase/ensembl')
print 'Deleting EnsemblSQL directory for all species, ensembl version', ensembl_version
for species in existing_species_dirs:
export.deleteFolder('AltDatabase/ensembl/' + species + '/EnsemblSQL')
existing_species_dirs = unique.read_directory('/AltDatabase')
print 'Deleting SequenceData directory for all species, ensembl version', ensembl_version
for species in existing_species_dirs:
export.deleteFolder('AltDatabase/' + species + '/SequenceData')
print 'Finished...exiting'
sys.exit()
if 'package' not in update_method and 'markers' not in update_method:
### Example:
### python AltAnalyze.py --species all --arraytype all --update all --version 60
### tr -d \\r < AltAnalyze.py > AltAnalyze_new.py
### chmod +x AltAnalyze_new.py
### nohup ./AltAnalyze.py --update all --species Mm --arraytype gene --arraytype exon --version 60 2>&1 > nohup_v60_Mm.txt
if array_type == 'all' and (species == 'Mm' or species == 'all'):
array_type = ['AltMouse', 'exon', 'gene', 'junction', 'RNASeq']
elif array_type == 'all' and (species == 'Hs' or species == 'Rn'):
array_type = ['exon', 'gene', 'junction', 'RNASeq']
else:
array_type = [array_type] + additional_array_types
if species == 'all' and 'RNASeq' not in array_type: species = selected_species ### just analyze the species for which multiple platforms are supported
if species == 'selected':
species = selected_species ### just analyze the species for which multiple platforms are supported
elif species == 'all':
all_supported_names = {};
all_species_names = {}
species_names = UI.getSpeciesInfo()
for species in species_names: all_supported_names[species_names[species]] = species
import EnsemblSQL
child_dirs, ensembl_species, ensembl_versions = EnsemblSQL.getCurrentEnsemblSpecies(
'release-' + ensembl_version)
for ens_species in ensembl_species:
ens_species = string.replace(ens_species, '_', ' ')
if ens_species in all_supported_names:
all_species_names[all_supported_names[ens_species]] = []
del all_species_names['Hs']
del all_species_names['Mm']
del all_species_names['Rn']
"""
del all_species_names['Go']
del all_species_names['Bt']
del all_species_names['Sc']
del all_species_names['Ss']
del all_species_names['Pv']
del all_species_names['Pt']
del all_species_names['La']
del all_species_names['Tt']
del all_species_names['Tr']
del all_species_names['Ts']
del all_species_names['Pb']
del all_species_names['Pc']
del all_species_names['Ec']
del all_species_names['Tb']
del all_species_names['Tg']
del all_species_names['Dn']
del all_species_names['Do']
del all_species_names['Tn']
del all_species_names['Dm']
del all_species_names['Oc']
del all_species_names['Og']
del all_species_names['Fc']
del all_species_names['Dr']
del all_species_names['Me']
del all_species_names['Cp']
del all_species_names['Tt']
del all_species_names['La']
del all_species_names['Tr']
del all_species_names['Ts']
del all_species_names['Et'] ### No alternative isoforms?
del all_species_names['Pc']
del all_species_names['Tb']
del all_species_names['Fc']
del all_species_names['Sc']
del all_species_names['Do']
del all_species_names['Dn']
del all_species_names['Og']
del all_species_names['Ga']
del all_species_names['Me']
del all_species_names['Ml']
del all_species_names['Mi']
del all_species_names['St']
del all_species_names['Sa']
del all_species_names['Cs']
del all_species_names['Vp']
del all_species_names['Ch']
del all_species_names['Ee']
del all_species_names['Ac']"""
sx = [];
all_species_names2 = [] ### Ensure that the core selected species are run first
for species in selected_species:
if species in all_species_names: sx.append(species)
for species in all_species_names:
if species not in selected_species: all_species_names2.append(species)
all_species_names = sx + all_species_names2
species = all_species_names
else:
species = [species]
update_uniprot = 'no';
update_ensembl = 'no';
update_probeset_to_ensembl = 'no';
update_domain = 'no';
update_miRs = 'no';
genomic_build = 'new';
update_miR_seq = 'yes'
if 'all' in update_method:
update_uniprot = 'yes';
update_ensembl = 'yes';
update_probeset_to_ensembl = 'yes';
update_domain = 'yes';
update_miRs = 'yes'
if 'UniProt' in update_method: update_uniprot = 'yes'
if 'Ensembl' in update_method: update_ensembl = 'yes'
if 'Probeset' in update_method or 'ExonAnnotations' in update_method: update_probeset_to_ensembl = 'yes'
if 'Domain' in update_method:
update_domain = 'yes'
try:
from Bio import Entrez #test this
except Exception:
print 'The dependent module Bio (Biopython) is not installed or not accessible through the default python interpreter. Exiting AltAnalyze.'; sys.exit()
if 'miRBs' in update_method or 'miRBS' in update_method: update_miRs = 'yes'
if 'NewGenomeBuild' in update_method: genomic_build = 'new'
if 'current' in ensembl_version: print "Please specify an Ensembl version number (e.g., 60) before proceeding with the update.";sys.exit()
try:
force = force ### Variable is not declared otherwise
except Exception:
force = 'yes'; print 'force:', force
existing_species_dirs = {}
update_all = 'no' ### We don't pass this as yes, in order to skip certain steps when multiple array types are analyzed (others are specified above)
try:
print "Updating AltDatabase the following array_types", string.join(
array_type), "for the species", string.join(species)
except Exception:
print 'Please designate a valid platform/array_type (e.g., exon) and species code (e.g., Mm).'
for specific_species in species:
for platform_name in array_type:
if platform_name == 'AltMouse' and specific_species == 'Mm':
proceed = 'yes'
elif platform_name == 'exon' or platform_name == 'gene':
#### Check to see if the probeset.csv file is present
#try: probeset_transcript_file = ExonArrayEnsemblRules.getDirectoryFiles('/AltDatabase/'+specific_species+'/'+platform_name)
#except Exception: print "Affymetrix probeset.csv anotation file is not found. You must save this to",'/AltDatabase/'+specific_species+'/'+platform_name,'before updating (unzipped).'; sys.exit()
proceed = 'yes'
elif platform_name == 'junction' and (specific_species == 'Hs' or specific_species == 'Mm'):
proceed = 'yes'
elif platform_name == 'RNASeq':
proceed = 'yes'
else:
proceed = 'no'
if proceed == 'yes':
print "Analyzing", specific_species, platform_name
if (platform_name != array_type[0]) and len(species) == 1:
update_uniprot = 'no';
update_ensembl = 'no';
update_miR_seq = 'no' ### Don't need to do this twice in a row
print 'Skipping ensembl, uniprot and mir-sequence file import updates since already completed for this species', array_type, platform_name
if ignore_built_species == 'yes': ### Useful for when building all species for a new database build
existing_species_dirs = unique.read_directory(
'/AltDatabase/ensembl') ### call this here to update with every species - if running multiple instances
if specific_array_type != None and specific_array_type != platform_name: platform_name += '|' + specific_array_type ### For the hGlue vs. JAY arrays
if specific_species not in existing_species_dirs: ### Useful when running multiple instances of AltAnalyze to build all species
print 'update_ensembl', update_ensembl
print 'update_uniprot', update_uniprot
print 'update_probeset_to_ensembl', update_probeset_to_ensembl
print 'update_domain', update_domain
print 'update_miRs', update_miRs
update.executeParameters(specific_species, platform_name, force, genomic_build,
update_uniprot, update_ensembl, update_probeset_to_ensembl,
update_domain, update_miRs, update_all, update_miR_seq,
ensembl_version)
else:
print 'ignoring', specific_species
sys.exit()
if 'package' in update_method:
### Example: python AltAnalyze.py --update package --species all --platform all --version 65
if ensembl_version == 'current': print '\nPlease specify version of the database to package (e.g., --version 60).'; sys.exit()
ensembl_version = 'EnsMart' + ensembl_version
### Get all possible species
species_names = UI.getSpeciesInfo();
possible_species = {}
possible_species = species_names
possible_arrays = ['exon', 'gene', 'junction', 'AltMouse', 'RNASeq']
try:
if species == 'all':
possible_species = possible_species
elif species == 'selected':
possible_species = selected_species
else:
possible_species = [species]
except Exception:
species = possible_species
if array_type == None or array_type == 'all':
possible_arrays = possible_arrays
else:
possible_arrays = [array_type] + additional_array_types
species_to_package = {}
dirs = unique.read_directory('/AltDatabase/' + ensembl_version)
#print possible_arrays, possible_species; sys.exit()
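### Map each installed species to the array-type databases present in its AltDatabase
### folder so that only built species/platform pairs get packaged below.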
for species_code in dirs:
if species_code in possible_species:
array_types = unique.read_directory('/AltDatabase/' + ensembl_version + '/' + species_code)
for arraytype in array_types:
if arraytype in possible_arrays:
if species_code in possible_species:
array_types = unique.read_directory('/AltDatabase/' + ensembl_version + '/' + species_code)
try:
species_to_package[species_code].append(arraytype)
except Exception:
species_to_package[species_code] = [arraytype]
species_to_package = eliminate_redundant_dict_values(species_to_package)
for species in species_to_package:
files_to_copy = [species + '_Ensembl_domain_aligning_probesets.txt']
files_to_copy += [species + '_Ensembl_indirect_domain_aligning_probesets.txt']
files_to_copy += [species + '_Ensembl_probesets.txt']
files_to_copy += [species + '_Ensembl_exons.txt']
#files_to_copy+=[species+'_Ensembl_junctions.txt']
files_to_copy += [species + '_exon_core.mps']
files_to_copy += [species + '_exon_extended.mps']
files_to_copy += [species + '_exon_full.mps']
files_to_copy += [species + '_gene_core.mps']
files_to_copy += [species + '_gene_extended.mps']
files_to_copy += [species + '_gene_full.mps']
files_to_copy += [species + '_gene-exon_probesets.txt']
files_to_copy += [species + '_probes_to_remove.txt']
files_to_copy += [species + '_probeset-probes.txt']
files_to_copy += [species + '_probeset_microRNAs_any.txt']
files_to_copy += [species + '_probeset_microRNAs_multiple.txt']
files_to_copy += ['probeset-domain-annotations-exoncomp.txt']
files_to_copy += ['probeset-protein-annotations-exoncomp.txt']
#files_to_copy+=['probeset-protein-dbase_exoncomp.txt']
files_to_copy += ['SEQUENCE-protein-dbase_exoncomp.txt']
files_to_copy += [species + '_Ensembl_junction_probesets.txt']
files_to_copy += [species + '_Ensembl_AltMouse_probesets.txt']
files_to_copy += [species + '_RNASeq-exon_probesets.txt']
files_to_copy += [species + '_junction-exon_probesets.txt']
files_to_copy += [species + '_junction_all.mps']
files_to_copy += [
'platform.txt'] ### Indicates the specific platform for an array type (e.g., HJAY for junction or hGlue for junction)
files_to_copy += [species + '_junction_comps_updated.txt']
files_to_copy += ['MASTER-probeset-transcript.txt']
files_to_copy += ['AltMouse-Ensembl.txt']
files_to_copy += ['AltMouse_junction-comparisons.txt']
files_to_copy += ['AltMouse_gene_annotations.txt']
files_to_copy += ['AltMouse_annotations.txt']
common_to_copy = ['uniprot/' + species + '/custom_annotations.txt']
common_to_copy += ['ensembl/' + species + '/' + species + '_Ensembl-annotations_simple.txt']
common_to_copy += ['ensembl/' + species + '/' + species + '_Ensembl-annotations.txt']
common_to_copy += ['ensembl/' + species + '/' + species + '_microRNA-Ensembl.txt']
common_to_copy += ['ensembl/' + species + '/' + species + '_Ensembl_transcript-biotypes.txt']
common_to_copy += ['ensembl/' + species + '/' + species + '_Ensembl_transcript-annotations.txt']
common_to_copy += searchDirectory("AltDatabase/ensembl/" + species + "/", 'Ensembl_Protein')
common_to_copy += searchDirectory("AltDatabase/ensembl/" + species + "/", 'ProteinFeatures')
common_to_copy += searchDirectory("AltDatabase/ensembl/" + species + "/", 'ProteinCoordinates')
supported_arrays_present = 'no'
for arraytype in selected_platforms:
if arraytype in species_to_package[
species]: supported_arrays_present = 'yes' #Hence a non-RNASeq platform is present
if supported_arrays_present == 'yes':
for file in common_to_copy:
ir = 'AltDatabase/' + ensembl_version + '/'
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version + '/'
export.copyFile(ir + file, er + file)
if 'RNASeq' in species_to_package[species]:
common_to_copy += ['ensembl/' + species + '/' + species + '_Ensembl_junction.txt']
common_to_copy += ['ensembl/' + species + '/' + species + '_Ensembl_exon.txt']
for file in common_to_copy:
ir = 'AltDatabase/' + ensembl_version + '/'
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version + '/'
if species in selected_species:
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/RNASeq/' + ensembl_version + '/' ### This allows us to build the package archive in a separate directory for selected species, so separate but overlapping content can be packaged
export.copyFile(ir + file, er + file)
for array_type in species_to_package[species]:
ir = 'AltDatabase/' + ensembl_version + '/' + species + '/' + array_type + '/'
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version + '/' + species + '/' + array_type + '/'
if array_type == 'junction':
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + array_type + '/'
if array_type == 'RNASeq' and species in selected_species:
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/RNASeq/' + ensembl_version + '/' + species + '/' + array_type + '/'
for file in files_to_copy:
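### Prefer the '-filtered' version of each database file when it exists; otherwise
### copy the unfiltered original (files missing from the directory are skipped).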
if array_type == 'RNASeq': file = string.replace(file, '_updated.txt', '.txt')
export_path = []
filt_file = string.replace(file, '.txt', '-filtered.txt')
try:
export.copyFile(ir + filt_file, er + filt_file); export_path = er + filt_file
except Exception:
try:
export.copyFile(ir + file, er + file); export_path = er + file
except Exception:
null = [] ### File not found in directory
if len(export_path) > 0:
if 'AltMouse' in export_path or 'probes_' in export_path:
export.cleanFile(export_path)
if array_type == 'junction':
subdir = '/exon/'
ir = 'AltDatabase/' + ensembl_version + '/' + species + '/' + array_type + subdir
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + array_type + subdir
for file in files_to_copy:
export_path = []
filt_file = string.replace(file, '.txt', '-filtered.txt')
try:
export.copyFile(ir + filt_file, er + filt_file); export_path = er + filt_file
except Exception:
try:
export.copyFile(ir + file, er + file); export_path = er + file
except Exception:
null = [] ### File not found in directory
if array_type == 'RNASeq':
subdir = '/junction/'
ir = 'AltDatabase/' + ensembl_version + '/' + species + '/' + array_type + subdir
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version + '/' + species + '/' + array_type + subdir
if species in selected_species:
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/RNASeq/' + ensembl_version + '/' + species + '/' + array_type + subdir
for file in files_to_copy:
if 'SEQUENCE-protein-dbase' not in file and 'domain_aligning' not in file: ### This data is now combined into the main file
export_path = []
filt_file = string.replace(file, '.txt', '-filtered.txt')
try:
export.copyFile(ir + filt_file, er + filt_file); export_path = er + filt_file
except Exception:
try:
export.copyFile(ir + file, er + file); export_path = er + file
except Exception:
null = [] ### File not found in directory
if 'RNASeq' in species_to_package[species]:
src = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version
dst = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + species + '_RNASeq.zip'
if species in selected_species:
src = 'ArchiveDBs/' + ensembl_version + '/' + species + '/RNASeq/' + ensembl_version
update.zipDirectory(src);
print 'Zipping', species, array_type, dst
os.rename(src + '.zip', dst)
if supported_arrays_present == 'yes':
src = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version
dst = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + species + '.zip'
update.zipDirectory(src);
print 'Zipping', species, array_type, dst
os.rename(src + '.zip', dst)
if 'junction' in species_to_package[species]:
src = 'ArchiveDBs/' + ensembl_version + '/' + species + '/junction'
dst = string.replace(src, 'junction', species + '_junction.zip')
update.zipDirectory(src);
print 'Zipping', species + '_junction'
os.rename(src + '.zip', dst)
sys.exit()
if 'markers' in update_method:
if species == None or platform == None:
print "WARNING! A species and platform (e.g., exon, junction, 3'array or RNASeq) must be defined to identify markers.";
sys.exit()
elif input_exp_file == '':
print "WARNING! A input expression file must be supplied (e.g., ExpressionOutput/DATASET.YourExperimentName.txt) for this analysis.";
sys.exit()
else:
#python AltAnalyze.py --update markers --platform gene --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/Mm_Gene-TissueAtlas/ExpressionInput/exp.meta.txt"
#python AltAnalyze.py --update markers --platform gene --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/Mm_Gene-TissueAtlas/AltResults/RawSpliceData/Mm/splicing-index/meta.txt"
#python AltAnalyze.py --update markers --platform "3'array" --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/U133/ExpressionOutput/DATASET-meta.txt"
#python AltAnalyze.py --update markers --compendiumType ncRNA --platform "exon" --expdir "/home/socr/c/users2/salomoni/conklin/nsalomonis/normalization/Hs_Exon-TissueAtlas/ExpressionOutput/DATASET-meta.txt"
#python AltAnalyze.py --update markers --platform RNASeq --species Mm --geneRPKM 1 --expdir /Users/saljh8/Desktop/Grimes/MergedRSEM/DN-Analysis/ExpressionInput/exp.DN.txt --genesToReport 200
"""The markerFinder module:
1) takes an input ExpressionOutput file (DATASET.YourExperimentName.txt)
2) extracts group average expression and saves to AVERAGE.YourExperimentName.txt to the ExpressionOutput directory
3) re-imports AVERAGE.YourExperimentName.txt
4) correlates the average expression of each gene to an idealized profile to derive a Pearson correlation coefficient
5) identifies optimal markers based on these correlations for each tissue
6) exports an expression file with just these marker genes and tissues
This module can perform these analyses on protein coding or ncRNAs and can segregate the cell/tissue groups into clusters
when a group notation is present in the sample name (e.g., 0~Heart, 0~Brain, 1~Stem Cell)"""
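### Illustrative sketch only (variable names assumed, not part of markerFinder): the
### "idealized profile" for tissue k can be viewed as a binary vector that is 1 for
### tissue-k samples and 0 elsewhere; each gene's group-average expression vector is
### then scored against it with a Pearson correlation, e.g.:
###     from scipy import stats
###     r, p = stats.pearsonr(gene_group_averages, idealized_profile)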
import markerFinder
if 'AltResults' in input_exp_file and 'Clustering' not in input_exp_file:
### This applies to a file composed of exon-level normalized intensities (calculate average group expression)
markerFinder.getAverageExonExpression(species, platform, input_exp_file)
if 'Raw' in input_exp_file:
group_exp_file = string.replace(input_exp_file, 'Raw', 'AVERAGE')
else:
group_exp_file = string.replace(input_exp_file, 'FullDatasets', 'AVERAGE-FullDatasets')
altexon_correlation_file = markerFinder.analyzeData(group_exp_file, species, platform, compendiumType,
geneToReport=genesToReport,
correlateAll=correlateAll, AdditionalParameters=fl)
markerFinder.getExprValsForNICorrelations(platform, altexon_correlation_file, group_exp_file)
else:
### This applies to an ExpressionOutput DATASET file composed of gene expression values (averages already present)
import collections
try:
test_ordereddict = collections.OrderedDict()
except Exception:
try:
import ordereddict
except Exception:
### This is needed to re-order the average file so that the groups are sequentially ordered when analyzing clustered groups (0~)
print 'Warning!!!! To run markerFinder correctly, call python version 2.7 or greater (python 3.x not supported)'
print 'Requires collections.OrderedDict (or install the ordereddict library). To call 2.7: /usr/bin/python2.7'
sys.exit()
try:
output_dir = markerFinder.getAverageExpressionValues(input_exp_file,
platform) ### Either way, make an average annotated file from the DATASET file
if 'DATASET' in input_exp_file:
group_exp_file = string.replace(input_exp_file, 'DATASET', 'AVERAGE')
else:
group_exp_file = (input_exp_file, output_dir) ### still analyze the primary sample
except Exception:
### Work around when performing this analysis on an alternative exon input cluster file
group_exp_file = input_exp_file
fl = UI.ExpressionFileLocationData(input_exp_file, '', '', '');
fl.setOutputDir(export.findParentDir(export.findParentDir(input_exp_file)[:-1]))
if platform == 'RNASeq':
try:
rpkm_threshold = float(rpkm_threshold)
except Exception:
rpkm_threshold = 1.0
fl.setRPKMThreshold(rpkm_threshold)
try:
correlationDirection = direction ### correlate to a positive or an inverted (negative) artificial in silico pattern
except Exception:
correlationDirection = 'up'
fl.setCorrelationDirection(correlationDirection)
if expression_data_format == 'non-log':
logTransform = True
else:
logTransform = False
if 'topSplice' in input_exp_file:
markerFinder.filterRNASeqSpliceEvents(species, platform, fl, input_exp_file)
sys.exit()
if 'stats.' in input_exp_file:
markerFinder.filterDetectionPvalues(species, platform, fl, input_exp_file)
sys.exit()
else:
markerFinder.analyzeData(group_exp_file, species, platform, compendiumType,
geneToReport=genesToReport, correlateAll=correlateAll,
AdditionalParameters=fl, logTransform=logTransform)
try:
fl.setVendor(manufacturer)
except Exception:
print '--vendor not indicated by user... assuming Affymetrix'
fl.setVendor('Affymetrix')
try:
markerFinder.generateMarkerHeatMaps(fl, array_type, convertNonLogToLog=logTransform)
except Exception:
print traceback.format_exc()
print 'Cell/Tissue marker classification analysis finished';
sys.exit()
if 'EnsMart' in ensembl_version:
UI.exportDBversion(ensembl_version)
annotation_found = verifyFile(input_annotation_file)
proceed = 'no'
if 'Official' not in update_method and denom_file_dir == None: ### If running GO-Elite independent of AltAnalyze (see below GO_Elite call)
try:
time_stamp = timestamp()
if len(cel_file_dir) > 0:
if output_dir == None:
output_dir = cel_file_dir
print "Setting output directory to the input path:", output_dir
if output_dir == None and len(input_filtered_dir) > 0:
output_dir = input_filtered_dir
if '/' == output_dir[-1] or '\\' in output_dir[-2]:
null = []
else:
output_dir += '/'
log_file = filepath(output_dir + 'AltAnalyze_report-' + time_stamp + '.log')
log_report = open(log_file, 'w');
log_report.close()
sys.stdout = Logger('')
except Exception, e:
print e
print 'Please designate an output directory before proceeding (e.g., --output "C:\RNASeq")';
sys.exit()
if mappedExonAnalysis:
array_type = 'RNASeq' ### Although this is not the actual platform, the resulting data will be treated as RNA-Seq with parameters most suitable for arrays
if len(external_annotation_dir) > 0:
run_from_scratch = 'Annotate External Results'
if channel_to_extract != None:
run_from_scratch = 'Process Feature Extraction files' ### Agilent Feature Extraction files as input for normalization
manufacturer = 'Agilent'
constitutive_source = 'Agilent'
expression_threshold = 'NA'
perform_alt_analysis = 'NA'
if len(input_filtered_dir) > 0:
run_from_scratch = 'Process AltAnalyze filtered';
proceed = 'yes'
if len(input_exp_file) > 0:
run_from_scratch = 'Process Expression file';
proceed = 'yes'
input_exp_file = string.replace(input_exp_file, '\\',
'/') ### Windows convention is \ rather than /, but works with /
ief_list = string.split(input_exp_file, '/')
if len(output_dir) > 0:
parent_dir = output_dir
else:
parent_dir = string.join(ief_list[:-1], '/')
exp_name = ief_list[-1]
if len(cel_file_dir) > 0 or runKallisto == True:
# python AltAnalyze.py --species Mm --platform RNASeq --runKallisto yes --expname test
if exp_name == None:
print "No experiment name defined. Please sumbit a name (e.g., --expname CancerComp) before proceeding.";
sys.exit()
else:
dataset_name = 'exp.' + exp_name + '.txt';
exp_file_dir = filepath(output_dir + '/ExpressionInput/' + dataset_name)
if runKallisto:
run_from_scratch = 'Process RNA-seq reads'
elif run_from_scratch != 'Process Feature Extraction files':
run_from_scratch = 'Process CEL files';
proceed = 'yes'
if array_type == 'RNASeq':
file_ext = '.BED'
else:
file_ext = '.CEL'
try:
cel_files, cel_files_fn = UI.identifyCELfiles(cel_file_dir, array_type, manufacturer)
except Exception, e:
print e
if mappedExonAnalysis:
pass
else:
print "No", file_ext, "files found in the directory:", cel_file_dir;sys.exit()
if array_type != 'RNASeq': cel_file_list_dir = UI.exportCELFileList(cel_files_fn, cel_file_dir)
if groups_file != None and comps_file != None:
try:
export.copyFile(groups_file, string.replace(exp_file_dir, 'exp.', 'groups.'))
except Exception:
print 'Groups file already present in target location OR bad input path.'
try:
export.copyFile(comps_file, string.replace(exp_file_dir, 'exp.', 'comps.'))
except Exception:
print 'Comparison file already present in target location OR bad input path.'
groups_file = string.replace(exp_file_dir, 'exp.', 'groups.')
comps_file = string.replace(exp_file_dir, 'exp.', 'comps.')
if verifyGroupFileFormat(groups_file) == False:
print "\nWarning! The format of your groups file is not correct. For details, see:\nhttp://code.google.com/p/altanalyze/wiki/ManualGroupsCompsCreation\n"
sys.exit()
if array_type != 'RNASeq' and manufacturer != 'Agilent':
"""Determine if Library and Annotations for the array exist, if not, download or prompt for selection"""
try:
### For the hGlue and HJAY arrays, this step is critical in order to have the command-line AltAnalyze download the appropriate junction database (determined from specific_array_type)
specific_array_types, specific_array_type = UI.identifyArrayType(cel_files_fn)
num_array_types = len(specific_array_types)
except Exception:
null = [];
num_array_types = 1;
specific_array_type = None
if array_type == 'exon':
if species == 'Hs': specific_array_type = 'HuEx-1_0-st-v2'
if species == 'Mm': specific_array_type = 'MoEx-1_0-st-v2'
if species == 'Rn': specific_array_type = 'RaEx-1_0-st-v2'
elif array_type == 'gene':
if species == 'Hs': specific_array_type = 'HuGene-1_0-st-v1'
if species == 'Mm': specific_array_type = 'MoGene-1_0-st-v1'
if species == 'Rn': specific_array_type = 'RaGene-1_0-st-v1'
elif array_type == 'AltMouse':
specific_array_type = 'altMouseA'
"""
elif array_type == 'junction':
if species == 'Mm': specific_array_type = 'MJAY'
if species == 'Hs': specific_array_type = 'HJAY'
"""
supported_array_db = UI.importSupportedArrayInfo()
if specific_array_type in supported_array_db and input_cdf_file == None and input_annotation_file == None:
sa = supported_array_db[specific_array_type];
species = sa.Species();
array_type = sa.ArrayType()
input_cdf_file, input_annotation_file, bgp_file, clf_file = UI.getAffyFilesRemote(specific_array_type,
array_type, species)
else:
array_type = "3'array"
cdf_found = verifyFile(input_cdf_file)
annotation_found = verifyFile(input_annotation_file)
if input_cdf_file == None:
print [
specific_array_type], 'not currently supported... Please provide CDF to AltAnalyze (commandline or GUI) or manually add to AltDatabase/affymetrix/LibraryFiles';
sys.exit()
if cdf_found != "found":
### Copy valid Library files to a local AltAnalyze database directory
input_cdf_file_lower = string.lower(input_cdf_file)
if array_type == "3'array":
if '.cdf' in input_cdf_file_lower:
clf_file = '';
bgp_file = '';
assinged = 'yes'
###Thus the CDF file was confirmed, so copy it over to AltDatabase
icf_list = string.split(input_cdf_file, '/');
cdf_short = icf_list[-1]
destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
destination_parent = osfilepath(destination_parent + cdf_short)
info_list = input_cdf_file, destination_parent;
UI.StatusWindow(info_list, 'copy')
else:
print "Valid CDF file not found. Exiting program.";sys.exit()
else:
if '.pgf' in input_cdf_file_lower:
###Check to see if the clf and bgp files are present in this directory
icf_list = string.split(input_cdf_file, '/');
parent_dir = string.join(icf_list[:-1], '/');
cdf_short = icf_list[-1]
clf_short = string.replace(cdf_short, '.pgf', '.clf')
kil_short = string.replace(cdf_short, '.pgf', '.kil') ### Only applies to the Glue array
if array_type == 'exon' or array_type == 'junction':
bgp_short = string.replace(cdf_short, '.pgf', '.antigenomic.bgp')
else:
bgp_short = string.replace(cdf_short, '.pgf', '.bgp')
dir_list = read_directory(parent_dir)
if clf_short in dir_list and bgp_short in dir_list:
pgf_file = input_cdf_file
clf_file = string.replace(pgf_file, '.pgf', '.clf')
kil_file = string.replace(pgf_file, '.pgf', '.kil') ### Only applies to the Glue array
if array_type == 'exon' or array_type == 'junction':
bgp_file = string.replace(pgf_file, '.pgf', '.antigenomic.bgp')
else:
bgp_file = string.replace(pgf_file, '.pgf', '.bgp')
assinged = 'yes'
###Thus the PGF file was confirmed, so copy it over to AltDatabase
destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
info_list = input_cdf_file, osfilepath(destination_parent + cdf_short);
UI.StatusWindow(info_list, 'copy')
info_list = clf_file, osfilepath(destination_parent + clf_short);
UI.StatusWindow(info_list, 'copy')
info_list = bgp_file, osfilepath(destination_parent + bgp_short);
UI.StatusWindow(info_list, 'copy')
if 'Glue' in pgf_file:
info_list = kil_file, osfilepath(destination_parent + kil_short);
UI.StatusWindow(info_list, 'copy')
if annotation_found != "found" and update_dbs == 'no' and array_type != 'RNASeq' and denom_file_dir == None and manufacturer != 'Agilent':
### Copy valid Annotation files to a local AltAnalyze database directory
try:
input_annotation_lower = string.lower(input_annotation_file)
if '.csv' in input_annotation_lower:
assinged = 'yes'
###Thus the annotation CSV file was confirmed, so copy it over to AltDatabase
icf_list = string.split(input_annotation_file, '/');
csv_short = icf_list[-1]
destination_parent = 'AltDatabase/affymetrix/' + species + '/'
info_list = input_annotation_file, filepath(destination_parent + csv_short);
UI.StatusWindow(info_list, 'copy')
except Exception:
print "No Affymetrix annotation file provided. AltAnalyze will use any .csv annotations files in AltDatabase/Affymetrix/" + species
if 'Official' in update_method and species != None:
proceed = 'yes'
elif array_type != None and species != None:
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults(array_type,
species)
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, goelite_permutations, mod, returnPathways, NA = goelite_defaults
use_direct_domain_alignments_only, microRNA_prediction_method = functional_analysis_defaults
analysis_method, additional_algorithms, filter_probeset_types, analyze_all_conditions, p_threshold, alt_exon_fold_variable, additional_score, permute_p_threshold, gene_expression_cutoff, remove_intronic_junctions, perform_permutation_analysis, export_NI_values, run_MiDAS, calculate_normIntensity_p, filter_for_AS = alt_exon_defaults
dabg_p, rpkm_threshold, gene_exp_threshold, exon_exp_threshold, exon_rpkm_threshold, expression_threshold, perform_alt_analysis, analyze_as_groups, expression_data_format, normalize_feature_exp, normalize_gene_data, avg_all_for_ss, include_raw_data, probability_statistic, FDR_statistic, batch_effects, marker_finder, visualize_qc_results, run_lineage_profiler, null = expr_defaults
elif denom_file_dir != None and species != None:
proceed = 'yes' ### Only run GO-Elite
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults('RNASeq',
species) ### platform not relevant
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, goelite_permutations, mod, returnPathways, NA = goelite_defaults
else:
print 'No species defined. Please include the species code (e.g., "--species Hs") and array type (e.g., "--arraytype exon") before proceeding.'
print '\nAlso check the printed arguments above to see if there are formatting errors, such as bad quotes.';
sys.exit()
array_type_original = array_type
#if array_type == 'gene': array_type = "3'array"
for opt, arg in options:
if opt == '--runGOElite':
run_GOElite = arg
elif opt == '--outputQCPlots':
visualize_qc_results = arg
elif opt == '--runLineageProfiler':
run_lineage_profiler = arg
elif opt == '--elitepermut':
goelite_permutations = arg
elif opt == '--method':
filter_method = arg
elif opt == '--zscore':
z_threshold = arg
elif opt == '--elitepval':
p_val_threshold = arg
elif opt == '--num':
change_threshold = arg
elif opt == '--dataToAnalyze':
resources_to_analyze = arg
elif opt == '--GEelitepval':
ge_pvalue_cutoffs = arg
elif opt == '--GEelitefold':
ge_fold_cutoffs = arg
elif opt == '--GEeliteptype':
ge_ptype = arg
elif opt == '--ORAstat':
ORA_algorithm = arg
elif opt == '--returnPathways':
returnPathways = arg
elif opt == '--FDR':
FDR_statistic = arg
elif opt == '--dabgp':
dabg_p = arg
elif opt == '--rawexp':
expression_threshold = arg
elif opt == '--geneRPKM':
rpkm_threshold = arg
elif opt == '--exonRPKM':
exon_rpkm_threshold = arg
elif opt == '--geneExp':
gene_exp_threshold = arg
elif opt == '--exonExp':
exon_exp_threshold = arg
elif opt == '--groupStat':
probability_statistic = arg
elif opt == '--avgallss':
avg_all_for_ss = arg
elif opt == '--logexp':
expression_data_format = arg
elif opt == '--inclraw':
include_raw_data = arg
elif opt == '--combat':
batch_effects = arg
elif opt == '--runalt':
perform_alt_analysis = arg
elif opt == '--altmethod':
analysis_method = arg
elif opt == '--altp':
p_threshold = arg
elif opt == '--probetype':
filter_probeset_types = arg
elif opt == '--altscore':
alt_exon_fold_variable = arg
elif opt == '--GEcutoff':
gene_expression_cutoff = arg
elif opt == '--removeIntronOnlyJunctions':
remove_intronic_junctions = arg
elif opt == '--normCounts':
normalize_feature_exp = arg
elif opt == '--normMatrix':
normalize_gene_data = arg
elif opt == '--altpermutep':
permute_p_threshold = arg
elif opt == '--altpermute':
perform_permutation_analysis = arg
elif opt == '--exportnormexp':
export_NI_values = arg
elif opt == '--buildExonExportFile':
build_exon_bedfile = 'yes'
elif opt == '--runMarkerFinder':
marker_finder = arg
elif opt == '--calcNIp':
calculate_normIntensity_p = arg
elif opt == '--runMiDAS':
run_MiDAS = arg
elif opt == '--analyzeAllGroups':
analyze_all_conditions = arg
if analyze_all_conditions == 'yes': analyze_all_conditions = 'all groups'
elif opt == '--GEcutoff':
use_direct_domain_alignments_only = arg
elif opt == '--mirmethod':
microRNA_prediction_method = arg
elif opt == '--ASfilter':
filter_for_AS = arg
elif opt == '--noxhyb':
xhyb_remove = arg
elif opt == '--returnAll':
return_all = arg
elif opt == '--annotatedir':
external_annotation_dir = arg
elif opt == '--additionalScore':
additional_score = arg
elif opt == '--additionalAlgorithm':
additional_algorithms = arg
elif opt == '--modelSize':
modelSize = arg
try:
modelSize = int(modelSize)
except Exception:
modelSize = None
elif opt == '--geneModel':
geneModel = arg # file location
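### 'alse' matches both 'False' and 'false' passed on the command line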
if geneModel == 'no' or 'alse' in geneModel:
geneModel = False
elif opt == '--reference':
custom_reference = arg
if run_from_scratch == 'Process Feature Extraction files': ### Agilent Feature Extraction files as input for normalization
normalize_gene_data = 'quantile' ### required for Agilent
proceed = 'yes'
if returnPathways == 'no' or returnPathways == 'None':
returnPathways = None
if pipelineAnalysis == False:
proceed = 'yes'
if proceed == 'yes':
species_codes = UI.remoteSpeciesInfo()
### Update Ensembl Databases
if 'Official' in update_method:
file_location_defaults = UI.importDefaultFileLocations()
db_versions_vendors, db_versions = UI.remoteOnlineDatabaseVersions()
array_codes = UI.remoteArrayInfo()
UI.getOnlineDBConfig(file_location_defaults, '')
if len(species) == 2:
species_names = UI.getSpeciesInfo()
species_full = species_names[species]
else:
species_full = species
print 'Species name to update:', species_full
db_version_list = []
for version in db_versions: db_version_list.append(version)
db_version_list.sort();
db_version_list.reverse();
select_version = db_version_list[0]
db_versions[select_version].sort()
print 'Ensembl version', ensembl_version
if ensembl_version != 'current':
if len(ensembl_version) < 4: ensembl_version = 'EnsMart' + ensembl_version
if ensembl_version not in db_versions:
try:
UI.getOnlineEliteDatabase(file_location_defaults, ensembl_version, [species], 'no',
''); sys.exit()
except Exception:
### This is only for databases that aren't officially released yet (for prototyping)
print ensembl_version, 'is not a valid version of Ensembl, while', select_version, 'is.';
sys.exit()
else:
select_version = ensembl_version
### Export basic species information
sc = species;
db_version = ensembl_version
if sc != None:
for ad in db_versions_vendors[db_version]:
if ad.SpeciesCodes() == species_full:
for array_system in array_codes:
ac = array_codes[array_system]
compatible_species = ac.SpeciesCodes()
if ac.Manufacturer() in ad.Manufacturer() and (
'expression' in ac.ArrayName() or 'RNASeq' in ac.ArrayName() or 'RNA-seq' in ac.ArrayName()):
if sc not in compatible_species: compatible_species.append(sc)
ac.setSpeciesCodes(compatible_species)
UI.exportArrayInfo(array_codes)
if species_full not in db_versions[select_version]:
print db_versions[select_version]
print species_full, ': This species is not available for this version %s of the Official database.' % select_version
else:
update_goelite_resources = 'no' ### This is handled separately below
UI.getOnlineEliteDatabase(file_location_defaults, ensembl_version, [species], update_goelite_resources,
'');
### Attempt to download additional Ontologies and GeneSets
if additional_resources[0] != None: ### Indicates that the user requested the download of additional GO-Elite resources
try:
import GeneSetDownloader
print 'Adding supplemental GeneSet and Ontology Collections'
if 'all' in additional_resources:
additionalResources = UI.importResourceList() ### Gets all additional possible resources
else:
additionalResources = additional_resources
GeneSetDownloader.buildAccessoryPathwayDatabases([species], additionalResources, 'yes')
print 'Finished adding additional analysis resources.'
except Exception:
print 'Download error encountered for additional Ontologies and GeneSets...\nplease try again later.'
status = UI.verifyLineageProfilerDatabases(species, 'command-line')
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...'
if array_type == 'junction' or array_type == 'RNASeq': ### Download junction databases
try:
UI.checkForLocalArraySupport(species, array_type, specific_array_type, 'command-line')
except Exception:
print 'Please install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart65';
sys.exit()
status = UI.verifyLineageProfilerDatabases(species, 'command-line')
print "Finished adding database"
sys.exit()
try:
#print ge_fold_cutoffs,ge_pvalue_cutoffs, change_threshold, resources_to_analyze, goelite_permutations, p_val_threshold, z_threshold
change_threshold = int(change_threshold) - 1
goelite_permutations = int(goelite_permutations);
change_threshold = change_threshold
p_val_threshold = float(p_val_threshold);
z_threshold = float(z_threshold)
if ORA_algorithm == 'Fisher Exact Test':
goelite_permutations = 'FisherExactTest'
except Exception, e:
print e
print 'One of the GO-Elite input values is inappropriate. Please review and correct.';
sys.exit()
if run_GOElite == None or run_GOElite == 'no':
goelite_permutations = 'NA' ### This halts GO-Elite from running
else:
if output_dir == None:
print "\nPlease specify an output directory using the flag --output";
sys.exit()
try:
expression_threshold = float(expression_threshold)
except Exception:
expression_threshold = 1
try:
dabg_p = float(dabg_p)
except Exception:
dabg_p = 1 ### Occurs for RNASeq
if microRNA_prediction_method == 'two or more':
microRNA_prediction_method = 'multiple'
else:
microRNA_prediction_method = 'any'
### Run GO-Elite directly from user supplied input and denominator ID folders (outside of the normal workflows)
if run_GOElite == 'yes' and pipelineAnalysis == False and '--runGOElite' in arguments:# and denom_file_dir != None:
#python AltAnalyze.py --input "/Users/nsalomonis/Desktop/Mm_sample/input_list_small" --runGOElite yes --denom "/Users/nsalomonis/Desktop/Mm_sample/denominator" --mod Ensembl --species Mm
"""if denom_file_dir == None:
print 'Please include a folder containing a valid denominator ID list for the input ID sets.'; sys.exit()"""
try:
if output_dir == None:
### Set output to the same directory or parent if none selected
i = -1 ### 1 directory up
output_dir = string.join(string.split(input_file_dir, '/')[:i], '/')
file_dirs = input_file_dir, denom_file_dir, output_dir
import GO_Elite
if ORA_algorithm == 'Fisher Exact Test':
goelite_permutations = 'FisherExactTest'
goelite_var = species, mod, goelite_permutations, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, returnPathways, file_dirs, ''
GO_Elite.remoteAnalysis(goelite_var, 'non-UI', Multi=mlp)
sys.exit()
except Exception:
print traceback.format_exc()
print "Unexpected error encountered. Please see log file.";
sys.exit()
if run_lineage_profiler == 'yes':
status = UI.verifyLineageProfilerDatabases(species, 'command-line')
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...'
if run_lineage_profiler == 'yes' and input_file_dir != None and pipelineAnalysis == False and '--runLineageProfiler' in arguments:
#python AltAnalyze.py --input "/Users/arrays/test.txt" --runLineageProfiler yes --vendor Affymetrix --platform "3'array" --species Mm --output "/Users/nsalomonis/Merrill"
#python AltAnalyze.py --input "/Users/qPCR/samples.txt" --runLineageProfiler yes --geneModel "/Users/qPCR/models.txt"
if array_type == None:
print "Please include a platform name (e.g., --platform RNASeq)";
sys.exit()
if species == None:
print "Please include a species name (e.g., --species Hs)";
sys.exit()
try:
status = UI.verifyLineageProfilerDatabases(species, 'command-line')
except ValueError:
### Occurs due to if int(gene_database[-2:]) < 65: - ValueError: invalid literal for int() with base 10: ''
print '\nPlease install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart65\n';
sys.exit()
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...';
sys.exit()
try:
fl = UI.ExpressionFileLocationData('', '', '', '')
fl.setSpecies(species)
fl.setVendor(manufacturer)
fl.setPlatformType(array_type)
fl.setCompendiumType('protein_coding')
#fl.setCompendiumType('AltExon')
fl.setCompendiumPlatform(array_type)
try:
expr_input_dir
except Exception:
expr_input_dir = input_file_dir
UI.remoteLP(fl, expr_input_dir, manufacturer, custom_reference, geneModel, None, modelSize=modelSize)
#graphic_links = ExpressionBuilder.remoteLineageProfiler(fl,input_file_dir,array_type,species,manufacturer)
print_out = 'Lineage profiles and images saved to the folder "DataPlots" in the input file folder.'
print print_out
except Exception:
print traceback.format_exc()
print_out = 'Analysis error occurred...\nplease see warning printouts.'
print print_out
sys.exit()
if array_type == 'junction' or array_type == 'RNASeq': ### Download junction databases
try:
UI.checkForLocalArraySupport(species, array_type, specific_array_type, 'command-line')
except Exception:
print 'Please install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart65';
sys.exit()
probeset_types = ['full', 'core', 'extended']
if return_all == 'yes': ### Perform no alternative exon filtering when annotating existing FIRMA or MADS results
dabg_p = 1;
expression_threshold = 1;
p_threshold = 1;
alt_exon_fold_variable = 1
gene_expression_cutoff = 10000;
filter_probeset_types = 'full';
exon_exp_threshold = 1;
rpkm_threshold = 0
gene_exp_threshold = 1;
exon_rpkm_threshold = 0
if array_type == 'RNASeq':
gene_exp_threshold = 0
else:
if array_type != "3'array":
try:
p_threshold = float(p_threshold);
alt_exon_fold_variable = float(alt_exon_fold_variable)
expression_threshold = float(expression_threshold);
gene_expression_cutoff = float(gene_expression_cutoff)
dabg_p = float(dabg_p);
additional_score = float(additional_score)
gene_expression_cutoff = float(gene_expression_cutoff)
except Exception:
try:
gene_expression_cutoff = float(gene_expression_cutoff)
except Exception:
gene_expression_cutoff = 0
try:
rpkm_threshold = float(rpkm_threshold)
except Exception:
rpkm_threshold = -1
try:
exon_exp_threshold = float(exon_exp_threshold)
except Exception:
exon_exp_threshold = 0
try:
gene_exp_threshold = float(gene_exp_threshold)
except Exception:
gene_exp_threshold = 0
try:
exon_rpkm_threshold = float(exon_rpkm_threshold)
except Exception:
exon_rpkm_threshold = 0
if filter_probeset_types not in probeset_types and array_type == 'exon':
print "Invalid probeset-type entered:", filter_probeset_types, '. Must be "full", "extended" or "core"';
sys.exit()
elif array_type == 'gene' and filter_probeset_types == 'NA':
filter_probeset_types = 'core'
if dabg_p > 1 or dabg_p <= 0:
print "Invalid DABG p-value entered:", dabg_p, '. Must be > 0 and <= 1';
sys.exit()
if expression_threshold < 1:
print "Invalid expression threshold entered:", expression_threshold, '. Must be > 1';
sys.exit()
if p_threshold > 1 or p_threshold <= 0:
print "Invalid alternative exon p-value entered:", p_threshold, '. Must be > 0 and <= 1';
sys.exit()
if alt_exon_fold_variable < 1 and analysis_method != 'ASPIRE':
print "Invalid alternative exon threshold entered:", alt_exon_fold_variable, '. Must be > 1';
sys.exit()
if gene_expression_cutoff < 1:
print "Invalid gene expression threshold entered:", gene_expression_cutoff, '. Must be > 1';
sys.exit()
if additional_score < 1:
print "Invalid additional score threshold entered:", additional_score, '. Must be > 1';
sys.exit()
if array_type == 'RNASeq':
if rpkm_threshold < 0:
print "Invalid gene RPKM threshold entered:", rpkm_threshold, '. Must be >= 0';
sys.exit()
if exon_exp_threshold < 1:
print "Invalid exon expression threshold entered:", exon_exp_threshold, '. Must be > 1';
sys.exit()
if exon_rpkm_threshold < 0:
print "Invalid exon RPKM threshold entered:", exon_rpkm_threshold, '. Must be >= 0';
sys.exit()
if gene_exp_threshold < 1:
print "Invalid gene expression threshold entered:", gene_exp_threshold, '. Must be > 1';
sys.exit()
if 'FIRMA' in additional_algorithms and array_type == 'RNASeq':
print 'FIRMA is not an available option for RNASeq... Changing this to splicing-index.'
additional_algorithms = 'splicing-index'
additional_algorithms = UI.AdditionalAlgorithms(additional_algorithms);
additional_algorithms.setScore(additional_score)
if array_type == 'RNASeq':
manufacturer = 'RNASeq'
if 'CEL' in run_from_scratch: run_from_scratch = 'Process RNA-seq reads'
if build_exon_bedfile == 'yes': run_from_scratch = 'buildExonExportFiles'
if run_from_scratch == 'Process AltAnalyze filtered': expression_data_format = 'log' ### This is switched to log no matter what, after initial import and analysis of CEL or BED files
### These variables are modified from the defaults in the module UI as below
excludeNonExpExons = True
if avg_all_for_ss == 'yes':
avg_all_for_ss = 'yes'
elif 'all exon aligning' in avg_all_for_ss or 'known exons' in avg_all_for_ss or 'expressed exons' in avg_all_for_ss:
if 'known exons' in avg_all_for_ss and array_type == 'RNASeq': excludeNonExpExons = False
avg_all_for_ss = 'yes'
else:
avg_all_for_ss = 'no'
if run_MiDAS == 'NA': run_MiDAS = 'no'
if perform_alt_analysis == 'yes':
perform_alt_analysis = 'yes'
elif perform_alt_analysis == 'expression':
perform_alt_analysis = 'expression'
elif perform_alt_analysis == 'just expression':
perform_alt_analysis = 'expression'
elif perform_alt_analysis == 'no':
perform_alt_analysis = 'expression'
elif platform != "3'array":
perform_alt_analysis = 'both'
if systemToUse != None: array_type = systemToUse
try:
permute_p_threshold = float(permute_p_threshold)
except Exception:
permute_p_threshold = permute_p_threshold
### Store variables for AltAnalyzeMain
expr_var = species, array_type, manufacturer, constitutive_source, dabg_p, expression_threshold, avg_all_for_ss, expression_data_format, include_raw_data, run_from_scratch, perform_alt_analysis
alt_var = analysis_method, p_threshold, filter_probeset_types, alt_exon_fold_variable, gene_expression_cutoff, remove_intronic_junctions, permute_p_threshold, perform_permutation_analysis, export_NI_values, analyze_all_conditions
additional_var = calculate_normIntensity_p, run_MiDAS, use_direct_domain_alignments_only, microRNA_prediction_method, filter_for_AS, additional_algorithms
goelite_var = ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, goelite_permutations, mod, returnPathways
if run_from_scratch == 'buildExonExportFiles':
fl = UI.ExpressionFileLocationData('', '', '', '');
fl.setExonBedBuildStatus('yes');
fl.setFeatureNormalization('none')
fl.setCELFileDir(cel_file_dir);
fl.setArrayType(array_type);
fl.setOutputDir(output_dir)
fl.setMultiThreading(multiThreading)
exp_file_location_db = {};
exp_file_location_db[dataset_name] = fl;
parent_dir = output_dir
perform_alt_analysis = 'expression'
if run_from_scratch == 'Process Expression file':
if len(input_exp_file) > 0:
if groups_file != None and comps_file != None:
if 'exp.' in input_exp_file:
new_exp_file = input_exp_file
else:
new_exp_file = export.findParentDir(input_exp_file) + 'exp.' + export.findFilename(
input_exp_file)
if 'ExpressionInput' not in new_exp_file:
### This expression file is not currently used (could make it the default after copying to this location)
if output_dir[-1] != '/' and output_dir[-1] != '\\':
output_dir += '/'
new_exp_file = output_dir + 'ExpressionInput/' + export.findFilename(new_exp_file)
try:
export.copyFile(input_exp_file, new_exp_file)
except Exception:
print 'Expression file already present in target location.'
try:
export.copyFile(groups_file, string.replace(new_exp_file, 'exp.', 'groups.'))
except Exception:
print 'Groups file already present in target location OR bad input path.'
try:
export.copyFile(comps_file, string.replace(new_exp_file, 'exp.', 'comps.'))
except Exception:
print 'Comparison file already present in target location OR bad input path.'
groups_file = string.replace(new_exp_file, 'exp.', 'groups.')
comps_file = string.replace(new_exp_file, 'exp.', 'comps.')
input_exp_file = new_exp_file
if verifyGroupFileFormat(groups_file) == False:
print "\nWarning! The format of your groups file is not correct. For details, see:\nhttp://code.google.com/p/altanalyze/wiki/ManualGroupsCompsCreation\n"
sys.exit()
try:
cel_files, array_linker_db = ExpressionBuilder.getArrayHeaders(input_exp_file)
if len(input_stats_file) > 1: ###Make sure the files have the same arrays and order first
cel_files2, array_linker_db2 = ExpressionBuilder.getArrayHeaders(input_stats_file)
if cel_files2 != cel_files:
print "The probe set p-value file:\n" + input_stats_file + "\ndoes not have the same array order as the\nexpression file. Correct before proceeding.";
sys.exit()
except Exception:
print '\nWARNING...Expression file not found: "' + input_exp_file + '"\n\n'; sys.exit()
exp_name = string.replace(exp_name, 'exp.', '');
dataset_name = exp_name;
exp_name = string.replace(exp_name, '.txt', '')
groups_name = 'ExpressionInput/groups.' + dataset_name;
comps_name = 'ExpressionInput/comps.' + dataset_name
groups_file_dir = output_dir + '/' + groups_name;
comps_file_dir = output_dir + '/' + comps_name
groups_found = verifyFile(groups_file_dir)
comps_found = verifyFile(comps_file_dir)
if ((groups_found != 'found' or comps_found != 'found') and analyze_all_conditions != 'all groups') or (
analyze_all_conditions == 'all groups' and groups_found != 'found'):
files_exported = UI.predictGroupsAndComps(cel_files, output_dir, exp_name)
if files_exported == 'yes':
print "AltAnalyze inferred a groups and comps file from the CEL file names."
elif run_lineage_profiler == 'yes' and input_file_dir != None and pipelineAnalysis == False and '--runLineageProfiler' in arguments:
pass
else:
print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
fl = UI.ExpressionFileLocationData(input_exp_file, input_stats_file, groups_file_dir, comps_file_dir)
dataset_name = exp_name
if analyze_all_conditions == "all groups":
try:
array_group_list, group_db = UI.importArrayGroupsSimple(groups_file_dir, cel_files)
except Exception:
print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';
sys.exit()
print len(group_db), 'groups found'
if len(group_db) == 2: analyze_all_conditions = 'pairwise'
exp_file_location_db = {};
exp_file_location_db[exp_name] = fl
elif run_from_scratch == 'Process CEL files' or run_from_scratch == 'Process RNA-seq reads' or run_from_scratch == 'Process Feature Extraction files':
if groups_file != None and comps_file != None:
try:
shutil.copyfile(groups_file, string.replace(exp_file_dir, 'exp.', 'groups.'))
except Exception:
print 'Groups file already present in target location OR bad input path.'
try:
shutil.copyfile(comps_file, string.replace(exp_file_dir, 'exp.', 'comps.'))
except Exception:
print 'Comparison file already present in target location OR bad input path.'
stats_file_dir = string.replace(exp_file_dir, 'exp.', 'stats.')
groups_file_dir = string.replace(exp_file_dir, 'exp.', 'groups.')
comps_file_dir = string.replace(exp_file_dir, 'exp.', 'comps.')
groups_found = verifyFile(groups_file_dir)
comps_found = verifyFile(comps_file_dir)
if ((groups_found != 'found' or comps_found != 'found') and analyze_all_conditions != 'all groups') or (
analyze_all_conditions == 'all groups' and groups_found != 'found'):
if mappedExonAnalysis:
pass
else:
files_exported = UI.predictGroupsAndComps(cel_files, output_dir, exp_name)
if files_exported == 'yes': print "AltAnalyze inferred a groups and comps file from the CEL file names."
#else: print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
fl = UI.ExpressionFileLocationData(exp_file_dir, stats_file_dir, groups_file_dir, comps_file_dir)
exp_file_location_db = {};
exp_file_location_db[dataset_name] = fl
parent_dir = output_dir ### interchangeable terms (parent_dir used with expression file import)
if analyze_all_conditions == "all groups":
array_group_list, group_db = UI.importArrayGroupsSimple(groups_file_dir, cel_files)
UI.exportGroups(exp_file_location_db, array_group_list)
print len(group_db), 'groups found'
if len(group_db) == 2: analyze_all_conditions = 'pairwise'
try:
fl.setRunKallisto(input_fastq_dir)
except Exception:
pass
elif run_from_scratch == 'Process AltAnalyze filtered':
if '.txt' in input_filtered_dir: ### Occurs if the user tries to load a specific file
dirs = string.split(input_filtered_dir, '/')
input_filtered_dir = string.join(dirs[:-1], '/')
fl = UI.ExpressionFileLocationData('', '', '', '');
dataset_name = 'filtered-exp_dir'
dirs = string.split(input_filtered_dir, 'AltExpression');
parent_dir = dirs[0]
exp_file_location_db = {};
exp_file_location_db[dataset_name] = fl
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset_name]
file_location_defaults = UI.importDefaultFileLocations()
apt_location = UI.getAPTLocations(file_location_defaults, run_from_scratch, run_MiDAS)
fl.setAPTLocation(apt_location)
if run_from_scratch == 'Process CEL files':
if xhyb_remove == 'yes' and (
array_type == 'gene' or array_type == 'junction'): xhyb_remove = 'no' ### This is set when the user mistakenly selects exon array, initially
fl.setInputCDFFile(input_cdf_file);
fl.setCLFFile(clf_file);
fl.setBGPFile(bgp_file);
fl.setXHybRemoval(xhyb_remove)
fl.setCELFileDir(cel_file_dir);
fl.setArrayType(array_type_original);
fl.setOutputDir(output_dir)
elif run_from_scratch == 'Process RNA-seq reads':
fl.setCELFileDir(cel_file_dir);
fl.setOutputDir(output_dir)
elif run_from_scratch == 'Process Feature Extraction files':
fl.setCELFileDir(cel_file_dir);
fl.setOutputDir(output_dir)
fl = exp_file_location_db[dataset];
fl.setRootDir(parent_dir)
apt_location = fl.APTLocation()
root_dir = fl.RootDir();
fl.setExonBedBuildStatus(build_exon_bedfile)
fl.setMarkerFinder(marker_finder)
fl.setFeatureNormalization(normalize_feature_exp)
fl.setNormMatrix(normalize_gene_data)
fl.setProbabilityStatistic(probability_statistic)
fl.setProducePlots(visualize_qc_results)
fl.setPerformLineageProfiler(run_lineage_profiler)
fl.setCompendiumType(compendiumType)
fl.setCompendiumPlatform(compendiumPlatform)
fl.setVendor(manufacturer)
try:
fl.setFDRStatistic(FDR_statistic)
except Exception:
pass
fl.setAnalysisMode('commandline')
fl.setBatchEffectRemoval(batch_effects)
fl.setChannelToExtract(channel_to_extract)
fl.setMultiThreading(multiThreading)
try:
fl.setExcludeLowExpressionExons(excludeNonExpExons)
except Exception:
fl.setExcludeLowExpressionExons(True)
if 'other' in manufacturer or 'Other' in manufacturer:
### For data without a primary array ID key
manufacturer = "other:3'array"
fl.setVendor(manufacturer)
if array_type == 'RNASeq': ### Post version 2.0, add variables in fl rather than below
fl.setRPKMThreshold(rpkm_threshold)
fl.setExonExpThreshold(exon_exp_threshold)
fl.setGeneExpThreshold(gene_exp_threshold)
fl.setExonRPKMThreshold(exon_rpkm_threshold)
fl.setJunctionExpThreshold(expression_threshold)
fl.setExonMapFile(exonMapFile)
fl.setPlatformType(platformType)
### Verify database presence
try:
dirs = unique.read_directory('/AltDatabase')
except Exception:
dirs = []
if species not in dirs:
print '\n' + species, 'species not yet installed. Please install before proceeding (e.g., "python AltAnalyze.py --update Official --species', species, '--version EnsMart65").'
global commandLineMode;
commandLineMode = 'yes'
AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db, None)
else:
print 'Insufficient Flags entered (requires --species and --output)'
def cleanUpCommandArguments():
### Needed on PC
command_args = string.join(sys.argv, ' ')
arguments = string.split(command_args, ' --')
for argument in arguments:
"""
argument_list = string.split(argument,' ')
if len(argument_list)>2:
filename = string.join(argument_list[1:],' ')
argument = argument_list[0]+' '+string.replace(filename,' ','$$$')
"""
argument_list = string.split(argument, ' ')
#argument = string.join(re.findall(r"\w",argument),'')
if ':' in argument: ### Windows OS
z = string.find(argument_list[1], ':')
if z != -1 and z != 1: ### Hence, it is in the argument but not at the second position
print 'Illegal parentheses found. Please re-type these and re-run.';
sys.exit()
def runCommandLineVersion():
### This code had to be moved to a separate function to prevent iterative runs upon AltAnalyze.py re-import
command_args = string.join(sys.argv, ' ')
#try: cleanUpCommandArguments()
#except Exception: null=[]
#print [command_args];sys.exit()
if len(sys.argv[1:]) > 0 and '--' in command_args:
if '--GUI' in command_args:
AltAnalyzeSetup(
'no') ### a trick to get back to the main page of the GUI (if AltAnalyze has Tkinter conflict)
try:
commandLineRun()
except Exception:
print traceback.format_exc()
###### Determine Command Line versus GUI Control ######
command_args = string.join(sys.argv, ' ')
if len(sys.argv[1:]) > 1 and '-' in command_args:
null = []
else:
try:
import Tkinter
from Tkinter import *
import PmwFreeze
import tkFileDialog
from tkFont import Font
use_Tkinter = 'yes'
except ImportError:
use_Tkinter = 'yes'; print "\nPmw or Tkinter not found... Tkinter print out not available";
def testResultsPanel():
file = "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/3'Array/Merrill/ExpressionInput/exp.test.txt"
#QC.outputArrayQC(file)
global root;
root = Tk()
global pathway_permutations;
pathway_permutations = 'NA'
global log_file;
log_file = 'null.txt'
global array_type;
global explicit_data_type
global run_GOElite;
run_GOElite = 'run-immediately'
explicit_data_type = 'exon-only'
array_type = 'RNASeq'
fl = UI.ExpressionFileLocationData('', '', '', '')
graphic_links = []
graphic_links.append(['PCA', 'PCA.png'])
graphic_links.append(['HC', 'HC.png'])
graphic_links.append(['PCA1', 'PCA.png'])
graphic_links.append(['HC1', 'HC.png'])
graphic_links.append(['PCA2', 'PCA.png'])
graphic_links.append(['HC2', 'HC.png'])
graphic_links.append(['PCA3', 'PCA.png'])
graphic_links.append(['HC3', 'HC.png'])
graphic_links.append(['PCA4', 'PCA.png'])
graphic_links.append(['HC4', 'HC.png'])
summary_db = {}
summary_db['QC'] = graphic_links
#summary_db={}
fl.setGraphicLinks(graphic_links)
summary_db['gene_assayed'] = 1
summary_db['denominator_exp_genes'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_genes'] = 1
summary_db['direct_domain_genes'] = 1
summary_db['miRNA_gene_hits'] = 1
#summary_db={}
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
dataset = 'test';
results_dir = ''
print "Analysis Complete\n";
if root != '' and root != None:
UI.InfoWindow(print_out, 'Analysis Completed!')
tl = Toplevel();
SummaryResultsWindow(tl, 'GE', results_dir, dataset, 'parent', summary_db)
print 'here'
#sys.exit()
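### Logger tees stdout to a log file; write() reopens the log in append mode and
### closes it after each message, so the log survives if AltAnalyze crashes mid-run.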
class Logger(object):
def __init__(self, null):
self.terminal = sys.stdout
self.log = open(log_file, "w")
def write(self, message):
self.terminal.write(message)
self.log = open(log_file, "a")
self.log.write(message)
self.log.close()
def flush(self): pass
if __name__ == '__main__':
try:
mlp.freeze_support()
except Exception:
pass
#testResultsPanel()
skip_intro = 'yes'; #sys.exit()
#skip_intro = 'remoteViewer'
runCommandLineVersion()
if use_Tkinter == 'yes': AltAnalyzeSetup(skip_intro)
""" To do list:
0) (done) Integrate new network visualization functionality in clustering
1) RNA-Seq and LineageProfiler: threshold-based RPKM expression filtering for binary absent/present gene and exon calls
2) (demo) Splicing graph/isoform visualization
3) SQLite for gene-set databases prior to clustering and network visualization
4) (done) Gene-level correlation queries for clustering
5) (explored - not good) Optional algorithm type of PCA
6) (done) Optional normalization of expression data for clustering
7) (partially) Integrate splicing factor enrichment analysis (separate module?)
8) (done) Venn diagram option
9) (done) Additional Analyses: (A) combine lists, (B) annotate ID list, (C) run marker finder directly, (D) any graph from table option, (E) network from SIF, (F) inference networks from gene-lists (protein-protein, protein-DNA, protein-splicing)
10) Optional denominator option for GO-Elite (create from input and ID system IDs)
11) Update fields in summary combined alt.exon files (key by probeset)
12) Check field names for junction, exon, RNA-Seq in summary alt.exon report
13) (done) Support additional ID types for initial import (ID select option and pulldown - Other)
14) Proper FDR p-value for alt.exon analyses (include all computed p-values)
15) Add all major clustering and LineageProfiler options to UI along with stats filtering by default
16) (done) Make GO-Elite analysis the default
17) Support R check (and response that they need it) along with GUI gcrma, agilent array, hopach, combat
18) Probe-level annotations from Ensembl (partial code in place) and probe-level RMA in R (or possibly APT) - google pgf for U133 array
19) (done) Include various gene databases for LineageProfiler in download and allow for custom databases to be used (markerFinder based)
20) (done) Quantile normalization option for any non-Affy, non-RNASeq data (check box)
21) (done) Import agilent from Feature extraction files (pull-down option)
22) Update the software from the software
Advantages of this tool kit:
0) Easiest to use, hands down
1) Established and novel functionality for transcriptome/proteomics analysis built in
2) Independent and cooperative options for RNA-Seq and array analysis (splicing and gene expression)
3) Superior functional analyses (TF-target, splicing-factor target, lineage markers, WikiPathway visualization)
4) Options for different levels of users with different integration options (multiple statistical method options, optional R support)
5) Built in secondary analysis options for already processed data (graphing, clustering, biomarker discovery, pathway analysis, network visualization)
6) Incorporates highly validated alternative exon identification methods, independent and jointly
Primary Engineer Work:
0) C-library calls and/or multithreading where applicable to improve performance.
1) MySQL or equivalent transition for all large database queries (e.g., HuEx 2.1 on-the-fly coordinate mapping).
2) Splicing-domain visualization (matplotlib).
3) Isoform-domain network visualization and WP overlays.
4) Webservice calls to in silico protein translation, domain prediction, splicing factor regulation.
5) Stand-alone integration with bedtools, QC tools, TopHat, Cufflinks, Miso (optional).
### 2.0.9
monocle integration
generic and cell classification machine learning
PCR primer design (gene centric after file selection)
BAM->BED (local SAMTools)
updated APT
"""
|
wuxue/altanalyze
|
AltAnalyze_LOCAL_6888.py
|
Python
|
apache-2.0
| 537,268
|
[
"Cytoscape"
] |
4964b808108db784d8846a2db6ab3cb86500cc82f301882a121e9a414f3a2d37
|
#! /bin/python
# $Id$
# -----------------------------------------------------------------------------
# CppAD: C++ Algorithmic Differentiation: Copyright (C) 2003-14 Bradley M. Bell
#
# CppAD is distributed under multiple licenses. This distribution is under
# the terms of the
# Eclipse Public License Version 1.0.
#
# A copy of this license is included in the COPYING file of this distribution.
# Please visit http://www.coin-or.org/CppAD/ for information on other licenses.
# -----------------------------------------------------------------------------
import sys
import os
import re
# -----------------------------------------------------------------------------
if sys.argv[0] != 'bin/replace_html.py' :
msg = 'bin/replace_html.py: must be executed from its parent directory'
sys.exit(msg)
#
usage = '''\nusage: replace_html.py define_file replace_file new_file where
define_file: contains the define commands
replace_file: contains the replace commands (may be the same as define_file)
new_file: is a copy of replace_file with the replacements made.
The definitions are specified by:
<!-- define name -->source<!-- end name -->
where name is any unique name, with no spaces ' ', for the replacement text
and source is the replacement text.
The replacement positions are specified by:
<!-- replace name -->destination<!-- end name -->
where name refers to a defined replacement text and destination
is the text that is replaced.
'''
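# A minimal illustration (hypothetical marker content, not from the CppAD sources):
# if define_file contains
#     <!-- define title -->CppAD Home<!-- end title -->
# and replace_file contains
#     <!-- replace title -->placeholder text<!-- end title -->
# then new_file will contain
#     <!-- replace title -->CppAD Home<!-- end title -->
# i.e. the destination text is replaced while both markers are kept.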
narg = len(sys.argv)
if narg != 4 :
msg = '\nExpected 3 but found ' + str(narg-1) + ' command line arguments.'
sys.exit(usage + msg)
define_file = sys.argv[1]
replace_file = sys.argv[2]
new_file = sys.argv[3]
# -----------------------------------------------------------------------------
if not os.path.exists(define_file) :
msg = 'bin/replace_html.py: cannot find define_file = ' + define_file
sys.exit(msg)
if not os.path.exists(replace_file) :
msg = 'bin/replace_html.py: cannot find replace_file = ' + replace_file
sys.exit(msg)
if os.path.exists(new_file) :
msg = 'bin/replace_html.py: cannot overwrite new_file ' + new_file
sys.exit(msg)
f_in = open(define_file, 'rb')
define_data = f_in.read()
f_in.close()
f_in = open(replace_file, 'rb')
replace_data = f_in.read()
f_in.close()
# -----------------------------------------------------------------------------
# create define: a dictionary with replacement text definitions
define = {}
p_define = re.compile('<!-- define ([^ ]*) -->')
p_end = re.compile('<!-- end ([^ ]*) -->')
start = 0
while start < len(define_data) :
rest = define_data[start : ]
next_define = p_define.search(rest)
if next_define == None :
start = len(define_data)
else :
name = next_define.group(1)
if name in define :
msg = 'bin/replace_html.py: file = ' + define_file
msg += '\ncontains two definitions for name = ' + name
sys.exit(msg)
rest = rest[ next_define.end() : ]
#
next_end = p_end.search(rest)
source = rest [ 0 : next_end.start() ]
define[name] = source
start += next_define.end() + next_end.end()
if name != next_end.group(1) :
msg = 'bin/replace_html.py: file = ' + define_file
msg += '\ndefine name = ' + name
msg += ', end name = ' + next_end.group(1)
sys.exit(msg)
# -----------------------------------------------------------------------------
# create new_data: a string with the replacements made
new_data = ''
p_replace = re.compile('<!-- replace ([^ ]*) -->')
start = 0
while start < len(replace_data) :
rest = replace_data[start : ]
next_replace = p_replace.search(rest)
if next_replace == None :
new_data += rest
start = len(replace_data)
else :
name = next_replace.group(1)
if name not in define :
msg = 'bin/replace_html.py: file = ' + define_file
msg += '\ncontains no definition for name = ' + name
sys.exit(msg)
new_data += rest[0 : next_replace.end() ]
new_data += define[name]
#
rest = rest[ next_replace.end() : ]
next_end = p_end.search(rest)
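# keep the end marker but drop the old destination text between the markers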
new_data += rest[ next_end.start() : next_end.end() ]
start += next_replace.end() + next_end.end()
if name != next_end.group(1) :
msg = 'bin/replace_html.py: file = ' + replace_file
msg += '\nreplace name = ' + name
msg += ', end name = ' + next_end.group(1)
sys.exit(msg)
# -----------------------------------------------------------------------------
f_out = open(new_file, 'wb')
f_out.write(new_data)
f_out.close()
# -----------------------------------------------------------------------------
sys.exit(0)
|
utke1/cppad
|
bin/replace_html.py
|
Python
|
epl-1.0
| 4,616
|
[
"VisIt"
] |
735e0c4681c2350ea7a040a5dc16dbd0ac6e629ea5f167990d91e1e4b27c68e5
|
# Orca
#
# Copyright (C) 2010 Joanmarie Diggs
# Copyright (C) 2011-2012 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs" \
"Copyright (c) 2011-2012 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import orca.keynames as keynames
import orca.object_properties as object_properties
import orca.settings as settings
import orca.settings_manager as settings_manager
import orca.speech_generator as speech_generator
_settingsManager = settings_manager.getManager()
########################################################################
# #
# Custom SpeechGenerator #
# #
########################################################################
class SpeechGenerator(speech_generator.SpeechGenerator):
"""Provides a speech generator specific to WebKitGtk widgets."""
def __init__(self, script):
speech_generator.SpeechGenerator.__init__(self, script)
def getVoiceForString(self, obj, string, **args):
voice = settings.voices[settings.DEFAULT_VOICE]
if string.isupper():
voice = settings.voices[settings.UPPERCASE_VOICE]
return voice
def _generateLabel(self, obj, **args):
result = super()._generateLabel(obj, **args)
if result or not self._script.utilities.isWebKitGtk(obj):
return result
role = args.get('role', obj.getRole())
inferRoles = [pyatspi.ROLE_CHECK_BOX,
pyatspi.ROLE_COMBO_BOX,
pyatspi.ROLE_ENTRY,
pyatspi.ROLE_LIST,
pyatspi.ROLE_PASSWORD_TEXT,
pyatspi.ROLE_RADIO_BUTTON]
if role not in inferRoles:
return result
label, objects = self._script.labelInference.infer(obj)
if label:
result.append(label)
result.extend(self.voice(speech_generator.DEFAULT))
return result
def __generateHeadingRole(self, obj):
result = []
role = pyatspi.ROLE_HEADING
level = self._script.utilities.headingLevel(obj)
if level:
result.append(object_properties.ROLE_HEADING_LEVEL_SPEECH % {
'role': self.getLocalizedRoleName(obj, role),
'level': level})
else:
result.append(self.getLocalizedRoleName(obj, role))
return result
def _generateRoleName(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(speech_generator.SYSTEM)
role = args.get('role', obj.getRole())
force = args.get('force', False)
doNotSpeak = [pyatspi.ROLE_UNKNOWN]
if not force:
doNotSpeak.extend([pyatspi.ROLE_FORM,
pyatspi.ROLE_LABEL,
pyatspi.ROLE_MENU_ITEM,
pyatspi.ROLE_LIST_ITEM,
pyatspi.ROLE_PARAGRAPH,
pyatspi.ROLE_SECTION,
pyatspi.ROLE_TABLE_CELL])
if role not in doNotSpeak:
docRoles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]
if role == pyatspi.ROLE_IMAGE:
link = self._script.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_LINK], docRoles)
if link:
result.append(self.getLocalizedRoleName(link))
elif role == pyatspi.ROLE_HEADING:
result.extend(self.__generateHeadingRole(obj))
else:
result.append(self.getLocalizedRoleName(obj, role))
if obj.parent and obj.parent.getRole() == pyatspi.ROLE_HEADING:
result.extend(self.__generateHeadingRole(obj.parent))
if result:
result.extend(acss)
if role == pyatspi.ROLE_LINK \
and obj.childCount and obj[0].getRole() == pyatspi.ROLE_IMAGE:
# If this is a link with a child which is an image, we
# want to indicate that.
#
acss = self.voice(speech_generator.HYPERLINK)
result.append(self.getLocalizedRoleName(obj[0]))
result.extend(acss)
return result
def _generateAncestors(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the text of the ancestors for
the object. This is typically used to present the context for
an object (e.g., the names of the window, the panels, etc.,
that the object is contained in). If the 'priorObj' attribute
of the args dictionary is set, only the differences in
ancestry between the 'priorObj' and the current obj will be
computed. The 'priorObj' is typically set by Orca to be the
previous object with focus.
"""
role = args.get('role', obj.getRole())
if role == pyatspi.ROLE_LINK:
return []
args['stopAtRoles'] = [pyatspi.ROLE_DOCUMENT_FRAME,
pyatspi.ROLE_DOCUMENT_WEB,
pyatspi.ROLE_EMBEDDED,
pyatspi.ROLE_INTERNAL_FRAME,
pyatspi.ROLE_FORM,
pyatspi.ROLE_MENU_BAR,
pyatspi.ROLE_TOOL_BAR]
args['skipRoles'] = [pyatspi.ROLE_PARAGRAPH,
pyatspi.ROLE_LIST_ITEM,
pyatspi.ROLE_TEXT]
return speech_generator.SpeechGenerator._generateAncestors(
self, obj, **args)
def _generateMnemonic(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the mnemonic for the object, or
an empty array if no mnemonic can be found.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
if not (_settingsManager.getSetting('enableMnemonicSpeaking') \
or args.get('forceMnemonic', False)):
return []
if not self._script.utilities.isWebKitGtk(obj):
return speech_generator.SpeechGenerator._generateMnemonic(
self, obj, **args)
result = []
acss = self.voice(speech_generator.SYSTEM)
mnemonic, shortcut, accelerator = \
self._script.utilities.mnemonicShortcutAccelerator(obj)
if shortcut:
if _settingsManager.getSetting('speechVerbosityLevel') == \
settings.VERBOSITY_LEVEL_VERBOSE:
shortcut = 'Alt Shift %s' % shortcut
result = [keynames.localizeKeySequence(shortcut)]
result.extend(acss)
return result
|
pvagner/orca
|
src/orca/scripts/toolkits/WebKitGtk/speech_generator.py
|
Python
|
lgpl-2.1
| 7,899
|
[
"ORCA"
] |
fe7f993a02398be306f09ce6379847401a7913f9e1a278ee604db11b64fa8eed
|
#
# This file is based on emoji (https://github.com/kyokomi/emoji).
#
# The MIT License (MIT)
#
# Copyright (c) 2014 kyokomi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
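# A minimal usage sketch (an assumption, not part of the original emoji port):
# emojiCodeDict maps ":name:" codes to their Unicode characters, so plain text
# can be emojized with a single regex substitution over the code pattern.
import re

def emojize(text):
    # Replace every ":code:" token with its emoji; unknown codes pass through.
    return re.sub(r':[a-z0-9_+-]+:',
                  lambda m: emojiCodeDict.get(m.group(0), m.group(0)),
                  text)

# Example: emojize(u"deploy finished :tada: :rocket:")
#          -> u"deploy finished \U0001f389 \U0001f680"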
emojiCodeDict = {
":capricorn:": u"\U00002651",
":end:": u"\U0001f51a",
":no_mobile_phones:": u"\U0001f4f5",
":couple:": u"\U0001f46b",
":snowman:": u"\U000026c4",
":sunrise_over_mountains:": u"\U0001f304",
":suspension_railway:": u"\U0001f69f",
":arrows_counterclockwise:": u"\U0001f504",
":bug:": u"\U0001f41b",
":confused:": u"\U0001f615",
":dress:": u"\U0001f457",
":honeybee:": u"\U0001f41d",
":waning_crescent_moon:": u"\U0001f318",
":balloon:": u"\U0001f388",
":bus:": u"\U0001f68c",
":package:": u"\U0001f4e6",
":pencil2:": u"\U0000270f",
":rage:": u"\U0001f621",
":space_invader:": u"\U0001f47e",
":white_medium_small_square:": u"\U000025fd",
":fast_forward:": u"\U000023e9",
":rice_cracker:": u"\U0001f358",
":incoming_envelope:": u"\U0001f4e8",
":sa:": u"\U0001f202",
":womens:": u"\U0001f6ba",
":arrow_right:": u"\U000027a1",
":construction_worker:": u"\U0001f477",
":notes:": u"\U0001f3b6",
":goat:": u"\U0001f410",
":grey_question:": u"\U00002754",
":lantern:": u"\U0001f3ee",
":rice_scene:": u"\U0001f391",
":running:": u"\U0001f3c3",
":ferris_wheel:": u"\U0001f3a1",
":musical_score:": u"\U0001f3bc",
":sparkle:": u"\U00002747",
":wink:": u"\U0001f609",
":art:": u"\U0001f3a8",
":clock330:": u"\U0001f55e",
":minidisc:": u"\U0001f4bd",
":no_entry_sign:": u"\U0001f6ab",
":wind_chime:": u"\U0001f390",
":cyclone:": u"\U0001f300",
":herb:": u"\U0001f33f",
":leopard:": u"\U0001f406",
":banana:": u"\U0001f34c",
":handbag:": u"\U0001f45c",
":honey_pot:": u"\U0001f36f",
":ok:": u"\U0001f197",
":hearts:": u"\U00002665",
":passport_control:": u"\U0001f6c2",
":moyai:": u"\U0001f5ff",
":smile:": u"\U0001f604",
":tiger2:": u"\U0001f405",
":twisted_rightwards_arrows:": u"\U0001f500",
":children_crossing:": u"\U0001f6b8",
":cow:": u"\U0001f42e",
":point_up:": u"\U0000261d",
":house:": u"\U0001f3e0",
":man_with_turban:": u"\U0001f473",
":mountain_railway:": u"\U0001f69e",
":vibration_mode:": u"\U0001f4f3",
":blowfish:": u"\U0001f421",
":it:": u"\U0001f1ee\U0001f1f9",
":oden:": u"\U0001f362",
":clock3:": u"\U0001f552",
":lollipop:": u"\U0001f36d",
":train:": u"\U0001f68b",
":scissors:": u"\U00002702",
":triangular_ruler:": u"\U0001f4d0",
":wedding:": u"\U0001f492",
":flashlight:": u"\U0001f526",
":secret:": u"\U00003299",
":sushi:": u"\U0001f363",
":blue_car:": u"\U0001f699",
":cd:": u"\U0001f4bf",
":milky_way:": u"\U0001f30c",
":mortar_board:": u"\U0001f393",
":crown:": u"\U0001f451",
":speech_balloon:": u"\U0001f4ac",
":bento:": u"\U0001f371",
":grey_exclamation:": u"\U00002755",
":hotel:": u"\U0001f3e8",
":keycap_ten:": u"\U0001f51f",
":newspaper:": u"\U0001f4f0",
":outbox_tray:": u"\U0001f4e4",
":racehorse:": u"\U0001f40e",
":laughing:": u"\U0001f606",
":black_large_square:": u"\U00002b1b",
":books:": u"\U0001f4da",
":eight_spoked_asterisk:": u"\U00002733",
":heavy_check_mark:": u"\U00002714",
":m:": u"\U000024c2",
":wave:": u"\U0001f44b",
":bicyclist:": u"\U0001f6b4",
":cocktail:": u"\U0001f378",
":european_castle:": u"\U0001f3f0",
":point_down:": u"\U0001f447",
":tokyo_tower:": u"\U0001f5fc",
":battery:": u"\U0001f50b",
":dancer:": u"\U0001f483",
":repeat:": u"\U0001f501",
":ru:": u"\U0001f1f7\U0001f1fa",
":new_moon:": u"\U0001f311",
":church:": u"\U000026ea",
":date:": u"\U0001f4c5",
":earth_americas:": u"\U0001f30e",
":footprints:": u"\U0001f463",
":libra:": u"\U0000264e",
":mountain_cableway:": u"\U0001f6a0",
":small_red_triangle_down:": u"\U0001f53b",
":top:": u"\U0001f51d",
":sunglasses:": u"\U0001f60e",
":abcd:": u"\U0001f521",
":cl:": u"\U0001f191",
":ski:": u"\U0001f3bf",
":book:": u"\U0001f4d6",
":hourglass_flowing_sand:": u"\U000023f3",
":stuck_out_tongue_closed_eyes:": u"\U0001f61d",
":cold_sweat:": u"\U0001f630",
":headphones:": u"\U0001f3a7",
":confetti_ball:": u"\U0001f38a",
":gemini:": u"\U0000264a",
":new:": u"\U0001f195",
":pray:": u"\U0001f64f",
":watch:": u"\U0000231a",
":coffee:": u"\U00002615",
":ghost:": u"\U0001f47b",
":on:": u"\U0001f51b",
":pouch:": u"\U0001f45d",
":taxi:": u"\U0001f695",
":hocho:": u"\U0001f52a",
":yum:": u"\U0001f60b",
":heavy_plus_sign:": u"\U00002795",
":tada:": u"\U0001f389",
":arrow_heading_down:": u"\U00002935",
":clock530:": u"\U0001f560",
":poultry_leg:": u"\U0001f357",
":elephant:": u"\U0001f418",
":gb:": u"\U0001f1ec\U0001f1e7",
":mahjong:": u"\U0001f004",
":rice:": u"\U0001f35a",
":musical_note:": u"\U0001f3b5",
":beginner:": u"\U0001f530",
":small_red_triangle:": u"\U0001f53a",
":tomato:": u"\U0001f345",
":clock1130:": u"\U0001f566",
":japanese_castle:": u"\U0001f3ef",
":sun_with_face:": u"\U0001f31e",
":four:": u"\U00000034\U000020e3",
":microphone:": u"\U0001f3a4",
":tennis:": u"\U0001f3be",
":arrow_up_down:": u"\U00002195",
":cn:": u"\U0001f1e8\U0001f1f3",
":horse_racing:": u"\U0001f3c7",
":no_bicycles:": u"\U0001f6b3",
":snail:": u"\U0001f40c",
":free:": u"\U0001f193",
":beetle:": u"\U0001f41e",
":black_small_square:": u"\U000025aa",
":file_folder:": u"\U0001f4c1",
":hushed:": u"\U0001f62f",
":skull:": u"\U0001f480",
":ab:": u"\U0001f18e",
":rocket:": u"\U0001f680",
":sweet_potato:": u"\U0001f360",
":guitar:": u"\U0001f3b8",
":poodle:": u"\U0001f429",
":tulip:": u"\U0001f337",
":large_orange_diamond:": u"\U0001f536",
":-1:": u"\U0001f44e",
":chart_with_upwards_trend:": u"\U0001f4c8",
":de:": u"\U0001f1e9\U0001f1ea",
":grapes:": u"\U0001f347",
":ideograph_advantage:": u"\U0001f250",
":japanese_ogre:": u"\U0001f479",
":telephone:": u"\U0000260e",
":clock230:": u"\U0001f55d",
":hourglass:": u"\U0000231b",
":leftwards_arrow_with_hook:": u"\U000021a9",
":sparkler:": u"\U0001f387",
":black_joker:": u"\U0001f0cf",
":clock730:": u"\U0001f562",
":first_quarter_moon_with_face:": u"\U0001f31b",
":man:": u"\U0001f468",
":clock4:": u"\U0001f553",
":fishing_pole_and_fish:": u"\U0001f3a3",
":tophat:": u"\U0001f3a9",
":white_medium_square:": u"\U000025fb",
":mega:": u"\U0001f4e3",
":spaghetti:": u"\U0001f35d",
":dart:": u"\U0001f3af",
":girl:": u"\U0001f467",
":womans_hat:": u"\U0001f452",
":bullettrain_front:": u"\U0001f685",
":department_store:": u"\U0001f3ec",
":heartbeat:": u"\U0001f493",
":palm_tree:": u"\U0001f334",
":swimmer:": u"\U0001f3ca",
":yellow_heart:": u"\U0001f49b",
":arrow_upper_right:": u"\U00002197",
":clock2:": u"\U0001f551",
":high_heel:": u"\U0001f460",
":arrow_double_up:": u"\U000023eb",
":cry:": u"\U0001f622",
":dvd:": u"\U0001f4c0",
":e-mail:": u"\U0001f4e7",
":baby_bottle:": u"\U0001f37c",
":cool:": u"\U0001f192",
":floppy_disk:": u"\U0001f4be",
":iphone:": u"\U0001f4f1",
":minibus:": u"\U0001f690",
":rooster:": u"\U0001f413",
":three:": u"\U00000033\U000020e3",
":white_small_square:": u"\U000025ab",
":cancer:": u"\U0000264b",
":question:": u"\U00002753",
":sake:": u"\U0001f376",
":birthday:": u"\U0001f382",
":dog2:": u"\U0001f415",
":loudspeaker:": u"\U0001f4e2",
":arrow_up_small:": u"\U0001f53c",
":camel:": u"\U0001f42b",
":koala:": u"\U0001f428",
":mag_right:": u"\U0001f50e",
":soccer:": u"\U000026bd",
":bike:": u"\U0001f6b2",
":ear_of_rice:": u"\U0001f33e",
":shit:": u"\U0001f4a9",
":u7981:": u"\U0001f232",
":bath:": u"\U0001f6c0",
":baby:": u"\U0001f476",
":lock_with_ink_pen:": u"\U0001f50f",
":necktie:": u"\U0001f454",
":bikini:": u"\U0001f459",
":blush:": u"\U0001f60a",
":heartpulse:": u"\U0001f497",
":pig_nose:": u"\U0001f43d",
":straight_ruler:": u"\U0001f4cf",
":u6e80:": u"\U0001f235",
":gift:": u"\U0001f381",
":traffic_light:": u"\U0001f6a5",
":hibiscus:": u"\U0001f33a",
":couple_with_heart:": u"\U0001f491",
":pushpin:": u"\U0001f4cc",
":u6709:": u"\U0001f236",
":walking:": u"\U0001f6b6",
":grinning:": u"\U0001f600",
":hash:": u"\U00000023\U000020e3",
":radio_button:": u"\U0001f518",
":raised_hand:": u"\U0000270b",
":shaved_ice:": u"\U0001f367",
":barber:": u"\U0001f488",
":cat:": u"\U0001f431",
":heavy_exclamation_mark:": u"\U00002757",
":ice_cream:": u"\U0001f368",
":mask:": u"\U0001f637",
":pig2:": u"\U0001f416",
":triangular_flag_on_post:": u"\U0001f6a9",
":arrow_upper_left:": u"\U00002196",
":bee:": u"\U0001f41d",
":beer:": u"\U0001f37a",
":black_nib:": u"\U00002712",
":exclamation:": u"\U00002757",
":dog:": u"\U0001f436",
":fire:": u"\U0001f525",
":ant:": u"\U0001f41c",
":broken_heart:": u"\U0001f494",
":chart:": u"\U0001f4b9",
":clock1:": u"\U0001f550",
":bomb:": u"\U0001f4a3",
":virgo:": u"\U0000264d",
":a:": u"\U0001f170",
":fork_and_knife:": u"\U0001f374",
":copyright:": u"\U000000a9",
":curly_loop:": u"\U000027b0",
":full_moon:": u"\U0001f315",
":shoe:": u"\U0001f45e",
":european_post_office:": u"\U0001f3e4",
":ng:": u"\U0001f196",
":office:": u"\U0001f3e2",
":raising_hand:": u"\U0001f64b",
":revolving_hearts:": u"\U0001f49e",
":aquarius:": u"\U00002652",
":electric_plug:": u"\U0001f50c",
":meat_on_bone:": u"\U0001f356",
":mens:": u"\U0001f6b9",
":briefcase:": u"\U0001f4bc",
":ship:": u"\U0001f6a2",
":anchor:": u"\U00002693",
":ballot_box_with_check:": u"\U00002611",
":bear:": u"\U0001f43b",
":beers:": u"\U0001f37b",
":dromedary_camel:": u"\U0001f42a",
":nut_and_bolt:": u"\U0001f529",
":construction:": u"\U0001f6a7",
":golf:": u"\U000026f3",
":toilet:": u"\U0001f6bd",
":blue_book:": u"\U0001f4d8",
":boom:": u"\U0001f4a5",
":deciduous_tree:": u"\U0001f333",
":kissing_closed_eyes:": u"\U0001f61a",
":smiley_cat:": u"\U0001f63a",
":fuelpump:": u"\U000026fd",
":kiss:": u"\U0001f48b",
":clock10:": u"\U0001f559",
":sheep:": u"\U0001f411",
":white_flower:": u"\U0001f4ae",
":boar:": u"\U0001f417",
":currency_exchange:": u"\U0001f4b1",
":facepunch:": u"\U0001f44a",
":flower_playing_cards:": u"\U0001f3b4",
":person_frowning:": u"\U0001f64d",
":poop:": u"\U0001f4a9",
":satisfied:": u"\U0001f606",
":8ball:": u"\U0001f3b1",
":disappointed_relieved:": u"\U0001f625",
":panda_face:": u"\U0001f43c",
":ticket:": u"\U0001f3ab",
":us:": u"\U0001f1fa\U0001f1f8",
":waxing_crescent_moon:": u"\U0001f312",
":dragon:": u"\U0001f409",
":gun:": u"\U0001f52b",
":mount_fuji:": u"\U0001f5fb",
":new_moon_with_face:": u"\U0001f31a",
":star2:": u"\U0001f31f",
":grimacing:": u"\U0001f62c",
":confounded:": u"\U0001f616",
":congratulations:": u"\U00003297",
":custard:": u"\U0001f36e",
":frowning:": u"\U0001f626",
":maple_leaf:": u"\U0001f341",
":police_car:": u"\U0001f693",
":cloud:": u"\U00002601",
":jeans:": u"\U0001f456",
":fish:": u"\U0001f41f",
":wavy_dash:": u"\U00003030",
":clock5:": u"\U0001f554",
":santa:": u"\U0001f385",
":japan:": u"\U0001f5fe",
":oncoming_taxi:": u"\U0001f696",
":whale:": u"\U0001f433",
":arrow_forward:": u"\U000025b6",
":kissing_heart:": u"\U0001f618",
":bullettrain_side:": u"\U0001f684",
":fearful:": u"\U0001f628",
":moneybag:": u"\U0001f4b0",
":runner:": u"\U0001f3c3",
":mailbox:": u"\U0001f4eb",
":sandal:": u"\U0001f461",
":zzz:": u"\U0001f4a4",
":apple:": u"\U0001f34e",
":arrow_heading_up:": u"\U00002934",
":family:": u"\U0001f46a",
":heavy_minus_sign:": u"\U00002796",
":saxophone:": u"\U0001f3b7",
":u5272:": u"\U0001f239",
":black_square_button:": u"\U0001f532",
":bouquet:": u"\U0001f490",
":love_letter:": u"\U0001f48c",
":metro:": u"\U0001f687",
":small_blue_diamond:": u"\U0001f539",
":thought_balloon:": u"\U0001f4ad",
":arrow_up:": u"\U00002b06",
":no_pedestrians:": u"\U0001f6b7",
":smirk:": u"\U0001f60f",
":blue_heart:": u"\U0001f499",
":large_blue_diamond:": u"\U0001f537",
":vs:": u"\U0001f19a",
":v:": u"\U0000270c",
":wheelchair:": u"\U0000267f",
":couplekiss:": u"\U0001f48f",
":tent:": u"\U000026fa",
":purple_heart:": u"\U0001f49c",
":relaxed:": u"\U0000263a",
":accept:": u"\U0001f251",
":green_heart:": u"\U0001f49a",
":pouting_cat:": u"\U0001f63e",
":tram:": u"\U0001f68a",
":bangbang:": u"\U0000203c",
":collision:": u"\U0001f4a5",
":convenience_store:": u"\U0001f3ea",
":person_with_blond_hair:": u"\U0001f471",
":uk:": u"\U0001f1ec\U0001f1e7",
":peach:": u"\U0001f351",
":tired_face:": u"\U0001f62b",
":bread:": u"\U0001f35e",
":mailbox_closed:": u"\U0001f4ea",
":open_mouth:": u"\U0001f62e",
":pig:": u"\U0001f437",
":put_litter_in_its_place:": u"\U0001f6ae",
":u7a7a:": u"\U0001f233",
":bulb:": u"\U0001f4a1",
":clock9:": u"\U0001f558",
":envelope_with_arrow:": u"\U0001f4e9",
":pisces:": u"\U00002653",
":baggage_claim:": u"\U0001f6c4",
":egg:": u"\U0001f373",
":sweat_smile:": u"\U0001f605",
":boat:": u"\U000026f5",
":fr:": u"\U0001f1eb\U0001f1f7",
":heavy_division_sign:": u"\U00002797",
":muscle:": u"\U0001f4aa",
":paw_prints:": u"\U0001f43e",
":arrow_left:": u"\U00002b05",
":black_circle:": u"\U000026ab",
":kissing_smiling_eyes:": u"\U0001f619",
":star:": u"\U00002b50",
":steam_locomotive:": u"\U0001f682",
":1234:": u"\U0001f522",
":clock130:": u"\U0001f55c",
":kr:": u"\U0001f1f0\U0001f1f7",
":monorail:": u"\U0001f69d",
":school:": u"\U0001f3eb",
":seven:": u"\U00000037\U000020e3",
":baby_chick:": u"\U0001f424",
":bridge_at_night:": u"\U0001f309",
":hotsprings:": u"\U00002668",
":rose:": u"\U0001f339",
":love_hotel:": u"\U0001f3e9",
":princess:": u"\U0001f478",
":ramen:": u"\U0001f35c",
":scroll:": u"\U0001f4dc",
":tropical_fish:": u"\U0001f420",
":heart_eyes_cat:": u"\U0001f63b",
":information_desk_person:": u"\U0001f481",
":mouse:": u"\U0001f42d",
":no_smoking:": u"\U0001f6ad",
":post_office:": u"\U0001f3e3",
":stars:": u"\U0001f320",
":arrow_double_down:": u"\U000023ec",
":unlock:": u"\U0001f513",
":arrow_backward:": u"\U000025c0",
":hand:": u"\U0000270b",
":hospital:": u"\U0001f3e5",
":ocean:": u"\U0001f30a",
":mountain_bicyclist:": u"\U0001f6b5",
":octopus:": u"\U0001f419",
":sos:": u"\U0001f198",
":dizzy_face:": u"\U0001f635",
":tongue:": u"\U0001f445",
":train2:": u"\U0001f686",
":checkered_flag:": u"\U0001f3c1",
":orange_book:": u"\U0001f4d9",
":sound:": u"\U0001f509",
":aerial_tramway:": u"\U0001f6a1",
":bell:": u"\U0001f514",
":dragon_face:": u"\U0001f432",
":flipper:": u"\U0001f42c",
":ok_woman:": u"\U0001f646",
":performing_arts:": u"\U0001f3ad",
":postal_horn:": u"\U0001f4ef",
":clock1030:": u"\U0001f565",
":email:": u"\U00002709",
":green_book:": u"\U0001f4d7",
":point_up_2:": u"\U0001f446",
":high_brightness:": u"\U0001f506",
":running_shirt_with_sash:": u"\U0001f3bd",
":bookmark:": u"\U0001f516",
":sob:": u"\U0001f62d",
":arrow_lower_right:": u"\U00002198",
":point_left:": u"\U0001f448",
":purse:": u"\U0001f45b",
":sparkles:": u"\U00002728",
":black_medium_small_square:": u"\U000025fe",
":pound:": u"\U0001f4b7",
":rabbit:": u"\U0001f430",
":woman:": u"\U0001f469",
":negative_squared_cross_mark:": u"\U0000274e",
":open_book:": u"\U0001f4d6",
":smiling_imp:": u"\U0001f608",
":spades:": u"\U00002660",
":baseball:": u"\U000026be",
":fountain:": u"\U000026f2",
":joy:": u"\U0001f602",
":lipstick:": u"\U0001f484",
":partly_sunny:": u"\U000026c5",
":ram:": u"\U0001f40f",
":red_circle:": u"\U0001f534",
":cop:": u"\U0001f46e",
":green_apple:": u"\U0001f34f",
":registered:": u"\U000000ae",
":+1:": u"\U0001f44d",
":crying_cat_face:": u"\U0001f63f",
":innocent:": u"\U0001f607",
":mobile_phone_off:": u"\U0001f4f4",
":underage:": u"\U0001f51e",
":dolphin:": u"\U0001f42c",
":busts_in_silhouette:": u"\U0001f465",
":umbrella:": u"\U00002614",
":angel:": u"\U0001f47c",
":small_orange_diamond:": u"\U0001f538",
":sunflower:": u"\U0001f33b",
":link:": u"\U0001f517",
":notebook:": u"\U0001f4d3",
":oncoming_bus:": u"\U0001f68d",
":bookmark_tabs:": u"\U0001f4d1",
":calendar:": u"\U0001f4c6",
":izakaya_lantern:": u"\U0001f3ee",
":mans_shoe:": u"\U0001f45e",
":name_badge:": u"\U0001f4db",
":closed_lock_with_key:": u"\U0001f510",
":fist:": u"\U0000270a",
":id:": u"\U0001f194",
":ambulance:": u"\U0001f691",
":musical_keyboard:": u"\U0001f3b9",
":ribbon:": u"\U0001f380",
":seedling:": u"\U0001f331",
":tv:": u"\U0001f4fa",
":football:": u"\U0001f3c8",
":nail_care:": u"\U0001f485",
":seat:": u"\U0001f4ba",
":alarm_clock:": u"\U000023f0",
":money_with_wings:": u"\U0001f4b8",
":relieved:": u"\U0001f60c",
":womans_clothes:": u"\U0001f45a",
":lips:": u"\U0001f444",
":clubs:": u"\U00002663",
":house_with_garden:": u"\U0001f3e1",
":sunrise:": u"\U0001f305",
":monkey:": u"\U0001f412",
":six:": u"\U00000036\U000020e3",
":smiley:": u"\U0001f603",
":feet:": u"\U0001f43e",
":waning_gibbous_moon:": u"\U0001f316",
":yen:": u"\U0001f4b4",
":baby_symbol:": u"\U0001f6bc",
":signal_strength:": u"\U0001f4f6",
":boy:": u"\U0001f466",
":busstop:": u"\U0001f68f",
":computer:": u"\U0001f4bb",
":night_with_stars:": u"\U0001f303",
":older_woman:": u"\U0001f475",
":parking:": u"\U0001f17f",
":trumpet:": u"\U0001f3ba",
":100:": u"\U0001f4af",
":sweat_drops:": u"\U0001f4a6",
":wc:": u"\U0001f6be",
":b:": u"\U0001f171",
":cupid:": u"\U0001f498",
":five:": u"\U00000035\U000020e3",
":part_alternation_mark:": u"\U0000303d",
":snowboarder:": u"\U0001f3c2",
":warning:": u"\U000026a0",
":white_large_square:": u"\U00002b1c",
":zap:": u"\U000026a1",
":arrow_down_small:": u"\U0001f53d",
":clock430:": u"\U0001f55f",
":expressionless:": u"\U0001f611",
":phone:": u"\U0000260e",
":roller_coaster:": u"\U0001f3a2",
":lemon:": u"\U0001f34b",
":one:": u"\U00000031\U000020e3",
":christmas_tree:": u"\U0001f384",
":hankey:": u"\U0001f4a9",
":hatched_chick:": u"\U0001f425",
":u7533:": u"\U0001f238",
":large_blue_circle:": u"\U0001f535",
":up:": u"\U0001f199",
":wine_glass:": u"\U0001f377",
":x:": u"\U0000274c",
":nose:": u"\U0001f443",
":rewind:": u"\U000023ea",
":two_hearts:": u"\U0001f495",
":envelope:": u"\U00002709",
":oncoming_automobile:": u"\U0001f698",
":ophiuchus:": u"\U000026ce",
":ring:": u"\U0001f48d",
":tropical_drink:": u"\U0001f379",
":turtle:": u"\U0001f422",
":crescent_moon:": u"\U0001f319",
":koko:": u"\U0001f201",
":microscope:": u"\U0001f52c",
":rugby_football:": u"\U0001f3c9",
":smoking:": u"\U0001f6ac",
":anger:": u"\U0001f4a2",
":aries:": u"\U00002648",
":city_sunset:": u"\U0001f306",
":clock1230:": u"\U0001f567",
":mailbox_with_no_mail:": u"\U0001f4ed",
":movie_camera:": u"\U0001f3a5",
":pager:": u"\U0001f4df",
":zero:": u"\U00000030\U000020e3",
":bank:": u"\U0001f3e6",
":eight_pointed_black_star:": u"\U00002734",
":knife:": u"\U0001f52a",
":u7121:": u"\U0001f21a",
":customs:": u"\U0001f6c3",
":melon:": u"\U0001f348",
":rowboat:": u"\U0001f6a3",
":corn:": u"\U0001f33d",
":eggplant:": u"\U0001f346",
":heart_decoration:": u"\U0001f49f",
":rotating_light:": u"\U0001f6a8",
":round_pushpin:": u"\U0001f4cd",
":cat2:": u"\U0001f408",
":chocolate_bar:": u"\U0001f36b",
":no_bell:": u"\U0001f515",
":radio:": u"\U0001f4fb",
":droplet:": u"\U0001f4a7",
":hamburger:": u"\U0001f354",
":fire_engine:": u"\U0001f692",
":heart:": u"\U00002764",
":potable_water:": u"\U0001f6b0",
":telephone_receiver:": u"\U0001f4de",
":dash:": u"\U0001f4a8",
":globe_with_meridians:": u"\U0001f310",
":guardsman:": u"\U0001f482",
":heavy_multiplication_x:": u"\U00002716",
":chart_with_downwards_trend:": u"\U0001f4c9",
":imp:": u"\U0001f47f",
":earth_asia:": u"\U0001f30f",
":mouse2:": u"\U0001f401",
":notebook_with_decorative_cover:": u"\U0001f4d4",
":telescope:": u"\U0001f52d",
":trolleybus:": u"\U0001f68e",
":card_index:": u"\U0001f4c7",
":euro:": u"\U0001f4b6",
":dollar:": u"\U0001f4b5",
":fax:": u"\U0001f4e0",
":mailbox_with_mail:": u"\U0001f4ec",
":raised_hands:": u"\U0001f64c",
":disappointed:": u"\U0001f61e",
":foggy:": u"\U0001f301",
":person_with_pouting_face:": u"\U0001f64e",
":statue_of_liberty:": u"\U0001f5fd",
":dolls:": u"\U0001f38e",
":light_rail:": u"\U0001f688",
":pencil:": u"\U0001f4dd",
":speak_no_evil:": u"\U0001f64a",
":calling:": u"\U0001f4f2",
":clock830:": u"\U0001f563",
":cow2:": u"\U0001f404",
":hear_no_evil:": u"\U0001f649",
":scream_cat:": u"\U0001f640",
":smile_cat:": u"\U0001f638",
":tractor:": u"\U0001f69c",
":clock11:": u"\U0001f55a",
":doughnut:": u"\U0001f369",
":hammer:": u"\U0001f528",
":loop:": u"\U000027bf",
":moon:": u"\U0001f314",
":soon:": u"\U0001f51c",
":cinema:": u"\U0001f3a6",
":factory:": u"\U0001f3ed",
":flushed:": u"\U0001f633",
":mute:": u"\U0001f507",
":neutral_face:": u"\U0001f610",
":scorpius:": u"\U0000264f",
":wolf:": u"\U0001f43a",
":clapper:": u"\U0001f3ac",
":joy_cat:": u"\U0001f639",
":pensive:": u"\U0001f614",
":sleeping:": u"\U0001f634",
":credit_card:": u"\U0001f4b3",
":leo:": u"\U0000264c",
":man_with_gua_pi_mao:": u"\U0001f472",
":open_hands:": u"\U0001f450",
":tea:": u"\U0001f375",
":arrow_down:": u"\U00002b07",
":nine:": u"\U00000039\U000020e3",
":punch:": u"\U0001f44a",
":slot_machine:": u"\U0001f3b0",
":clap:": u"\U0001f44f",
":information_source:": u"\U00002139",
":tiger:": u"\U0001f42f",
":city_sunrise:": u"\U0001f307",
":dango:": u"\U0001f361",
":thumbsdown:": u"\U0001f44e",
":u6307:": u"\U0001f22f",
":curry:": u"\U0001f35b",
":cherries:": u"\U0001f352",
":clock6:": u"\U0001f555",
":clock7:": u"\U0001f556",
":older_man:": u"\U0001f474",
":oncoming_police_car:": u"\U0001f694",
":syringe:": u"\U0001f489",
":heavy_dollar_sign:": u"\U0001f4b2",
":open_file_folder:": u"\U0001f4c2",
":arrow_right_hook:": u"\U000021aa",
":articulated_lorry:": u"\U0001f69b",
":dancers:": u"\U0001f46f",
":kissing_cat:": u"\U0001f63d",
":rainbow:": u"\U0001f308",
":u5408:": u"\U0001f234",
":boot:": u"\U0001f462",
":carousel_horse:": u"\U0001f3a0",
":fried_shrimp:": u"\U0001f364",
":lock:": u"\U0001f512",
":non-potable_water:": u"\U0001f6b1",
":o:": u"\U00002b55",
":persevere:": u"\U0001f623",
":diamond_shape_with_a_dot_inside:": u"\U0001f4a0",
":fallen_leaf:": u"\U0001f342",
":massage:": u"\U0001f486",
":volcano:": u"\U0001f30b",
":gem:": u"\U0001f48e",
":shower:": u"\U0001f6bf",
":speaker:": u"\U0001f508",
":last_quarter_moon_with_face:": u"\U0001f31c",
":mag:": u"\U0001f50d",
":anguished:": u"\U0001f627",
":monkey_face:": u"\U0001f435",
":sunny:": u"\U00002600",
":tangerine:": u"\U0001f34a",
":point_right:": u"\U0001f449",
":railway_car:": u"\U0001f683",
":triumph:": u"\U0001f624",
":two:": u"\U00000032\U000020e3",
":gift_heart:": u"\U0001f49d",
":ledger:": u"\U0001f4d2",
":sagittarius:": u"\U00002650",
":snowflake:": u"\U00002744",
":abc:": u"\U0001f524",
":horse:": u"\U0001f434",
":ok_hand:": u"\U0001f44c",
":video_camera:": u"\U0001f4f9",
":sparkling_heart:": u"\U0001f496",
":taurus:": u"\U00002649",
":frog:": u"\U0001f438",
":hamster:": u"\U0001f439",
":helicopter:": u"\U0001f681",
":fries:": u"\U0001f35f",
":mushroom:": u"\U0001f344",
":penguin:": u"\U0001f427",
":truck:": u"\U0001f69a",
":bar_chart:": u"\U0001f4ca",
":evergreen_tree:": u"\U0001f332",
":bow:": u"\U0001f647",
":clock12:": u"\U0001f55b",
":four_leaf_clover:": u"\U0001f340",
":inbox_tray:": u"\U0001f4e5",
":smirk_cat:": u"\U0001f63c",
":two_men_holding_hands:": u"\U0001f46c",
":water_buffalo:": u"\U0001f403",
":alien:": u"\U0001f47d",
":video_game:": u"\U0001f3ae",
":candy:": u"\U0001f36c",
":page_facing_up:": u"\U0001f4c4",
":watermelon:": u"\U0001f349",
":white_check_mark:": u"\U00002705",
":blossom:": u"\U0001f33c",
":crocodile:": u"\U0001f40a",
":no_mouth:": u"\U0001f636",
":o2:": u"\U0001f17e",
":shirt:": u"\U0001f455",
":clock8:": u"\U0001f557",
":eyes:": u"\U0001f440",
":rabbit2:": u"\U0001f407",
":tanabata_tree:": u"\U0001f38b",
":wrench:": u"\U0001f527",
":es:": u"\U0001f1ea\U0001f1f8",
":trophy:": u"\U0001f3c6",
":two_women_holding_hands:": u"\U0001f46d",
":clock630:": u"\U0001f561",
":pineapple:": u"\U0001f34d",
":stuck_out_tongue:": u"\U0001f61b",
":angry:": u"\U0001f620",
":athletic_shoe:": u"\U0001f45f",
":cookie:": u"\U0001f36a",
":flags:": u"\U0001f38f",
":game_die:": u"\U0001f3b2",
":bird:": u"\U0001f426",
":jack_o_lantern:": u"\U0001f383",
":ox:": u"\U0001f402",
":paperclip:": u"\U0001f4ce",
":sleepy:": u"\U0001f62a",
":astonished:": u"\U0001f632",
":back:": u"\U0001f519",
":closed_book:": u"\U0001f4d5",
":hatching_chick:": u"\U0001f423",
":arrows_clockwise:": u"\U0001f503",
":car:": u"\U0001f697",
":ear:": u"\U0001f442",
":haircut:": u"\U0001f487",
":icecream:": u"\U0001f366",
":bust_in_silhouette:": u"\U0001f464",
":diamonds:": u"\U00002666",
":no_good:": u"\U0001f645",
":pizza:": u"\U0001f355",
":chicken:": u"\U0001f414",
":eyeglasses:": u"\U0001f453",
":see_no_evil:": u"\U0001f648",
":earth_africa:": u"\U0001f30d",
":fireworks:": u"\U0001f386",
":page_with_curl:": u"\U0001f4c3",
":rice_ball:": u"\U0001f359",
":white_square_button:": u"\U0001f533",
":cake:": u"\U0001f370",
":red_car:": u"\U0001f697",
":tm:": u"\U00002122",
":unamused:": u"\U0001f612",
":fish_cake:": u"\U0001f365",
":key:": u"\U0001f511",
":speedboat:": u"\U0001f6a4",
":closed_umbrella:": u"\U0001f302",
":pear:": u"\U0001f350",
":satellite:": u"\U0001f4e1",
":scream:": u"\U0001f631",
":first_quarter_moon:": u"\U0001f313",
":jp:": u"\U0001f1ef\U0001f1f5",
":repeat_one:": u"\U0001f502",
":shell:": u"\U0001f41a",
":interrobang:": u"\U00002049",
":trident:": u"\U0001f531",
":u55b6:": u"\U0001f23a",
":atm:": u"\U0001f3e7",
":door:": u"\U0001f6aa",
":kissing:": u"\U0001f617",
":six_pointed_star:": u"\U0001f52f",
":thumbsup:": u"\U0001f44d",
":u6708:": u"\U0001f237",
":do_not_litter:": u"\U0001f6af",
":whale2:": u"\U0001f40b",
":school_satchel:": u"\U0001f392",
":cactus:": u"\U0001f335",
":clipboard:": u"\U0001f4cb",
":dizzy:": u"\U0001f4ab",
":waxing_gibbous_moon:": u"\U0001f314",
":camera:": u"\U0001f4f7",
":capital_abcd:": u"\U0001f520",
":leaves:": u"\U0001f343",
":left_luggage:": u"\U0001f6c5",
":bamboo:": u"\U0001f38d",
":bowling:": u"\U0001f3b3",
":eight:": u"\U00000038\U000020e3",
":kimono:": u"\U0001f458",
":left_right_arrow:": u"\U00002194",
":stuck_out_tongue_winking_eye:": u"\U0001f61c",
":surfer:": u"\U0001f3c4",
":sweat:": u"\U0001f613",
":violin:": u"\U0001f3bb",
":postbox:": u"\U0001f4ee",
":bride_with_veil:": u"\U0001f470",
":recycle:": u"\U0000267b",
":station:": u"\U0001f689",
":vhs:": u"\U0001f4fc",
":crossed_flags:": u"\U0001f38c",
":memo:": u"\U0001f4dd",
":no_entry:": u"\U000026d4",
":white_circle:": u"\U000026aa",
":arrow_lower_left:": u"\U00002199",
":chestnut:": u"\U0001f330",
":crystal_ball:": u"\U0001f52e",
":last_quarter_moon:": u"\U0001f317",
":loud_sound:": u"\U0001f50a",
":strawberry:": u"\U0001f353",
":worried:": u"\U0001f61f",
":circus_tent:": u"\U0001f3aa",
":weary:": u"\U0001f629",
":bathtub:": u"\U0001f6c1",
":snake:": u"\U0001f40d",
":grin:": u"\U0001f601",
":symbols:": u"\U0001f523",
":airplane:": u"\U00002708",
":heart_eyes:": u"\U0001f60d",
":sailboat:": u"\U000026f5",
":stew:": u"\U0001f372",
":tshirt:": u"\U0001f455",
":rat:": u"\U0001f400",
":black_medium_square:": u"\U000025fc",
":clock930:": u"\U0001f564",
":full_moon_with_face:": u"\U0001f31d",
":japanese_goblin:": u"\U0001f47a",
":restroom:": u"\U0001f6bb",
":vertical_traffic_light:": u"\U0001f6a6",
":basketball:": u"\U0001f3c0",
":cherry_blossom:": u"\U0001f338",
":low_brightness:": u"\U0001f505",
":pill:": u"\U0001f48a",
# ASCII
":shrug:": u'\xaf\\_(\u30c4)_/\xaf',
":flip:": u"(\u256f\xb0\u25a1\xb0\uff09\u256f\ufe35 \u253b\u2501\u253b",
":gimmie:": u"\u0f3c \u3064 \u25d5_\u25d5 \u0f3d\u3064",
":lenny:": u"( \u0361\xb0 \u035c\u0296 \u0361\xb0)",
":yuno:": u'\u10da(\u0ca0\u76ca\u0ca0\u10da)',
":disapproval:": u'\u0ca0_\u0ca0',
}
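# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original table: the dict above maps
# ":name:" codes to unicode emoji (plus a few ASCII kaomoji). The helper
# below shows one way such a mapping can be applied to text; it takes the
# mapping as an argument because the name the table is bound to is defined
# earlier in this module and is not assumed here.
import re
def emojize(text, mapping):
    """Replace every :name: code present in `mapping` with its value."""
    pattern = re.compile(r":[A-Za-z0-9_+\-]+:")
    return pattern.sub(lambda m: mapping.get(m.group(0), m.group(0)), text)
# e.g. emojize(u"deploy :rocket: now", mapping) -> u"deploy \U0001f680 now"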
|
Miserlou/Emo
|
emo/code.py
|
Python
|
mit
| 46,810
|
[
"Octopus"
] |
44cc93338ce45bc13de682c44bb85a1f9e6f06f9d847b5e6146addcbdca4d547
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import json
import os
import unittest
from monty.json import MontyDecoder
from pymatgen.alchemy.filters import (
ContainsSpecieFilter,
RemoveDuplicatesFilter,
RemoveExistingFilter,
SpecieProximityFilter,
)
from pymatgen.alchemy.transmuters import StandardTransmuter
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Species
from pymatgen.core.structure import Structure
from pymatgen.util.testing import PymatgenTest
class ContainsSpecieFilterTest(PymatgenTest):
def test_filtering(self):
coords = [[0, 0, 0], [0.75, 0.75, 0.75], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25]]
lattice = Lattice([[3.0, 0.0, 0.0], [1.0, 3.0, 0.00], [0.00, -2.0, 3.0]])
s = Structure(
lattice,
[
{"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25},
{"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25},
{"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25},
{"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25},
],
coords,
)
species1 = [Species("Si", 5), Species("Mg", 2)]
f1 = ContainsSpecieFilter(species1, strict_compare=True, AND=False)
self.assertFalse(f1.test(s), "Incorrect filter")
f2 = ContainsSpecieFilter(species1, strict_compare=False, AND=False)
self.assertTrue(f2.test(s), "Incorrect filter")
species2 = [Species("Si", 4), Species("Mg", 2)]
f3 = ContainsSpecieFilter(species2, strict_compare=True, AND=False)
self.assertTrue(f3.test(s), "Incorrect filter")
f4 = ContainsSpecieFilter(species2, strict_compare=False, AND=False)
self.assertTrue(f4.test(s), "Incorrect filter")
species3 = [Species("Si", 5), Species("O", -2)]
f5 = ContainsSpecieFilter(species3, strict_compare=True, AND=True)
self.assertFalse(f5.test(s), "Incorrect filter")
f6 = ContainsSpecieFilter(species3, strict_compare=False, AND=True)
self.assertTrue(f6.test(s), "Incorrect filter")
species4 = [Species("Si", 4), Species("Mg", 2)]
f7 = ContainsSpecieFilter(species4, strict_compare=True, AND=True)
self.assertFalse(f7.test(s), "Incorrect filter")
f8 = ContainsSpecieFilter(species4, strict_compare=False, AND=True)
self.assertFalse(f8.test(s), "Incorrect filter")
def test_to_from_dict(self):
species1 = ["Si5+", "Mg2+"]
f1 = ContainsSpecieFilter(species1, strict_compare=True, AND=False)
d = f1.as_dict()
self.assertIsInstance(ContainsSpecieFilter.from_dict(d), ContainsSpecieFilter)
class SpecieProximityFilterTest(PymatgenTest):
def test_filter(self):
s = self.get_structure("Li10GeP2S12")
sf = SpecieProximityFilter({"Li": 1})
self.assertTrue(sf.test(s))
sf = SpecieProximityFilter({"Li": 2})
self.assertFalse(sf.test(s))
sf = SpecieProximityFilter({"P": 1})
self.assertTrue(sf.test(s))
sf = SpecieProximityFilter({"P": 5})
self.assertFalse(sf.test(s))
def test_to_from_dict(self):
sf = SpecieProximityFilter({"Li": 1})
d = sf.as_dict()
self.assertIsInstance(SpecieProximityFilter.from_dict(d), SpecieProximityFilter)
class RemoveDuplicatesFilterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "TiO2_entries.json"), "r") as fp:
entries = json.load(fp, cls=MontyDecoder)
self._struct_list = [e.structure for e in entries]
self._sm = StructureMatcher()
def test_filter(self):
transmuter = StandardTransmuter.from_structures(self._struct_list)
fil = RemoveDuplicatesFilter()
transmuter.apply_filter(fil)
self.assertEqual(len(transmuter.transformed_structures), 11)
def test_to_from_dict(self):
fil = RemoveDuplicatesFilter()
d = fil.as_dict()
self.assertIsInstance(RemoveDuplicatesFilter().from_dict(d), RemoveDuplicatesFilter)
class RemoveExistingFilterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "TiO2_entries.json"), "r") as fp:
entries = json.load(fp, cls=MontyDecoder)
self._struct_list = [e.structure for e in entries]
self._sm = StructureMatcher()
        self._existing_structures = self._struct_list[:-1]
def test_filter(self):
        fil = RemoveExistingFilter(self._existing_structures)
transmuter = StandardTransmuter.from_structures(self._struct_list)
transmuter.apply_filter(fil)
self.assertEqual(len(transmuter.transformed_structures), 1)
self.assertTrue(
self._sm.fit(
self._struct_list[-1],
transmuter.transformed_structures[-1].final_structure,
)
)
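def roundtrip_filter_example():
    """Hedged sketch, not part of the original test suite: the
    test_to_from_dict methods above exercise the as_dict()/from_dict()
    round trip; this shows the same pattern going through a JSON string
    instead of a bare dict. Only APIs used in the tests above are assumed."""
    flt = ContainsSpecieFilter(["Si5+", "Mg2+"], strict_compare=True, AND=False)
    blob = json.dumps(flt.as_dict())  # serialize the filter to JSON text
    return ContainsSpecieFilter.from_dict(json.loads(blob))  # and restore it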
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
gmatteo/pymatgen
|
pymatgen/alchemy/tests/test_filters.py
|
Python
|
mit
| 5,104
|
[
"pymatgen"
] |
9c106d253ef20e22e8960d36f5e27e23f1b0b584f368b12ddd75c5ccc54b5536
|
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from ctypes import c_double, ARRAY
from enum import IntEnum
import numpy as np
from .utils import CxxPointer, ChemfilesError
from .ffi import chfl_cellshape, chfl_vector3d
class CellShape(IntEnum):
"""
Available cell shapes in Chemfiles:
    - ``CellShape.Orthorhombic``: for cells where the three angles are 90°;
    - ``CellShape.Triclinic``: for cells where the three angles may not be 90°;
    - ``CellShape.Infinite``: for cells without periodic boundary conditions;
"""
Orthorhombic = chfl_cellshape.CHFL_CELL_ORTHORHOMBIC
Triclinic = chfl_cellshape.CHFL_CELL_TRICLINIC
Infinite = chfl_cellshape.CHFL_CELL_INFINITE
class UnitCell(CxxPointer):
"""
    An :py:class:`UnitCell` represents the box containing the atoms, and its
    periodicity.
    A unit cell is fully described by three lengths (a, b, c) and three
    angles (alpha, beta, gamma). The angles are stored in degrees, and the
    lengths in Angstroms. The cell angles are defined as follows: alpha is
    the angle between the cell vectors `b` and `c`; beta is the angle between
    `a` and `c`; and gamma is the angle between `a` and `b`.
    A cell also has a matrix representation, obtained by projecting the three
    base vectors onto an orthonormal basis. We choose to represent such a
    matrix as an upper triangular matrix:
.. code-block:: sh
| a_x b_x c_x |
| 0 b_y c_y |
| 0 0 c_z |
"""
def __init__(self, lengths, angles=(90.0, 90.0, 90.0)):
"""
Create a new :py:class:`UnitCell` with the given cell ``lengths`` and
cell ``angles``. If ``lengths`` is a 3x3 matrix, it is taken to be the
unit cell matrix, and ``angles`` is ignored.
If the three angles are equal to 90.0, the new unit cell shape is
        ``CellShape.Orthorhombic``. Else it is ``CellShape.Triclinic``.
"""
lengths = np.array(lengths)
if len(lengths.shape) == 1:
lengths = chfl_vector3d(*lengths)
angles = chfl_vector3d(*angles)
ptr = self.ffi.chfl_cell(lengths, angles)
else:
if lengths.shape != (3, 3):
raise ChemfilesError(
"expected the cell matrix to have 3x3 shape, got {}".format(
lengths.shape
)
)
            matrix = ARRAY(chfl_vector3d, 3)()
matrix[0][0] = lengths[0, 0]
matrix[0][1] = lengths[0, 1]
matrix[0][2] = lengths[0, 2]
matrix[1][0] = lengths[1, 0]
matrix[1][1] = lengths[1, 1]
matrix[1][2] = lengths[1, 2]
matrix[2][0] = lengths[2, 0]
matrix[2][1] = lengths[2, 1]
matrix[2][2] = lengths[2, 2]
ptr = self.ffi.chfl_cell_from_matrix(matrix)
super(UnitCell, self).__init__(ptr, is_const=False)
def __copy__(self):
return UnitCell.from_mutable_ptr(None, self.ffi.chfl_cell_copy(self.ptr))
def __repr__(self):
return """UnitCell(
lengths=({:.9g}, {:.9g}, {:.9g}),
angles=({:.7g}, {:.7g}, {:.7g})
)""".format(
*(self.lengths + self.angles)
)
@property
def lengths(self):
"""Get the three lengths of this :py:class:`UnitCell`, in Angstroms."""
lengths = chfl_vector3d(0, 0, 0)
self.ffi.chfl_cell_lengths(self.ptr, lengths)
return lengths[0], lengths[1], lengths[2]
@lengths.setter
def lengths(self, lengths):
"""
Set the three lengths of this :py:class:`UnitCell` to ``lengths``. The
values should be in Angstroms.
"""
a, b, c = lengths
self.ffi.chfl_cell_set_lengths(self.mut_ptr, chfl_vector3d(a, b, c))
@property
def angles(self):
"""Get the three angles of this :py:class:`UnitCell`, in degrees."""
angles = chfl_vector3d(0, 0, 0)
self.ffi.chfl_cell_angles(self.ptr, angles)
return angles[0], angles[1], angles[2]
@angles.setter
def angles(self, angles):
"""
Set the three angles of this :py:class:`UnitCell` to ``alpha``,
``beta`` and ``gamma``. These values should be in degrees. Setting
angles is only possible for ``CellShape.Triclinic`` cells.
"""
alpha, beta, gamma = angles
self.ffi.chfl_cell_set_angles(self.mut_ptr, chfl_vector3d(alpha, beta, gamma))
@property
def matrix(self):
"""
        Get the matrix representation of this :py:class:`UnitCell`.
        The matrix representation is obtained by aligning the a vector along
        the *x* axis and putting the b vector in the *xy* plane. This makes
        the matrix an upper triangular matrix:
.. code-block:: sh
| a_x b_x c_x |
| 0 b_y c_y |
| 0 0 c_z |
"""
m = ARRAY(chfl_vector3d, 3)()
self.ffi.chfl_cell_matrix(self.ptr, m)
return np.array(
(
(m[0][0], m[0][1], m[0][2]),
(m[1][0], m[1][1], m[1][2]),
(m[2][0], m[2][1], m[2][2]),
)
)
@property
def shape(self):
"""Get the shape of this :py:class:`UnitCell`."""
shape = chfl_cellshape()
self.ffi.chfl_cell_shape(self.ptr, shape)
return CellShape(shape.value)
@shape.setter
def shape(self, shape):
"""Set the shape of this :py:class:`UnitCell` to ``shape``."""
self.ffi.chfl_cell_set_shape(self.mut_ptr, chfl_cellshape(shape))
@property
def volume(self):
"""Get the volume of this :py:class:`UnitCell`."""
volume = c_double()
self.ffi.chfl_cell_volume(self.ptr, volume)
return volume.value
def wrap(self, vector):
"""
Wrap a ``vector`` in this :py:class:`UnitCell`, and return the wrapped
vector.
"""
vector = chfl_vector3d(vector[0], vector[1], vector[2])
self.ffi.chfl_cell_wrap(self.ptr, vector)
return (vector[0], vector[1], vector[2])
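# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the chemfiles API above: a quick tour of
# the UnitCell surface defined in this module, using only the constructor,
# properties and methods defined above (running it requires the chemfiles
# native library). The numbers are arbitrary examples.
if __name__ == "__main__":
    cell = UnitCell((10.0, 10.0, 10.0))  # all angles default to 90 degrees
    print(cell.shape)                    # CellShape.Orthorhombic
    print(cell.volume)                   # 1000.0 cubic Angstroms
    cell.lengths = (12.0, 10.0, 10.0)    # lengths are set in Angstroms
    print(cell.wrap((15.0, 0.0, 0.0)))   # wraps the vector back into the box
    triclinic = UnitCell((10.0, 10.0, 10.0), (80.0, 90.0, 120.0))
    print(triclinic.matrix)              # upper triangular 3x3 numpy array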
|
Luthaf/Chemharp-python
|
chemfiles/cell.py
|
Python
|
mpl-2.0
| 6,172
|
[
"Chemfiles"
] |
d4128a1a1b38a7473031123b04872561f5b0088706dc5e26e058b56f68742064
|
#! /usr/bin/env python
#coding:utf-8
import sys,os
import random
import time
import tty,termios
MAP_WIDTH = 8
MAP_HEIGHT = 5
AI_GATEKEEPER = True
class _Getch():
def __call__(self):
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def get_action():
getch = _Getch()
action = ord(getch())
if action == 27:
action = ord(getch())
if action == 27:
            ## Double press Esc to exit
return "ESC"
action = ord(getch())
if action == 65:
return "UP"
elif action == 66:
return "DOWN"
elif action == 67:
return "RIGHT"
elif action == 68:
return "LEFT"
else:
print "Pressed x1b[%s value:actionm type:3"%(chr(action),action)
return "ESC"
elif action == 3:
return "ESC"
else:
return chr(action).upper()
class MapModel():
def __init__(self):
self.width = MAP_WIDTH
self.height = MAP_HEIGHT
self.cell_size = (self.width, self.height)
self.grid_width = self.width*4+1
self.grid_height = self.height*4+1
#for each cell there should be 3*3 size empty, and 1 for edge wall
self.grid_size = (self.grid_width, self.grid_height)
self.item_list = ["unknown","wall","space",\
"door_1","door_2","door_3","door_4",\
"key_1","key_2","key_3","key_4",\
"player_A","player_B","gatekeeper"]
self.grid_dict = {x:{y:"space" for y in xrange(self.grid_height)} \
for x in xrange(self.grid_width)} \
#{[grid_x][grid_y]: itemid}
#initialize grid map
self.cell_dict = {x:{y:[] for y in xrange(self.height)} \
for x in xrange(self.width)}
#{[cell_x][cell_y]: [itemid]}
#initialize cell map
self.player_loc = {}
#itemid: [cell_x][cell_y]
self._generate_random_map()
def _generate_random_map(self):
#generate wall
for x in xrange(self.grid_width):
for y in xrange(self.grid_height):
if (y%4 == 0) or (x %4 == 0):
self.grid_dict[x][y] = "wall"
#generate door list
door_list = []
for x in xrange(1,self.grid_width-1):
for y in xrange(1,self.grid_height-1):
if x%2 == 0 and x%4 != 0 and y%4 == 0:
door_list.append((x,y))
if y%2 == 0 and y%4 != 0 and x%4 == 0:
door_list.append((x,y))
#Randomized Kruskal's algorithm
#initiate door2cell and cell2set
        door2cell = {} #door: the two cells linked by this door
        cell2set = {} #initially each cell is in its own singleton set
for door in door_list:
if door[0]%4 != 0:
door2cell[door] = (((door[0]-2)/4,door[1]/4),((door[0]-2)/4,(door[1]-4)/4))
if door[1]%4 != 0:
door2cell[door] = ((door[0]/4,(door[1]-2)/4),((door[0]-4)/4,(door[1]-2)/4))
for x in xrange(self.width):
for y in xrange(self.height):
cell2set[(x,y)] = set([(x,y)])
#shuffle door list
random.shuffle(door_list)
open_door_list = [] #the opened door list
        #if the two cells linked by a door are not in the same set, open the door and merge the sets
for door in door_list:
cell1,cell2 = door2cell[door]
if cell2set[cell1] != cell2set[cell2]:
open_door_list.append(door)
cell2set[cell2] |= cell2set[cell1]
for cell in cell2set[cell2]:
cell2set[cell] = cell2set[cell2]
for (x,y) in open_door_list:
self.grid_dict[x][y] = "space"
#set rest door to closed door
for (x,y) in set(door_list) - set(open_door_list):
self.grid_dict[x][y] = random.choice(["door_1","door_2","door_3","door_4"])
#four kinds of doors
#drop keys in inner cells
inner_cells = []
for x in xrange(1,self.width-1):
for y in xrange(1,self.height-1):
inner_cells.append((x,y))
random.shuffle(inner_cells)
for i,key in enumerate(["key_1","key_2","key_3","key_4"]):
# four kinds of keys, each can open one kind of doors
x,y = inner_cells[i]
self.cell_dict[x][y].append(key)
#Add players in outer cells
outer_cells = []
for x in xrange(self.width):
for y in xrange(self.height):
if x == 0 or x == self.width-1 or y == 0 or y == self.height-1:
outer_cells.append((x,y))
random.shuffle(outer_cells)
for i,player in enumerate(["player_A","player_B","gatekeeper"]):
x,y = outer_cells[i]
self.cell_dict[x][y].append(player)
self.player_loc[player] = (x,y)
class MapView():
def __init__(self):
###Object Colors
self.grey = "\033[90m%s\033[0m"
self.red = "\033[91m%s\033[0m"
self.green = "\033[92m%s\033[0m"
self.yellow = "\033[93m%s\033[0m"
self.purple = "\033[94m%s\033[0m"
self.pink = "\033[95m%s\033[0m"
self.blue = "\033[96m%s\033[0m"
###Grid Item Represents
self.item2represent = {
"unknown":".",\
"wall" :unichr(0x2588), # This is unicode for a full block
"space" : " ",
"door_1" : self.red%unichr(0x2588),
"door_2" : self.green%unichr(0x2588),
"door_3" : self.purple%unichr(0x2588),
"door_4" : self.pink%unichr(0x2588),
"key_1" : self.red%"F",
"key_2" : self.green%"F",
"key_3" : self.purple%"F",
"key_4" : self.pink%"F",
"player_A" : self.blue%"A",
"player_B" : self.yellow%"B",
"gatekeeper" : self.grey%"G",
"message_A" : self.blue%"@",
"message_B" : self.yellow%"@",
"empty" : "_"
}
def show_map(self,mapm,turn,step,package_dict):
os.system("clear")
###Add grid_items
content_1 = [[self.item2represent[mapm.grid_dict[x][y]] \
for x in xrange(mapm.grid_width)] \
for y in xrange(mapm.grid_height)]
###Add cell_items
for x in xrange(mapm.width):
for y in xrange(mapm.height):
if len(mapm.cell_dict[x][y]) == 0:
continue
elif len(mapm.cell_dict[x][y]) == 1:
content_1[y*4+2][x*4+2] = self.item2represent[mapm.cell_dict[x][y][0]]
##if only one item in that cell, put it in the center of the cell
else:
count_item = len(mapm.cell_dict[x][y])
locuses = [(4*x+2,4*y+2),(4*x+1,4*y+1),(4*x+2,4*y+1),\
(4*x+3,4*y+1),(4*x+1,4*y+2),(4*x+3,4*y+2),\
(4*x+1,4*y+3),(4*x+2,4*y+3),(4*x+3,4*y+3)][:count_item]
##Represent 9 items in cell at most
for item,loc in zip(mapm.cell_dict[x][y][0:9],locuses):
content_1[loc[1]][loc[0]] = self.item2represent[item]
###Content info: User Info
content_2 = ["","",turn,""]
content_2 += ["################################"]
##replace itemsid with items represents
for itemid in ["message_A","message_B","key_1","key_2","key_3","key_4"]:
if itemid in step:
step = step.replace(itemid,self.item2represent[itemid])
content_2 += step.split("\t")
content_2 += ["################################","",""]
for player in ["player_A","player_B","gatekeeper"]:
content_2.append("%s's Package"%player)
player_package = [self.item2represent[item] for item in package_dict[player]]
content_2 += [" ".join(player_package),""]
for i in xrange(len(content_1)):
line_1 = "".join(content_1[i])
line_2 = content_2[i] if i < len(content_2) else ""
sys.stdout.write("%s %s\n"%(line_1,line_2))
class GameController():
def __init__(self):
self.gamemap = MapModel()
self.mapview = MapView()
self.package_dict = {"player_A":["message_A","message_A","message_A","message_A"],\
"player_B":["message_B","message_B","message_B","message_B"],\
"gatekeeper":["empty","empty","empty","empty"]}
def _playerturn(self,player):
turn_info = "%s's Turn"%player
step_info = "Rolling Dicer...\t Press anykey to stop"
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
get_action()
step_left = random.randint(1,6)
step_info = "You got %d step left\t"%step_left
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
##Move
while step_left > 0:
step_info = "You got %d step left\t"%step_left
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
move = get_action()
## If not valid_move, continue
valid_move = False
##Exit if move == "ESC" (double pres Esc or control+c)
if move == "ESC":
return "ESC"
##TODO save current game/ensure exit
if move not in set(["UP","DOWN","RIGHT","LEFT"]):
continue
current_cell = self.gamemap.player_loc[player]
if move == "UP":
adj_cell = (current_cell[0],current_cell[1]-1)
elif move == "DOWN":
adj_cell = (current_cell[0],current_cell[1]+1)
elif move == "LEFT":
adj_cell = (current_cell[0]-1,current_cell[1])
elif move == "RIGHT":
adj_cell = (current_cell[0]+1,current_cell[1])
grid_between_cell = (2*current_cell[0]+2*adj_cell[0]+2,\
2*current_cell[1]+2*adj_cell[1]+2)
            item_between_cell = self.gamemap.grid_dict[grid_between_cell[0]]\
[grid_between_cell[1]]
##Check if adjacent is wall or space
if item_bewteen_cell == "wall":
continue
elif item_bewteen_cell == "space":
valid_move = True
else:
                ##There should be no fourth option except for ["wall","space","door_x"]
                assert item_between_cell.startswith("door_")
door_id = item_bewteen_cell.split("_")[1]
for item in self.package_dict[player]:
                    ##If player has a key and the key number == door number
if item.startswith("key_") and item.split("_")[1] == door_id:
valid_move = True
if valid_move:
## If it is a valid_move, move and continue
self.gamemap.cell_dict[current_cell[0]][current_cell[1]].remove(player)
self.gamemap.cell_dict[adj_cell[0]][adj_cell[1]].append(player)
self.gamemap.player_loc[player] = adj_cell
step_left -= 1
##Do Action after move
##Only one action can be done each turn
step_info = "Action: (D)rop (P)ick (E)nd\t"
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
action = get_action()
player_loc = self.gamemap.player_loc[player]
cell_items = self.gamemap.cell_dict[player_loc[0]][player_loc[1]]
while action not in set(["D","P","E","ESC"]):
step_info = "%s id not a valid action\tAction: (D)rop (P)ick (E)nd"%action
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
action = get_action()
if action == "ESC":
return "ESC"
elif action == "E":
step_info = "\tYour Turn is End"
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
time.sleep(1)
return 1
elif action == "D":
dropable_items = ["message_A","message_B","key_1","key_2","key_3","key_4"]
dropable_items_in_package = []
for item in self.package_dict[player]:
if item in dropable_items:
dropable_items_in_package.append(item)
dropable_items_in_package = list(set(dropable_items_in_package))
## If cell is full, you can not drop anything
if len(cell_items) >= 9:
step_info = "Cell is full, you can not drop here\tYour Turn is End"
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
time.sleep(1)
return 1
## If player's package has nothing, end turn
if len(dropable_items_in_package) == 0:
step_info = "You have nothing to Drop, \tYour Turn is End"
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
time.sleep(1)
return 1
if len(dropable_items_in_package) == 1:
choice = 0
else:
drop_choice = []
for i,item in enumerate(dropable_items_in_package):
##drop_choice index starts from 1, while real list index starts from 0
drop_choice.append("%d:%s"%(i+1,item))
step_info = "Choose Something to Drop, \t %s"%" ".join(drop_choice)
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
choice = get_action()
if choice not in set([str(i+1) for i in xrange(len(dropable_items_in_package))]):
step_info = "%s is not a valid choice\tYour Turn is End"%choice
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
time.sleep(1)
return 1
else:
choice = int(choice)-1
item = dropable_items_in_package[choice]
cell_items.append(item)
##replace first item with "empty"
self.package_dict[player][self.package_dict[player].index(item)] = "empty"
step_info = "You droped %s \tYour Turn is End"%item
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
time.sleep(1)
return 1
elif action == "P":##Pick up something is possible
pickable_items = ["message_A","message_B","key_1","key_2","key_3","key_4"]
pickable_items_in_cell = []
for item in cell_items:
if item in pickable_items:
pickable_items_in_cell.append(item)
##filter out redundant items
pickable_items_in_cell = list(set(pickable_items_in_cell))
if len(pickable_items_in_cell) == 0:
step_info = "There is nothing pickable in this cell \t Your Turn is End"
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
time.sleep(1)
return 1
if "empty" not in self.package_dict[player]:
step_info = "Your package is full, drop something first \t Your Turn is End"
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
time.sleep(1)
return 1
## if only one kind of item in that cell, pick that
if len(pickable_items_in_cell) == 1:
choice = 0
else:
pick_choice = []
for i,item in enumerate(pickable_items_in_cell):
pick_choice.append("%d:%s"%(i+1,item))
step_info = "Pickup something \t %s"%" ".join(pick_choice)
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
choice = get_action()
if choice not in set([str(i+1) for i in xrange(len(pickable_items_in_cell))]):
step_info = "%s is not a valid choice\tYour Turn is End"%choice
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
time.sleep(1)
return 1
else:
choice = int(choice)-1
            item = pickable_items_in_cell[choice]
cell_items.remove(item)
self.package_dict[player][self.package_dict[player].index("empty")] = item
step_info = "Picked %s \t Your Turn is End"%(item)
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
time.sleep(1)
return 1
    def _AIgatekeeper_turn(self):
player = "gatekeeper"
turn_info = "AIgatekeeper's Turn"
step_info = "Rolling Dicer...\t"
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
time.sleep(1)
step_left = random.randint(1,6)
step_info = "AIgatekeeper got %d step left\t"%(step_left)
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
time.sleep(1)
##Target locate
##If there is any message on map, target = All msg
##If there is no message on map, target = Player_B location
target_loc_set = set([])
for x in xrange(self.gamemap.width):
for y in xrange(self.gamemap.height):
for items in self.gamemap.cell_dict[x][y]:
if items.startswith("message"):
target_loc_set.add((x,y))
if len(target_loc_set) == 0:
target_loc_set.add(self.gamemap.player_loc["player_B"])
start_cell = self.gamemap.player_loc[player]
visited_cells = set([start_cell])
target_paths = []
        ##if start_cell is already a target location, add it to target_paths
if start_cell in target_loc_set:
target_paths.append([start_cell])
temp_paths = [[start_cell]]
        ##temp_paths is a FIFO queue of paths (BFS), recording all paths to target_loc
while temp_paths:
current_path = temp_paths.pop(0)
current_cell = current_path[-1]
x,y = current_cell
for adj_cell in [(x-1,y),(x,y-1),(x+1,y),(x,y+1)]:
grid_between_cell = (2*current_cell[0]+2*adj_cell[0]+2,\
2*current_cell[1]+2*adj_cell[1]+2)
                item_between_cell = self.gamemap.grid_dict[grid_between_cell[0]]\
[grid_between_cell[1]]
                ##if adj_cell is reachable and has not been visited yet, visit it
                if item_between_cell != "space" or adj_cell in visited_cells:
continue
current_path_clone = [a for a in current_path]
current_path_clone.append(adj_cell)
visited_cells.add(adj_cell)
if adj_cell in target_loc_set:
target_paths.append(current_path_clone)
temp_paths.append(current_path_clone)
for path in target_paths:
# The first cell of path == current gatekeeper cell, so starts with 1
if len(path[1:]) == 0:
                ##Path has only one cell: the gatekeeper is already in that cell
                ##Find an adjacent reachable cell and walk back and forth between the two cells
x,y = path[0]
last_node = path[0]
for adj_cell in [(x-1,y),(x,y-1),(x+1,y),(x,y+1)]:
grid_between_cell = (2*x+2*adj_cell[0]+2,\
2*y+2*adj_cell[1]+2)
                    item_between_cell = self.gamemap.grid_dict[grid_between_cell[0]]\
[grid_between_cell[1]]
if item_bewteen_cell == "space":
second_last_node = adj_cell
break
                for i in xrange(step_left):
if i%2 == 0:
path.append(second_last_node)
else:
path.append(last_node)
elif len(path[1:]) < step_left:
#if path to target < step_left, fill it with last two step
last_node = path[-1]
second_last_node = path[-2]
for i in xrange(step_left - len(path[1:])):
if i%2 == 0:
path.append(second_last_node)
else:
path.append(last_node)
best_path = None
second_path = None
other_path = None
for path in target_paths:
if len(path[1:]) == step_left and path[-1] in target_loc_set:
best_path = path
break
elif len(path[1:]) == step_left:
second_path = path
else:
other_path = path
if best_path != None:
gatekeeper_path = best_path
elif second_path != None:
gatekeeper_path = second_path
else:
gatekeeper_path = other_path
last_cell = gatekeeper_path.pop(0)
while gatekeeper_path and step_left != 0:
current_cell = gatekeeper_path.pop(0)
self.gamemap.cell_dict[last_cell[0]][last_cell[1]].remove(player)
self.gamemap.cell_dict[current_cell[0]][current_cell[1]].append(player)
self.gamemap.player_loc[player] = current_cell
step_left -= 1
step_info = "AIgatekeeper got %d step left\t"%(step_left)
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
time.sleep(0.5)
last_cell = current_cell
for item in self.gamemap.cell_dict[last_cell[0]][last_cell[1]]:
##if cell has message, pickup message
if item.startswith("message_"):
self.gamemap.cell_dict[last_cell[0]][last_cell[1]].remove(item)
self.package_dict[player][self.package_dict[player].index("empty")] = item
step_info = "Picked up %s\t AIgatekeeper's Turn is End"%(item)
self.mapview.show_map(self.gamemap,turn_info,step_info,self.package_dict)
time.sleep(1)
return 1
return 1
def play(self,AIgatekeeper=None):
        if AIgatekeeper is None:
            AIgatekeeper = False
game_not_end = True
while game_not_end:
for player in ["player_A","player_B","gatekeeper"]:
if AIgatekeeper and player == "gatekeeper":
                    flag = self._AIgatekeeper_turn()
else:
flag = self._playerturn(player)
if flag == "ESC":
game_not_end = False
break
if set(self.package_dict["gatekeeper"]) == set(["message_A","message_B"]):
self.mapview.show_map(self.gamemap,""," GateKeeper WIN\t",self.package_dict)
game_not_end = False
break
elif self.package_dict["player_A"].count("message_B") +\
self.package_dict["player_B"].count("message_A") >= 5:
self.mapview.show_map(self.gamemap,"","Player_A & Player_B WIN\t\t",self.package_dict)
game_not_end = False
break
else:
continue
def main():
game = GameController()
game.play(AI_GATEKEEPER)
if __name__ == "__main__":
main()
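# ---------------------------------------------------------------------------
# Hedged sketch, not part of the game above: MapModel._generate_random_map
# implements randomized Kruskal by merging plain Python sets, re-pointing the
# set reference of every member cell after each merge. The classic
# formulation uses a disjoint-set (union-find) structure instead; the
# standalone variant below shows that approach. The function name and the
# (width, height, doors, door2cell) interface are illustrative assumptions
# mirroring the structures built above.
def kruskal_open_doors(width, height, doors, door2cell, rng=random):
    """Return the subset of doors to open so that every cell is reachable."""
    parent = dict(((x, y), (x, y)) for x in xrange(width) for y in xrange(height))
    def find(cell):
        # Follow parent pointers to the root, halving the path as we go.
        while parent[cell] != cell:
            parent[cell] = parent[parent[cell]]
            cell = parent[cell]
        return cell
    opened = []
    shuffled = list(doors)
    rng.shuffle(shuffled)
    for door in shuffled:
        root_a, root_b = find(door2cell[door][0]), find(door2cell[door][1])
        if root_a != root_b:  # the two cells are in different components
            parent[root_a] = root_b  # union: merge the components
            opened.append(door)
    return opened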
|
bingwang619/Game_GetMessage
|
GetMessage.py
|
Python
|
gpl-2.0
| 24,461
|
[
"VisIt"
] |
f82771c5c66099ba9e1557cd7d9dc9b096e046bb9ef0755569ba84c873f8e4f9
|
import codecs
import os
from PyQt4.QtCore import Qt
from Code import ControlPosicion
from Code import Gestor
from Code import Jugada
from Code import PGN
from Code import Partida
from Code.QT import DatosNueva
from Code.QT import Iconos
from Code.QT import PantallaGM
from Code.QT import QTUtil
from Code.QT import QTUtil2
from Code.QT import QTVarios
from Code import TrListas
from Code import Tutor
from Code import Util
from Code import VarGen
from Code.Constantes import *
class GestorEntPos(Gestor.Gestor):
def ponEntreno(self, entreno):
        # Saves the last training position in the trainings db
self.entreno = entreno
def guardaPosicion(self, posEntreno):
db = Util.DicSQL(self.configuracion.ficheroTrainings)
data = db[self.entreno]
if data is None:
data = {}
data["POSULTIMO"] = posEntreno
db[self.entreno] = data
db.close()
def inicio(self, posEntreno, numEntrenos, titEntreno, liEntrenos, siTutorActivado=None, saltoAutomatico=False):
if hasattr(self, "reiniciando"):
if self.reiniciando:
return
self.reiniciando = True
if siTutorActivado is None:
siTutorActivado = (VarGen.dgtDispatch is None) and self.configuracion.tutorActivoPorDefecto
self.posEntreno = posEntreno
self.guardaPosicion(posEntreno)
self.numEntrenos = numEntrenos
self.titEntreno = titEntreno
self.liEntrenos = liEntrenos
self.saltoAutomatico = saltoAutomatico
self.liHistorico = [self.posEntreno]
self.ayudas = 99999
fenInicial = self.liEntrenos[self.posEntreno - 1].strip()
self.fenInicial = fenInicial
self.rivalPensando = False
self.dicEtiquetasPGN = None
        # Dirigido (guided mode)
etiDirigido = ""
self.siDirigido = False
self.siDirigidoSeguir = None
self.siDirigidoVariantes = False
solucion = None
siPartidaOriginal = False
if "|" in fenInicial:
li = fenInicial.split("|")
fenInicial = li[0]
if fenInicial.endswith(" 0"):
fenInicial = fenInicial[:-1] + "1"
nli = len(li)
if nli >= 2:
etiDirigido = li[1]
            # # Solution
if nli >= 3:
solucion = li[2]
if solucion:
self.dicDirigidoFen = PGN.leeEntDirigido(fenInicial, solucion)
self.siDirigido = len(self.dicDirigidoFen) > 0
            # Original game
if nli >= 4:
if nli > 4:
txt = "|".join(li[3:])
else:
txt = li[3]
txt = txt.replace("]", "]\n").replace(" [", "[")
pgn = PGN.UnPGN()
pgn.leeTexto(txt)
partida = pgn.partida
siEstaFen = False
njug = partida.numJugadas()
for n in range(njug - 1, -1, -1):
jg = partida.jugada(n)
if jg.posicion.fen() == fenInicial:
siEstaFen = True
if n + 1 != njug:
partida.liJugadas = partida.liJugadas[:n + 1]
partida.ultPosicion = jg.posicion.copia()
break
if siEstaFen:
siPartidaOriginal = True
self.partida = partida
self.pgn.partida = partida
self.dicEtiquetasPGN = pgn.dic
# if etiDirigido:
# etiDirigido += "<br>"
# for k, v in pgn.dic.iteritems():
# if k.upper() != "FEN":
# if etiDirigido:
# etiDirigido += "<br>"
# etiDirigido += "%s: <b>%s</b>"%(k,v)
cp = ControlPosicion.ControlPosicion()
cp.leeFen(fenInicial)
self.fen = fenInicial
siBlancas = cp.siBlancas
if not siPartidaOriginal:
self.partida.reset(cp)
if solucion:
tmp_pgn = PGN.UnPGN()
tmp_pgn.leeTexto('[FEN "%s"]\n%s' % (fenInicial, solucion))
if tmp_pgn.partida.firstComment:
self.partida.setFirstComment(tmp_pgn.partida.firstComment, True)
self.partida.pendienteApertura = False
self.tipoJuego = kJugEntPos
self.siJuegaHumano = False
self.estado = kJugando
self.siJuegaPorMi = True
self.siJugamosConBlancas = siBlancas
self.siRivalConBlancas = not siBlancas
self.liVariantes = []
self.rmRival = None
self.siTutorActivado = siTutorActivado
self.pantalla.ponActivarTutor(self.siTutorActivado)
self.ayudasPGN = 0
liOpciones = [k_mainmenu, k_cambiar, k_reiniciar, k_atras]
if self.dicEtiquetasPGN:
liOpciones.append(k_pgnInformacion)
liOpciones.extend((k_configurar, k_utilidades))
if self.numEntrenos > 1:
liOpciones.extend((k_anterior, k_siguiente))
self.liOpcionesToolBar = liOpciones
self.pantalla.ponToolBar(liOpciones)
self.pantalla.activaJuego(True, False, siAyudas=False)
self.pantalla.quitaAyudas(False, False)
self.ponMensajero(self.mueveHumano)
self.ponPosicion(self.partida.ultPosicion)
self.mostrarIndicador(True)
self.ponPiezasAbajo(siBlancas)
titulo = "<b>%s</b>" % TrListas.dicTraining().get(self.titEntreno, self.titEntreno)
if etiDirigido:
titulo += "<br>%s" % etiDirigido
self.ponRotulo1(titulo)
self.ponRotulo2("%d / %d" % (posEntreno, numEntrenos))
self.pgnRefresh(True)
QTUtil.xrefreshGUI()
if self.xrival is None:
self.xrival = self.procesador.creaGestorMotor(self.configuracion.tutor, self.configuracion.tiempoTutor, self.configuracion.depthTutor)
self.siAnalizadoTutor = False
self.ponPosicionDGT()
if siPartidaOriginal:
# self.ponteAlFinal()
self.repiteUltimaJugada()
self.reiniciando = False
self.rivalPensando = False
self.siguienteJugada()
def procesarAccion(self, clave):
if clave == k_mainmenu:
self.finPartida()
elif clave == k_atras:
self.atras()
elif clave == k_reiniciar:
self.reiniciar()
elif clave == k_variantes:
self.lanzaVariantes()
elif clave == k_configurar:
self.configurar(siSonidos=True, siCambioTutor=True)
elif clave == k_cambiar:
self.ent_otro()
elif clave == k_utilidades:
if "/Tactics/" in self.entreno:
liMasOpciones = []
else:
liMasOpciones = [("tactics", _("Create tactics training"), Iconos.Tacticas()),
(None, None, None)]
liMasOpciones.append(("play", _('Play current position'), Iconos.MoverJugar()))
resp = self.utilidades(liMasOpciones)
if resp == "tactics":
self.createTactics()
elif resp == "play":
self.jugarPosicionActual()
elif clave == k_pgnInformacion:
self.pgnInformacionMenu(self.dicEtiquetasPGN)
elif clave in (k_siguiente, k_anterior):
self.ent_siguiente(clave)
elif clave == k_peliculaSeguir:
self.sigue()
elif clave in self.procesador.liOpcionesInicio:
self.procesador.procesarAccion(clave)
else:
Gestor.Gestor.rutinaAccionDef(self, clave)
def reiniciar(self):
if self.rivalPensando:
return
self.inicio(self.posEntreno, self.numEntrenos, self.titEntreno, self.liEntrenos, self.siTutorActivado, self.saltoAutomatico)
def ent_siguiente(self, tipo):
if not (self.siJuegaHumano or self.estado == kFinJuego):
return
pos = self.posEntreno + (+1 if tipo == k_siguiente else -1)
if pos > self.numEntrenos:
pos = 1
elif pos == 0:
pos = self.numEntrenos
self.inicio(pos, self.numEntrenos, self.titEntreno, self.liEntrenos, self.siTutorActivado, self.saltoAutomatico)
def controlTeclado(self, nkey):
if nkey in (Qt.Key_Plus, Qt.Key_PageDown):
self.ent_siguiente(k_siguiente)
elif nkey in (Qt.Key_Minus, Qt.Key_PageUp):
self.ent_siguiente(k_anterior)
elif nkey == Qt.Key_T:
li = self.fenInicial.split("|")
li[2] = self.partida.pgnBaseRAW()
self.saveSelectedPosition("|".join(li))
def listHelpTeclado(self):
return [
("+/%s"%_("Page Down"), _("Next position")),
("-/%s"%_("Page Up"), _("Previous position")),
("T", _("Save position in 'Selected positions' file")),
]
def finPartida(self):
self.procesador.inicio()
def finalX(self):
self.finPartida()
return False
def atras(self):
if self.rivalPensando:
return
if self.partida.numJugadas():
self.partida.anulaUltimoMovimiento(self.siJugamosConBlancas)
self.ponteAlFinal()
self.siAnalizadoTutor = False
self.estado = kJugando
self.refresh()
self.siguienteJugada()
def siguienteJugada(self):
if self.estado == kFinJuego:
if self.siDirigido and self.saltoAutomatico:
self.ent_siguiente(k_siguiente)
return
self.siPiensaHumano = False
self.compruebaComentarios()
self.estado = kJugando
self.siJuegaHumano = False
self.ponVista()
siBlancas = self.partida.ultPosicion.siBlancas
if self.partida.numJugadas() > 0:
jgUltima = self.partida.last_jg()
if jgUltima.siJaqueMate:
self.ponResultado(kGanaRival if self.siJugamosConBlancas == siBlancas else kGanamos)
return
if jgUltima.siAhogado:
self.ponResultado(kTablas)
return
if jgUltima.siTablasRepeticion:
self.ponResultado(kTablasRepeticion)
return
if jgUltima.siTablas50:
self.ponResultado(kTablas50)
return
if jgUltima.siTablasFaltaMaterial:
self.ponResultado(kTablasFaltaMaterial)
return
self.ponIndicador(siBlancas)
self.refresh()
siRival = siBlancas == self.siRivalConBlancas
if siRival:
self.piensaRival()
else:
self.piensaHumano(siBlancas)
def piensaHumano(self, siBlancas):
fen = self.partida.ultPosicion.fen()
if self.siDirigido and (fen in self.dicDirigidoFen) \
and not self.dicDirigidoFen[fen] and self.siTutorActivado:
self.lineaTerminadaOpciones()
return
self.siJuegaHumano = True
self.activaColor(siBlancas)
def piensaRival(self):
self.rivalPensando = True
pensarRival = True
fen = self.partida.ultPosicion.fen()
if self.siDirigido and self.siTutorActivado:
my_last_fen = self.dicDirigidoFen.keys()[-1]
if (fen in self.dicDirigidoFen) and (fen != my_last_fen):
liOpciones = self.dicDirigidoFen[fen]
if liOpciones:
liJugadas = []
for siMain, jg in liOpciones:
desde, hasta, coronacion = jg.desde, jg.hasta, jg.coronacion
if not self.siDirigidoVariantes:
if siMain:
liJugadas = []
break
rotulo = _("Main line") if siMain else ""
pgn = self.partida.ultPosicion.pgn(desde, hasta, coronacion)
liJugadas.append((desde, hasta, coronacion, rotulo, pgn))
if len(liJugadas) > 1:
desde, hasta, coronacion = PantallaGM.eligeJugada(self, liJugadas, False)
if len(liOpciones) > 1:
self.guardaVariantes()
pensarRival = False
if pensarRival and self.siDirigidoSeguir is None:
self.lineaTerminadaOpciones()
self.rivalPensando = False
return
if pensarRival:
self.pensando(True)
self.desactivaTodas()
self.rmRival = self.xrival.juega()
self.pensando(False)
desde, hasta, coronacion = self.rmRival.desde, self.rmRival.hasta, self.rmRival.coronacion
if self.mueveRival(desde, hasta, coronacion):
self.rivalPensando = False
self.siguienteJugada()
else:
self.rivalPensando = False
def sigue(self):
self.estado = kJugando
self.siDirigido = False
self.siDirigidoSeguir = True
if k_peliculaSeguir in self.liOpcionesToolBar:
del self.liOpcionesToolBar[self.liOpcionesToolBar.index(k_peliculaSeguir)]
self.pantalla.ponToolBar(self.liOpcionesToolBar)
self.siguienteJugada()
def lineaTerminadaOpciones(self):
self.estado = kFinJuego
if self.saltoAutomatico:
self.ent_siguiente(k_siguiente)
return False
else:
QTUtil2.mensajeTemporal(self.pantalla, _("This line training is completed."), 0.7)
if not self.siTerminada():
if k_peliculaSeguir not in self.liOpcionesToolBar:
self.liOpcionesToolBar.insert(4, k_peliculaSeguir)
self.pantalla.ponToolBar(self.liOpcionesToolBar)
return False
def mueveHumano(self, desde, hasta, coronacion=None):
jg = self.checkMueveHumano(desde, hasta, coronacion)
if not jg:
return False
movimiento = jg.movimiento()
siMirarTutor = self.siTutorActivado
if self.siTeclaPanico:
self.sigueHumano()
return False
if siMirarTutor:
fen = self.partida.ultPosicion.fen()
if self.siDirigido and fen in self.dicDirigidoFen:
liOpciones = self.dicDirigidoFen[fen]
if len(liOpciones) > 1:
self.guardaVariantes()
liMovs = []
siEsta = False
posMain = None
for siMain, jg1 in liOpciones:
mv = jg1.movimiento()
if siMain:
posMain = mv[:2]
if mv.lower() == movimiento.lower():
if self.siDirigidoVariantes:
siEsta = True
else:
siEsta = siMain
if siEsta:
break
liMovs.append((jg1.desde, jg1.hasta, siMain))
if not siEsta:
self.ponPosicion(self.partida.ultPosicion)
if posMain and posMain != movimiento[:2]:
self.tablero.markPosition(posMain)
else:
self.tablero.ponFlechasTmp(liMovs)
self.sigueHumano()
return False
else:
if not self.siAnalizadoTutor:
self.analizaTutor()
if self.mrmTutor.mejorMovQue(movimiento):
if not jg.siJaqueMate:
tutor = Tutor.Tutor(self, self, jg, desde, hasta, False)
if tutor.elegir(True):
self.reponPieza(desde)
desde = tutor.desde
hasta = tutor.hasta
coronacion = tutor.coronacion
siBien, mens, jgTutor = Jugada.dameJugada(self.partida.ultPosicion, desde, hasta,
coronacion)
if siBien:
jg = jgTutor
del tutor
self.mrmTutor = None
if self.siTeclaPanico:
self.sigueHumano()
return False
self.movimientosPiezas(jg.liMovs)
self.partida.ultPosicion = jg.posicion
self.masJugada(jg, True)
self.error = ""
if self.siTutorActivado and self.siDirigido and (self.partida.ultPosicion.fen() not in self.dicDirigidoFen):
self.lineaTerminadaOpciones()
self.siguienteJugada()
return True
def masJugada(self, jg, siNuestra):
self.partida.append_jg(jg)
self.partida.ultPosicion = jg.posicion
# Check whether the game is over and, if so, flag mate or stalemate
if self.siTerminada():
jg.siJaqueMate = jg.siJaque
jg.siAhogado = not jg.siJaque
self.estado = kFinJuego
resp = self.partida.si3repetidas()
if resp:
jg.siTablasRepeticion = True
rotulo = ""
for j in resp:
rotulo += "%d," % (j / 2 + 1,)
rotulo = rotulo.strip(",")
self.rotuloTablasRepeticion = rotulo
if self.partida.ultPosicion.movPeonCap >= 100:
jg.siTablas50 = True
if self.partida.ultPosicion.siFaltaMaterial():
jg.siTablasFaltaMaterial = True
self.ponFlechaSC(jg.desde, jg.hasta)
self.beepExtendido(siNuestra)
self.pgnRefresh(self.partida.ultPosicion.siBlancas)
self.refresh()
self.ponPosicionDGT()
def mueveRival(self, desde, hasta, coronacion):
siBien, mens, jg = Jugada.dameJugada(self.partida.ultPosicion, desde, hasta, coronacion)
if siBien:
self.siAnalizadoTutor = False
self.partida.ultPosicion = jg.posicion
if self.siTutorActivado:
if not self.siDirigido:
self.analizaTutor() # Analyze before handing control to the human, so they do not have to wait
self.siAnalizadoTutor = True
self.masJugada(jg, False)
self.movimientosPiezas(jg.liMovs, True)
self.error = ""
if self.siTutorActivado and self.siDirigido and ((self.partida.ultPosicion.fen() not in self.dicDirigidoFen)):
self.lineaTerminadaOpciones()
return True
else:
self.error = mens
return False
def ponResultado(self, quien):
self.resultado = quien
self.desactivaTodas()
self.siJuegaHumano = False
self.estado = kFinJuego
if quien == kTablasRepeticion:
self.resultado = kTablas
elif quien == kTablas50:
self.resultado = kTablas
elif quien == kTablasFaltaMaterial:
self.resultado = kTablas
self.desactivaTodas()
self.refresh()
def ent_otro(self):
pos = DatosNueva.numEntrenamiento(self.pantalla, self.titEntreno, self.numEntrenos, pos=self.posEntreno)
if pos is not None:
self.posEntreno = pos
self.reiniciar()
def guardaVariantes(self):
njug = self.partida.numJugadas()
siBlancas = self.partida.siBlancas()
if njug:
jg = self.partida.last_jg()
numj = self.partida.primeraJugada() + (njug + 1) / 2 - 1
titulo = "%d." % numj
if siBlancas:
titulo += "... "
titulo += jg.pgnSP()
else:
titulo = _("Start position")
for tit, txtp, siBlancas in self.liVariantes:
if titulo == tit:
return
self.liVariantes.append((titulo, self.partida.guardaEnTexto(), siBlancas))
if len(self.liVariantes) == 1:
if k_variantes not in self.liOpcionesToolBar:
self.liOpcionesToolBar.append(k_variantes)
self.pantalla.ponToolBar(self.liOpcionesToolBar)
def lanzaVariantes(self):
icoNegro = Iconos.PuntoNegro()
icoVerde = Iconos.PuntoVerde()
menu = QTVarios.LCMenu(self.pantalla)
for n, (tit, txtp, siBlancas) in enumerate(self.liVariantes):
menu.opcion(n, tit, icoVerde if siBlancas else icoNegro)
menu.separador()
resp = menu.lanza()
if resp is not None:
self.lanzaVariantesNumero(resp)
def lanzaVariantesNumero(self, resp):
if resp == -1:
cp = ControlPosicion.ControlPosicion()
cp.leeFen(self.fen)
self.partida.reset(cp)
else:
self.partida.recuperaDeTexto(self.liVariantes[resp][1])
self.estado = kJugando
self.siDirigidoVariantes = True
self.siDirigido = True
self.ponteAlFinal()
self.siguienteJugada()
def compruebaComentarios(self):
if not self.partida.liJugadas or not self.siDirigido:
return
fen = self.partida.ultPosicion.fen()
if fen not in self.dicDirigidoFen:
return
jg = self.partida.last_jg()
mv = jg.movimiento()
fen = jg.posicion.fen()
for k, liOpciones in self.dicDirigidoFen.iteritems():
for siMain, jg1 in liOpciones:
if jg1.posicion.fen() == fen and jg1.movimiento() == mv:
if jg1.critica and not jg.critica:
jg.critica = jg1.critica
if jg1.comentario and not jg.comentario:
jg.comentario = jg1.comentario
if jg1.variantes and not jg.variantes:
jg.variantes = jg1.variantes
break
def createTactics(self):
nameTactic = os.path.basename(self.entreno)[:-4]
nomDir = os.path.join(self.configuracion.dirPersonalTraining, "Tactics", nameTactic)
if os.path.isdir(nomDir):
nom = nomDir + "-%d"
n = 1
while os.path.isdir(nom % n):
n += 1
nomDir = nom % n
nomIni = os.path.join(nomDir, "Config.ini")
nomTactic = "TACTIC1"
nomDirTac = os.path.join(VarGen.configuracion.dirPersonalTraining, "Tactics")
Util.creaCarpeta(nomDirTac)
Util.creaCarpeta(nomDir)
nomFNS = os.path.join(nomDir, "Puzzles.fns")
# Read every FEN line from the training file
f = open(self.entreno)
liBase = []
for linea in f:
liBase.append(linea.strip())
f.close()
# Create the file with the puzzles
f = codecs.open(nomFNS, "w", "utf-8", 'ignore')
nregs = len(liBase)
tmpBP = QTUtil2.BarraProgreso(self.pantalla, nameTactic, _("Working..."), nregs)
tmpBP.mostrar()
for n in range(nregs):
if tmpBP.siCancelado():
break
tmpBP.pon(n + 1)
linea = liBase[n]
li = linea.split("|")
fen = li[0]
if len(li) < 3 or not li[2]:
# no solution stored yet: let the engine analyze the position
mrm = self.xrival.analiza(fen)
if not mrm.liMultiPV:
continue
rm = mrm.liMultiPV[0]
p = Partida.Partida(fen=fen)
p.leerPV(rm.pv)
pts = rm.puntosABS()
jg = p.jugada(0)
for pos, rm1 in enumerate(mrm.liMultiPV):
if pos:
if rm1.puntosABS() == pts:
p1 = Partida.Partida(fen=fen)
p1.leerPV(rm1.pv)
if pos > 1:
jg.variantes += "\n"
jg.variantes += p1.pgnBaseRAW()
else:
break
jugadas = p.pgnBaseRAW()
txt = fen + "||%s\n" % jugadas
else:
txt = linea
f.write(txt)
f.close()
tmpBP.cerrar()
# Create the control (Config.ini) file
dicIni = {}
dicIni[nomTactic] = d = {}
d["MENU"] = nameTactic
d["FILESW"] = "%s:100" % os.path.basename(nomFNS)
Util.dic8ini(nomIni, dicIni)
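# The generated Config.ini looks roughly like this (sketch):
#   [TACTIC1]
#   MENU=<tactic name>
#   FILESW=Puzzles.fns:100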
self.mensajeEnPGN(_X(_("Tactic training %1 created."), nomDir) + "<br>" +
_X(_("You can access this training from menu Trainings-Learn tactics by repetition-%1"), nomDir))
self.procesador.entrenamientos.rehaz()
|
lukasmonk/lucaschess
|
Code/GestorEntPos.py
|
Python
|
gpl-2.0
| 25,129
|
[
"SIESTA"
] |
eebbd1911585c30364ee99b8fcb00f3e4bfad694698c8b612577029e3552e605
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import datetime
import inspect
import os
import re
import sys
import traceback
import gobject
import gtk
from kiwi.accessor import kgetattr
from kiwi.interfaces import IValidatableProxyWidget
from kiwi.ui.objectlist import ObjectList, ObjectTree
from kiwi.ui.views import SignalProxyObject, SlaveView
from kiwi.ui.widgets.combo import ProxyComboBox, ProxyComboEntry
from kiwi.ui.widgets.entry import ProxyDateEntry
from storm.info import get_cls_info
from stoqlib.domain.test.domaintest import DomainTest
from stoqlib.database.testsuite import test_system_notifier
from stoqlib.gui.stockicons import register
from stoqlib.lib.countries import countries
from stoqlib.lib.diffutils import diff_lines
from stoqlib.lib.unittestutils import get_tests_datadir
register()
_UUID_RE = re.compile("u'[a-f0-9]{8}-"
"[a-f0-9]{4}-"
"[a-f0-9]{4}-"
"[a-f0-9]{4}-"
"[a-f0-9]{12}'")
def _get_table_packing_properties(parent, child):
return (parent.child_get(child, 'top-attach')[0],
parent.child_get(child, 'bottom-attach')[0],
parent.child_get(child, 'left-attach')[0],
parent.child_get(child, 'right-attach')[0])
class GUIDumper(object):
"""A class used to dump the state of a widget tree and serialize
it into a string that can be saved on disk.
"""
def __init__(self):
self._items = {}
self._slave_holders = {}
self.output = ''
self.failures = []
def _add_namespace(self, obj, prefix=''):
for attr, value in obj.__dict__.items():
try:
self._items[hash(value)] = prefix + attr
except TypeError:
continue
for cls in inspect.getmro(obj.__class__):
for attr, value in cls.__dict__.items():
if isinstance(value, SignalProxyObject):
instance_value = getattr(obj, attr, None)
if instance_value is not None:
self._items[hash(instance_value)] = prefix + attr
if isinstance(obj, SlaveView):
for name, slave in obj.slaves.items():
self._add_namespace(slave)
holder = slave.get_toplevel().get_parent()
self._slave_holders[holder] = type(slave).__name__
def _get_packing_properties(self, widget):
# FIXME: Workaround for GtkWindow::parent property
# on PyGTK for natty
if isinstance(widget, gtk.Window):
return []
parent = widget.props.parent
if not parent:
return []
props = []
if isinstance(parent, gtk.Box):
(expand, fill,
padding, pack_type) = parent.query_child_packing(widget)
if expand:
props.append('expand=%r' % (bool(expand), ))
if fill:
props.append('fill=%r' % (bool(fill), ))
if padding != 0:
props.append('padding=%d' % (padding, ))
if pack_type == gtk.PACK_END:
props.append('pack-end')
return props
def _dump_children(self, widget, indent):
indent += 1
if isinstance(widget, gtk.Table):
def table_sort(a, b):
props_a = _get_table_packing_properties(widget, a)
props_b = _get_table_packing_properties(widget, b)
return cmp(props_a, props_b)
for child in sorted(widget.get_children(),
cmp=table_sort):
self._dump_widget(child, indent)
elif isinstance(widget, gtk.Container):
for child in widget.get_children():
self._dump_widget(child, indent)
elif isinstance(widget, gtk.Bin):
self._dump_widget([widget.get_child()], indent)
if isinstance(widget, gtk.MenuItem):
menu = widget.get_submenu()
if menu is not None:
self._dump_widget(menu, indent)
def _dump_widget(self, widget, indent=0):
if isinstance(widget, gtk.Window):
self._dump_window(widget, indent)
elif isinstance(widget, gtk.Entry):
self._dump_entry(widget, indent)
elif isinstance(widget, gtk.ToggleButton):
self._dump_toggle_button(widget, indent)
elif isinstance(widget, gtk.Button):
self._dump_button(widget, indent)
elif isinstance(widget, gtk.Label):
self._dump_label(widget, indent)
elif isinstance(widget, (ProxyComboBox, ProxyComboEntry)):
self._dump_proxy_combo(widget, indent)
elif isinstance(widget, ProxyDateEntry):
self._dump_proxy_date_entry(widget, indent)
elif isinstance(widget, gtk.IconView):
self._dump_iconview(widget, indent)
elif isinstance(widget, ObjectList):
self._dump_objectlist(widget, indent)
elif isinstance(widget, gtk.EventBox):
self._dump_event_box(widget, indent)
elif isinstance(widget, gtk.MenuItem):
self._dump_menu_item(widget, indent)
elif isinstance(widget, gtk.ToolItem):
self._dump_tool_item(widget, indent)
else:
self._write_widget(widget, indent)
self._dump_children(widget, indent)
def _is_interactive_widget(self, widget):
# FIXME: Add more widgets, but needs a careful audit
return isinstance(widget, (gtk.Entry, ))
def _write_widget(self, widget, indent=0, props=None, extra=None):
extra = extra or []
line_props = []
name = self._items.get(hash(widget), '')
if name:
line_props.append(name)
line_props.extend(self._get_packing_properties(widget))
spaces = (' ' * (indent * 2))
if not props:
props = []
if not widget.get_visible():
props.append('hidden')
if not widget.get_sensitive():
props.append('insensitive')
if (widget.get_sensitive() and
widget.get_visible() and
not widget.get_can_focus() and
self._is_interactive_widget(widget)):
props.append('unfocusable')
fmt = "%s %s is not focusable"
self.failures.append(fmt % (gobject.type_name(widget),
self._items.get(hash(widget),
'???')))
if IValidatableProxyWidget.providedBy(widget):
if (not widget.is_valid() and
widget.get_sensitive() and
widget.get_visible()):
if widget.mandatory:
props.append('mandatory')
else:
props.append('invalid')
if props:
prop_lines = ' ' + ', '.join(props)
else:
prop_lines = ''
self.output += "%s%s(%s):%s\n" % (
spaces,
gobject.type_name(widget),
', '.join(line_props),
prop_lines)
spaces = (' ' * ((indent + 1) * 2))
for line in extra:
self.output += spaces + line + '\n'
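# A dumped line looks like, e.g.: "GtkButton(ok_button): 'OK', insensitive"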
# Gtk+
def _dump_window(self, window, indent):
props = ['title=%r' % (window.get_title())]
self._write_widget(window, indent, props)
self._dump_children(window, indent)
def _dump_event_box(self, eventbox, indent):
slave_name = self._slave_holders.get(eventbox)
props = []
if slave_name:
props.append('slave %s is attached' % (slave_name, ))
self._write_widget(eventbox, indent, props)
self._dump_children(eventbox, indent)
def _dump_button(self, button, indent, props=None):
if props is None:
props = []
label = button.get_label()
if label:
props.insert(0, repr(label))
self._write_widget(button, indent, props)
def _dump_entry(self, entry, indent):
text = repr(entry.get_text())
props = [text]
if not entry.get_editable():
props.append('ineditable')
if isinstance(entry, gtk.SpinButton):
if entry.props.wrap:
props.append('wrappable')
self._write_widget(entry, indent, props)
def _dump_label(self, label, indent):
if (isinstance(label, gtk.AccelLabel) and
isinstance(label.get_parent(), gtk.MenuItem)):
return
props = []
lbl = label.get_label()
if lbl:
props.append(repr(lbl))
self._write_widget(label, indent, props)
def _dump_toggle_button(self, toggle, indent):
props = []
if toggle.get_active():
props.append('active')
self._dump_button(toggle, indent, props)
def _dump_menu_item(self, menuitem, indent):
# GtkUIManager creates plenty of invisible separators
if (isinstance(menuitem, gtk.SeparatorMenuItem) and
not menuitem.get_visible()):
return
# GtkUIManager creates empty items at the end of lists
if (type(menuitem) == gtk.MenuItem and
not menuitem.get_visible() and
not menuitem.get_sensitive() and
menuitem.get_label() == 'Empty'):
return
# Skip tearoff menus
if (isinstance(menuitem, gtk.TearoffMenuItem) and
not menuitem.get_visible()):
return
props = []
label = menuitem.get_label()
if (isinstance(menuitem, gtk.ImageMenuItem) and
menuitem.get_use_stock()):
props.append('stock=%r' % (label, ))
elif label:
props.append(repr(label))
self._write_widget(menuitem, indent, props)
self._dump_children(menuitem, indent)
def _dump_tool_item(self, toolitem, indent):
# GtkUIManager creates plenty of invisible separators
if (isinstance(toolitem, gtk.SeparatorToolItem) and
not toolitem.get_visible()):
return
props = []
if isinstance(toolitem, gtk.ToolButton):
label = toolitem.get_label()
if label:
props.append(repr(label))
self._write_widget(toolitem, indent, props)
if isinstance(toolitem, gtk.MenuToolButton):
menu = toolitem.get_menu()
if menu:
self._dump_widget(menu, indent + 2)
def _dump_iconview(self, iconview, indent):
extra = []
model = iconview.get_model()
markup_id = iconview.get_markup_column()
text_id = iconview.get_text_column()
pixbuf_id = iconview.get_pixbuf_column()
for row in model:
cols = []
if markup_id != -1:
cols.append('markup: ' + row[markup_id])
if text_id != -1:
cols.append('text: ' + row[text_id])
if pixbuf_id != -1:
stock_id = getattr(row[pixbuf_id], 'stock_id', None)
if stock_id:
cols.append('stock: ' + stock_id)
extra.append(', '.join(cols))
self._write_widget(iconview, indent, extra=extra)
# Kiwi
def _dump_proxy_date_entry(self, dateentry, indent):
props = [repr(dateentry.get_date())]
self._write_widget(dateentry, indent, props)
def _dump_proxy_combo(self, combo, indent):
extra = []
selected = combo.get_selected_label()
labels = combo.get_model_strings()
if (labels and labels[0] == 'Afghanistan' and
sorted(labels) == sorted(countries)):
labels = [selected,
'... %d more countries ...' % (len(countries) - 1)]
for label in labels:
line = [repr(label)]
if label == selected:
line.append('selected')
extra.append('item: ' + ', '.join(line))
self._write_widget(combo, indent, extra=extra)
def _dump_objectlist(self, objectlist, indent):
extra = []
is_tree = isinstance(objectlist, ObjectTree)
for column in objectlist.get_columns():
col = []
col.append('title=%r' % (column.title))
if not column.visible:
col.append('hidden')
if column.expand:
col.append('expand')
extra.append('column: ' + ', '.join(col))
def append_row(row, extra_indent=0):
inst = row[0]
cols = []
cols = [repr(kgetattr(inst, col.attribute, None)) for
col in objectlist.get_columns()]
extra.append("%srow: %s" % (
' ' * extra_indent, ', '.join(cols)))
if is_tree:
extra_indent = extra_indent + 2
for child in row.iterchildren():
append_row(child, extra_indent=extra_indent)
model = objectlist.get_model()
for row in model:
append_row(row)
self._write_widget(objectlist, indent, extra=extra)
def dump_widget(self, widget):
self._add_namespace(widget)
self.output += 'widget: %s\n' % (widget.__class__.__name__, )
self._dump_widget(widget)
def dump_editor(self, editor):
self._add_namespace(editor)
self._add_namespace(editor.main_dialog, 'main_dialog.')
self.output += 'editor: %s\n' % (editor.__class__.__name__, )
self._dump_widget(editor.main_dialog.get_toplevel())
def dump_wizard(self, wizard):
self._add_namespace(wizard)
step = wizard.get_current_step()
if step:
self._add_namespace(step, 'step.')
self.output += 'wizard: %s\n' % (wizard.__class__.__name__, )
self._dump_widget(wizard.get_toplevel())
def dump_dialog(self, dialog):
self._add_namespace(dialog)
self.output += 'dialog: %s\n' % (dialog.__class__.__name__, )
self._dump_widget(dialog.get_toplevel())
def dump_slave(self, slave):
self._add_namespace(slave)
self.output += 'slave: %s\n' % (slave.__class__.__name__, )
self._dump_widget(slave.get_toplevel())
def dump_search(self, search):
self._add_namespace(search)
self.output += 'search: %s\n' % (search.__class__.__name__, )
self._dump_widget(search.get_toplevel())
def dump_app(self, app):
self._add_namespace(app)
self.output += 'app: %s\n' % (app.__class__.__name__, )
self._dump_widget(app.get_toplevel())
popups = app.uimanager.get_toplevels(gtk.UI_MANAGER_POPUP)
for popup in popups:
self.output += '\n'
self.output += 'popup: %s\n' % (popup.get_name(), )
self._dump_widget(popup)
def dump_models(self, models):
if not models:
return
self.output += '\n'
counter = 1
ns = {}
for model in models:
model_name = '%s<%d>' % (type(model).__name__,
counter)
ns[model] = model_name
counter += 1
for model in models:
self._dump_model(ns, model)
def _dump_model(self, ns, model):
if model is None:
self.output += 'model: None\n'
return
self.output += 'model: %s\n' % (ns[model], )
info = get_cls_info(type(model))
for col in info.columns:
if col.name == 'id' or col.name == 'identifier':
continue
if col.name.endswith('_id'):
value = getattr(model, col.name[:-3], None)
if value in ns:
self.output += ' %s: %s\n' % (col.name, ns[value])
continue
value = getattr(model, col.name, None)
if isinstance(value, datetime.datetime):
# Strip hours/minutes/seconds so today() works
value = datetime.datetime(value.year,
value.month,
value.day)
self.output += ' %s: %r\n' % (col.name, value)
self.output += '\n'
# FIXME: To be able to create ui tests outside stoq, we need to be able
# to get tests data dir from there. Maybe we should use
# provide_utility/get_utility?
stoq_dir = get_tests_datadir('ui')
class GUITest(DomainTest):
def setUp(self):
self._unhandled_exceptions = []
self._old_hook = sys.excepthook
sys.excepthook = self._except_hook
test_system_notifier.reset()
DomainTest.setUp(self)
def tearDown(self):
sys.excepthook = self._old_hook
DomainTest.tearDown(self)
messages = test_system_notifier.reset()
if messages:
self.fail("Unhandled messages: %r, use @mock.patch()" % (
messages, ))
if self._unhandled_exceptions:
self.fail("Unhandled exceptions: %r" % (
self._unhandled_exceptions))
def _except_hook(self, exc_type, exc_value, exc_traceback):
self._unhandled_exceptions.append((exc_type, exc_value, exc_traceback))
traceback.print_exception(exc_type, exc_value, exc_traceback)
def _get_ui_filename(self, name):
return os.path.join(stoq_dir, name + '.uitest')
def click(self, button):
"""Simulates a click on a button.
This verifies that the button is clickable (visible and sensitive) and
emits the clicked signal
"""
if not isinstance(button, gtk.Button):
raise TypeError("%r must be a button" % (button, ))
if not button.get_visible():
self.fail("button is not visible")
return
if not button.get_sensitive():
self.fail("button is not sensitive")
return
button.clicked()
def activate(self, widget):
"""Simulates activation on a widget
This verifies that the button is activatable (visible and sensitive) and
emits the activate signal
"""
if not isinstance(widget, (gtk.Action, gtk.Widget)):
raise TypeError("%r must be an action or a widget" % (widget, ))
if not widget.get_visible():
self.fail("widget is not visible")
return
if not widget.get_sensitive():
self.fail("widget is not sensitive")
return
widget.activate()
def assertInvalid(self, dialog, attributes):
for attr in attributes:
value = getattr(dialog, attr)
if value.is_valid():
self.fail("%s.%s should be invalid" % (
dialog.__class__.__name__, attr))
def assertValid(self, dialog, attributes):
for attr in attributes:
value = getattr(dialog, attr)
if not value.is_valid():
self.fail("%s.%s should be valid" % (
dialog.__class__.__name__, attr))
def assertSensitive(self, dialog, attributes):
for attr in attributes:
value = getattr(dialog, attr)
# If the widget is sensitive, we also expect it to be visible
if not value.get_sensitive() or not value.get_visible():
self.fail("%s.%s should be sensitive" % (
dialog.__class__.__name__, attr))
def assertNotSensitive(self, dialog, attributes):
for attr in attributes:
value = getattr(dialog, attr)
if value.get_sensitive():
self.fail("%s.%s should not be sensitive" % (
dialog.__class__.__name__, attr))
def assertVisible(self, dialog, attributes):
for attr in attributes:
value = getattr(dialog, attr)
if not value.get_visible():
self.fail("%s.%s should be visible" % (
dialog.__class__.__name__, attr))
def assertNotVisible(self, dialog, attributes):
for attr in attributes:
value = getattr(dialog, attr)
if value.get_visible():
self.fail("%s.%s should not be visible" % (
dialog.__class__.__name__, attr))
def check_widget(self, widget, ui_test_name, models=None, ignores=None):
models = models or []
ignores = ignores or []
dumper = GUIDumper()
dumper.dump_widget(widget)
dumper.dump_models(models)
self.check_filename(dumper, ui_test_name, ignores)
def check_wizard(self, wizard, ui_test_name, models=None, ignores=None):
models = models or []
ignores = ignores or []
dumper = GUIDumper()
dumper.dump_wizard(wizard)
dumper.dump_models(models)
self.check_filename(dumper, ui_test_name, ignores)
def check_editor(self, editor, ui_test_name, models=None, ignores=None):
models = models or []
ignores = ignores or []
dumper = GUIDumper()
dumper.dump_editor(editor)
dumper.dump_models(models)
self.check_filename(dumper, ui_test_name, ignores)
def check_dialog(self, dialog, ui_test_name, models=None, ignores=None):
models = models or []
ignores = ignores or []
dumper = GUIDumper()
dumper.dump_dialog(dialog)
dumper.dump_models(models)
self.check_filename(dumper, ui_test_name, ignores)
def check_slave(self, slave, ui_test_name, models=None, ignores=None):
models = models or []
ignores = ignores or []
dumper = GUIDumper()
dumper.dump_slave(slave)
dumper.dump_models(models)
self.check_filename(dumper, ui_test_name, ignores)
def check_search(self, search, ui_test_name, models=None, ignores=None):
models = models or []
ignores = ignores or []
dumper = GUIDumper()
dumper.dump_search(search)
dumper.dump_models(models)
self.check_filename(dumper, 'search-' + ui_test_name, ignores)
def check_app(self, app, ui_test_name, models=None, ignores=None):
models = models or []
ignores = ignores or []
dumper = GUIDumper()
dumper.dump_app(app)
dumper.dump_models(models)
self.check_filename(dumper, 'app-' + ui_test_name, ignores)
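# check_filename: normalize the dump (dates, uuids, GTK3 renames), write the
# .uitest file on first run, otherwise diff against it and fail on mismatch.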
def check_filename(self, dumper, ui_test_name, ignores=None):
ignores = ignores or []
text = dumper.output
for ignore in ignores:
text = text.replace(ignore, '%% FILTERED BY UNITTEST %%')
today = datetime.date.today()
text = text.replace(repr(today), 'date.today()')
text = text.replace(today.strftime('%x'), "YYYY-MM-DD")
text = text.replace(today.strftime('%Y-%m-%d'), "YYYY-MM-DD")
text = text.replace(
repr(datetime.datetime(today.year, today.month, today.day)),
'datetime.today()')
text = _UUID_RE.sub("uuid.uuid()", text)
if os.environ.get('STOQ_USE_GI', '') == '3.0':
# These are internal changes of GtkDialog which we
# don't want to see.
# They can safely be removed when we drop PyGTK support
# GtkHButtonBox doesn't exist any longer and we don't
# use GtkVButtonBox
text = text.replace('GtkButtonBox', 'GtkHButtonBox')
text = text.replace(
'GtkBox(PluggableWizard-vbox',
'GtkVBox(PluggableWizard-vbox')
text = text.replace(
'GtkBox(main_dialog._main_vbox',
'GtkVBox(main_dialog._main_vbox')
text = text.replace(
'GtkBox(_main_vbox',
'GtkVBox(_main_vbox')
text = text.replace('stoq+lib+gicompat+', 'Gtk')
filename = self._get_ui_filename(ui_test_name)
if not os.path.exists(filename):
with open(filename, 'w') as f:
f.write(text)
self._check_failures(dumper)
return
lines = [(line + '\n') for line in text.split('\n')][:-1]
with open(filename) as f:
expected = f.readlines()
difference = diff_lines(expected,
lines,
short=filename[len(stoq_dir) + 1:])
# Allow users to easily update uitests by running, for example:
# $ STOQ_REPLACE_UITESTS=1 make check-failed
replace_tests = os.environ.get('STOQ_REPLACE_UITESTS', False)
if difference and replace_tests:
print(("\n ** The test %s differed, but being replaced since "
"STOQ_REPLACE_UITESTS is set **" % filename))
with open(filename, 'w') as f:
f.write(text)
elif difference:
self.fail('ui test %s failed:\n%s' % (
ui_test_name, difference))
self._check_failures(dumper)
def _check_failures(self, dumper):
# Make sure unfocused is never saved, this should happen after
# the difference above, since that is a much more useful error message
# (with a complete diff) rather than just an error message
if dumper.failures:
self.fail(dumper.failures)
|
andrebellafronte/stoq
|
stoqlib/gui/test/uitestutils.py
|
Python
|
gpl-2.0
| 26,129
|
[
"VisIt"
] |
be0d4e4049725485ecb4709a78fe6cd764906977ee554d61009e414946b359cd
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
renWin = vtk.vtkRenderWindow()
# create a rendering window and renderer
ren1 = vtk.vtkRenderer()
renWin.AddRenderer(ren1)
renWin.SetSize(400,400)
puzzle = vtk.vtkSpherePuzzle()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(puzzle.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
arrows = vtk.vtkSpherePuzzleArrows()
mapper2 = vtk.vtkPolyDataMapper()
mapper2.SetInputConnection(arrows.GetOutputPort())
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(actor)
ren1.AddActor(actor2)
ren1.SetBackground(0.1,0.2,0.4)
LastVal = -1
def MotionCallback (x,y,__vtk__temp0=0,__vtk__temp1=0):
global LastVal
# Compute display point from Tk display point.
WindowY = 400
y = WindowY - y  # flip Tk's top-left origin to VTK's bottom-left
z = ren1.GetZ(x,y)
ren1.SetDisplayPoint(x,y,z)
ren1.DisplayToWorld()
pt = ren1.GetWorldPoint()
x, y, z = pt[0], pt[1], pt[2]
val = puzzle.SetPoint(x,y,z)
if (val != LastVal):
renWin.Render()
LastVal = val
pass
def ButtonCallback (x,y,__vtk__temp0=0,__vtk__temp1=0):
# Compute display point from Tk display point.
WindowY = 400
y = WindowY - y  # flip Tk's top-left origin to VTK's bottom-left
z = ren1.GetZ(x,y)
ren1.SetDisplayPoint(x,y,z)
ren1.DisplayToWorld()
pt = ren1.GetWorldPoint()
x, y, z = pt[0], pt[1], pt[2]
# Had to move away from mouse events (SGI RT problems)
i = 0
while i <= 100:
puzzle.SetPoint(x,y,z)
puzzle.MovePoint(i)
renWin.Render()
i = i + 5
renWin.Render()
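# Note: MotionCallback/ButtonCallback are interactive helpers carried over
# from the original Tcl test; the scripted sequence below never invokes them.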
cam = ren1.GetActiveCamera()
cam.Elevation(-40)
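# Scramble the puzzle with a fixed, reproducible sequence of ring moves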
puzzle.MoveHorizontal(0,100,0)
puzzle.MoveHorizontal(1,100,1)
puzzle.MoveHorizontal(2,100,0)
puzzle.MoveVertical(2,100,0)
puzzle.MoveVertical(1,100,0)
renWin.Render()
# --- end of script --
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/Modeling/Testing/Python/TestSpherePuzzle.py
|
Python
|
gpl-3.0
| 2,239
|
[
"VTK"
] |
4b58ef16737b952c16d5d0db3c2dbd63a7dcb89e573acd8167038b516e36de2d
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
import time
m = 2000
n = 4000
numLambdas = 5
startLambda = 0.01
endLambda = 1
display = True
worldRank = El.mpi.WorldRank()
# Make a sparse matrix with the last column dense
def Rectang(height,width):
A = El.DistSparseMatrix()
A.Resize(height,width)
firstLocalRow = A.FirstLocalRow()
localHeight = A.LocalHeight()
A.Reserve(5*localHeight)
for sLoc in xrange(localHeight):
s = firstLocalRow + sLoc
if s < width:
A.QueueLocalUpdate( sLoc, s, 11 )
if s >= 1 and s-1 < width:
A.QueueLocalUpdate( sLoc, s-1, -1 )
if s+1 < width:
A.QueueLocalUpdate( sLoc, s+1, 2 )
if s >= height and s-height < width:
A.QueueLocalUpdate( sLoc, s-height, -3 )
if s+height < width:
A.QueueLocalUpdate( sLoc, s+height, 4 )
# The dense last column
A.QueueLocalUpdate( sLoc, width-1, -5./height )  # float division; -5/height would floor under Python 2
A.MakeConsistent()
return A
A = Rectang(m,n)
b = El.DistMultiVec()
El.Gaussian( b, m, 1 )
if display:
El.Display( A, "A" )
El.Display( b, "b" )
ctrl = El.LPAffineCtrl_d()
ctrl.mehrotraCtrl.progress = True
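# Sweep lambda and solve the Dantzig selector
#   min ||x||_1  subject to  || A^T (b - A x) ||_oo <= lambda
# for each value; the norms printed below check exactly this constraint.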
for j in xrange(0,numLambdas):
lambd = startLambda + j*(endLambda-startLambda)/(numLambdas-1.)
if worldRank == 0:
print "lambda =", lambd
startDS = time.clock()
x = El.DS( A, b, lambd, ctrl )
endDS = time.clock()
if worldRank == 0:
print "DS time: ", endDS-startDS
if display:
El.Display( x, "x" )
xOneNorm = El.EntrywiseNorm( x, 1 )
r = El.DistMultiVec()
El.Copy( b, r )
El.SparseMultiply( El.NORMAL, -1., A, x, 1., r )
rTwoNorm = El.Nrm2( r )
t = El.DistMultiVec()
El.Zeros( t, n, 1 )
El.SparseMultiply( El.TRANSPOSE, 1., A, r, 0., t )
tTwoNorm = El.Nrm2( t )
tInfNorm = El.MaxNorm( t )
if display:
El.Display( r, "r" )
El.Display( t, "t" )
if worldRank == 0:
print "|| x ||_1 =", xOneNorm
print "|| b - A x ||_2 =", rTwoNorm
print "|| A^T (b - A x) ||_2 =", tTwoNorm
print "|| A^T (b - A x) ||_oo =", tInfNorm
# Require the user to press a button before the figures are closed
commSize = El.mpi.Size( El.mpi.COMM_WORLD() )
El.Finalize()
if commSize == 1:
raw_input('Press Enter to exit')
|
sg0/Elemental
|
examples/interface/DS.py
|
Python
|
bsd-3-clause
| 2,435
|
[
"Gaussian"
] |
2fa54809ac5cee28b9c0e8452d52748b19ca3c79ff75b88f80facb07004074f7
|
# -*- coding: utf-8 -*-
import json
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
from Plugins.Extensions.MediaPortal.resources.choiceboxext import ChoiceBoxExt
from Plugins.Extensions.MediaPortal.resources.keyboardext import VirtualKeyBoardExt
from Plugins.Extensions.MediaPortal.resources.yt_url import isVEVODecryptor
from Plugins.Extensions.MediaPortal.resources.youtubeplayer import YoutubePlayer
from Plugins.Extensions.MediaPortal.resources.menuhelper import MenuHelper
from Plugins.Extensions.MediaPortal.resources.twagenthelper import twAgentGetPage
YT_Version = "Youtube Search v3.50"
YT_siteEncoding = 'utf-8'
useProxy = lambda : config.mediaportal.premiumize_use.value and config.mediaportal.sp_use_yt_with_proxy.value
config.mediaportal.yt_param_regionid_idx = ConfigInteger(default = 0)
config.mediaportal.yt_param_time_idx = ConfigInteger(default = 0)
config.mediaportal.yt_param_meta_idx = ConfigInteger(default = 1)
config.mediaportal.yt_paramListIdx = ConfigInteger(default = 0)
config.mediaportal.yt_param_3d_idx = ConfigInteger(default = 0)
config.mediaportal.yt_param_duration_idx = ConfigInteger(default = 0)
config.mediaportal.yt_param_video_definition_idx = ConfigInteger(default = 0)
config.mediaportal.yt_param_event_types_idx = ConfigInteger(default = 0)
config.mediaportal.yt_param_video_type_idx = ConfigInteger(default = 0)
config.mediaportal.yt_refresh_token = ConfigText(default="")
APIKEYV3 = 'AIzaSyBPEkhZzAvfYQZYLmIQcOsklbZbTbymjb0'
param_hl = ('&hl=en_GB', '&hl=de_DE', '&hl=fr_FR', '&hl=it_IT', '')
class youtubeGenreScreen(MenuHelper):
def __init__(self, session):
global yt_oauth2
self.param_qr = ""
self.param_author = ""
self.old_mainidx = -1
self.param_safesearch = ['&safeSearch=none']
self.param_format = '&format=5'
self.subCat = [(_('No Category'), '')]
self.subCat_L2 = [None]
self.param_time = [
(_("Date"), "&order=date"),
(_("Rating"), "&order=rating"),
(_("Relevance"), "&order="),
(_("Title"), "&order=title"),
(_("Video count"), "&order=videoCount"),
(_("View count"), "&order=viewCount")
]
self.param_metalang = [
(_('English'), '&relevanceLanguage=en'),
(_('German'), '&relevanceLanguage=de'),
(_('French'), '&relevanceLanguage=fr'),
(_('Italian'), '&relevanceLanguage=it'),
(_('Any'), '')
]
self.param_regionid = [
(_('Whole world'), '&regionCode=US'),
(_('England'), '&regionCode=GB'),
(_('Germany'), '&regionCode=DE'),
(_('France'), '&regionCode=FR'),
(_('Italy'), '&regionCode=IT')
]
self.param_duration = [
(_('Any'), ''),
('< 4 Min', '&videoDuration=short'),
('4..20 Min', '&videoDuration=medium'),
('> 20 Min', '&videoDuration=long')
]
self.param_3d = [
(_('Any'), ''),
(_('2D'), '&videoDimension=2d'),
(_('3D'), '&videoDimension=3d')
]
self.param_video_definition = [
(_('Any'), ''),
(_('High'), '&videoDefinition=high'),
(_('Low'), '&videoDefinition=standard')
]
self.param_event_types = [
(_('None'), ''),
(_('Completed'), '&eventType=completed'),
(_('Live'), '&eventType=live'),
(_('Upcoming'), '&eventType=upcoming')
]
self.param_video_type = [
(_('Any'), ''),
(_('Episode'), '&videoType=episode'),
(_('Movie'), '&videoType=movie')
]
self.paramList = [
(_('Search request'), (self.paraQuery, None), (0,1,2)),
(_('Event type'), (self.param_event_types, config.mediaportal.yt_param_event_types_idx), (0,)),
(_('Sort by'), (self.param_time, config.mediaportal.yt_param_time_idx), (0,1,2)),
(_('Language'), (self.param_metalang, config.mediaportal.yt_param_meta_idx), (0,1,2,4)),
(_('Search region'), (self.param_regionid, config.mediaportal.yt_param_regionid_idx), (0,1,2,4)),
(_('User name'), (self.paraAuthor, None), (0,1,2)),
(_('3D Search'), (self.param_3d, config.mediaportal.yt_param_3d_idx), (0,)),
(_('Runtime'), (self.param_duration, config.mediaportal.yt_param_duration_idx), (0,)),
(_('Video definition'), (self.param_video_definition, config.mediaportal.yt_param_video_definition_idx), (0,)),
(_('Video type'), (self.param_video_type, config.mediaportal.yt_param_video_type_idx), (0,))
]
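# Each paramList entry is (label, (options list or input callback, backing
# ConfigInteger or None), tuple of main-menu indices where the parameter applies).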
self.subCatUserChannel = [
('Start', '/featured?'),
('Videos', '/videos?'),
('Playlists', '/playlists?'),
('Channels', '/channels?')
]
self.subCatMusicGenres = [
('Featured Playlists','https://www.youtube.com/channel/UC-9-kyTW8ZkZNDHQJ6FgpwQ/featured?'),
('All Playlists','https://www.youtube.com/channel/UC-9-kyTW8ZkZNDHQJ6FgpwQ/playlists?view=1&sort=lad&'),
('Genres','https://www.youtube.com/channel/%s/videos?'),
('All Video Uploads','https://www.youtube.com/channel/UC-9-kyTW8ZkZNDHQJ6FgpwQ/videos?')
]
self.subCatMusicChannels = [
('Rap & Hip-Hop', 'UCUnSTiCHiHgZA9NQUG6lZkQ'),
('Rock', 'UCRZoK7sezr5KRjk7BBjmH6w'),
('Popmusik', 'UCE80FOXpJydkkMo-BYoJdEg'),
('Klassische Musik', 'UCLwMU2tKAlCoMSbGQDuiMSg'),
('Country', 'UClYMFaf6IdjQnZmsnw9N1hQ'),
('Jazz', 'UC7KZmdQxhcajZSEFLJr3gCg'),
('Disco', 'UCNGkvx5UwHzqlo6zDgRDYsQ'),
('Blues', 'UCYlU_M1PLtYZ6qTfKIUlxLQ'),
('Alternative Rock', 'UCHtUkBSmt4d92XP8q17JC3w'),
('Soul', 'UCsFaF_3y_L__y8kWAIEhv1w'),
('Funk', 'UCxk1wRJGOTmzJAbvbQ8VicQ'),
('R&B', 'UCvwDeZSN2oUHlLWYRLvKceA'),
('Reggae', 'UCEdvzYtzTH_FFpB3VRjFV6Q'),
("Children's Music", 'UCMBT_zT5NtEG_3Nn3XSPTxw'),
('Volksmusic', 'UCbMcht964OUJoeVi9oxFcKg'),
('Fingerstyle', 'UC63oXoh_yThcEiUmHbAiLiw'),
('Folk', 'UC9GxgUzRt2qUIII3tSSRjwQ'),
('Elektronische Musik', 'UCCIPrrom6DIftcrInjeMvsQ'),
('Lateinamerikanische Musik', 'UCYYsyo5ekR-2Nw10s4mj3pQ'),
('New Age', 'UCfqBDMEJrevX2_2XBUSxAqg'),
('K-Pop', 'UCsEonk9fs_9jmtw9PwER9yg'),
('Afrikanische Musik', 'UCadO807x4w5SAo-KKnQTMcA'),
('Arabische Musik', 'UCCStUvXbY5TbjDYJD_xKByQ'),
('Vokalmusik', 'UCrrrTqJSxijC3hIJ-2oL8mw'),
('Geistliche Musik', 'UCiIRzxB4CUW9vt5js6UFCRQ'),
('Comedy music', 'UCxKwRTQMME5HahBLLLMMELg'),
('Music of Asia', 'UCDQ_5Wcc54n1_GrAzf05uWQ'),
('Weltmusik', 'UCMHQZBr9QGPkACZ4hu2wqbQ'),
('Elektronische Tanzmusik', 'UCeAIo5P3sKEiuhGn-rExx7Q'),
('Techno', 'UCQLbTKToYT86oML-jx_DJMA'),
('Trance', 'UC5d4piMBQlBQRFpS9m_8UZQ'),
('Indische Musik', 'UC4K4LBy_IQGmQrAQVIa1JlA'),
('Pop-Rock', 'UCcu0YYUpyosw5_sLnK4wK4A'),
('Turkish pop music', 'UC7PC8CGB-pU7OJgMGhXIA_g'),
('Softrock', 'UCFGhkqw3_rCSBTb2_i0P0Zg')
]
self.subCatMusicChannels.sort(key=lambda t : t[0].lower())
self.subCatYourChannel = [
('Favorites', 'https://www.googleapis.com/youtube/v3/channels?part=contentDetails&mine=true&access_token=%ACCESSTOKEN%%playlistId=favorites%'),
('History', 'https://www.googleapis.com/youtube/v3/channels?part=contentDetails&mine=true&access_token=%ACCESSTOKEN%%playlistId=watchHistory%'),
('Likes', 'https://www.googleapis.com/youtube/v3/channels?part=contentDetails&mine=true&access_token=%ACCESSTOKEN%%playlistId=likes%'),
('New Subscription Videos', 'https://www.googleapis.com/youtube/v3/activities?part=contentDetails%2Csnippet&home=true&access_token=%ACCESSTOKEN%%ACT-upload%'),
('Playlists', 'https://www.googleapis.com/youtube/v3/playlists?part=snippet%2Cid&mine=true&access_token=%ACCESSTOKEN%'),
('Recommendations', 'https://www.googleapis.com/youtube/v3/activities?part=contentDetails%2Csnippet&home=true&access_token=%ACCESSTOKEN%%ACT-recommendation%'),
('Subscriptions', 'https://www.googleapis.com/youtube/v3/subscriptions?part=snippet&mine=true&access_token=%ACCESSTOKEN%'),
('Uploads', 'https://www.googleapis.com/youtube/v3/channels?part=contentDetails&mine=true&access_token=%ACCESSTOKEN%%playlistId=uploads%'),
('Watch Later', 'https://www.googleapis.com/youtube/v3/channels?part=contentDetails&mine=true&access_token=%ACCESSTOKEN%%playlistId=watchLater%')
]
self.mainGenres = [
('Video search', 'https://www.googleapis.com/youtube/v3/search?part=snippet&q=%QR%&type=video&key=%KEY%'),
('Playlist search', 'https://www.googleapis.com/youtube/v3/search?part=snippet&q=%QR%&type=playlist&key=%KEY%'),
('Channel search', 'https://www.googleapis.com/youtube/v3/search?part=snippet&q=%QR%&type=channel&key=%KEY%'),
('Your channel', ''),
('Guide Categories', 'https://www.googleapis.com/youtube/v3/guideCategories?part=snippet&key=%KEY%'),
('Favoriten', ''),
('Beliebt auf YouTube - Deutschland', 'http://www.youtube.com/channel/UCK274iXLZhs8MFGLsncOyZQ'),
('Sport', 'https://www.youtube.com/channel/UCEgdi0XIXXZ-qJOFPf4JSKw'),
('KinoCheck', 'https://www.youtube.com/user/KinoCheck'),
('#Live', 'https://www.youtube.com/channel/UC4R8DWoMoI7CAwX8_LjQHig')
]
if useProxy() and isVEVODecryptor:
self.mainGenres.append(('Youtube Music', ''))
self.mainGenres.append(('VEVO Music', 'https://www.youtube.com/user/VEVO'))
MenuHelper.__init__(self, session, 2, None, "", "", self._defaultlistcenter, "ytSearchScreen.xml")
self["yt_actions"] = ActionMap(["MP_Actions"], {
"yellow": self.keyYellow,
"blue": self.login
}, -1)
self['title'] = Label(YT_Version)
self['ContentTitle'] = Label(_("VIDEOSEARCH"))
self['Query'] = Label(_("Search request"))
self['query'] = Label()
self['Time'] = Label(_("Sort by"))
self['time'] = Label()
self['Metalang'] = Label(_("Language"))
self['metalang'] = Label()
self['Regionid'] = Label(_("Search region"))
self['regionid'] = Label()
self['Author'] = Label(_("User name"))
self['author'] = Label()
self['Keywords'] = Label(_("Event type"))
self['keywords'] = Label()
self['Parameter'] = Label(_("Parameter"))
self['ParameterToEdit'] = Label()
self['parametertoedit'] = Label()
self['3D'] = Label(_("3D Search"))
self['3d'] = Label()
self['Duration'] = Label(_("Runtime"))
self['duration'] = Label()
self['Reserve1'] = Label(_("Video definition"))
self['reserve1'] = Label()
self['Reserve2'] = Label(_("Video type"))
self['reserve2'] = Label()
self['F3'] = Label(_("Edit Parameter"))
self['F4'] = Label(_("Request YT-Token"))
self.onLayoutFinish.append(self.initSubCat)
self.mh_On_setGenreStrTitle.append((self.keyYellow, [0]))
self.onClose.append(self.saveIdx)
self.channelId = None
def initSubCat(self):
hl = param_hl[config.mediaportal.yt_param_meta_idx.value]
rc = self.param_regionid[config.mediaportal.yt_param_regionid_idx.value][1].split('=')[-1]
if not rc:
rc = 'US'
url = 'https://www.googleapis.com/youtube/v3/videoCategories?part=snippet%s&regionCode=%s&key=%s' % (hl, rc, APIKEYV3)
twAgentGetPage(url).addCallback(self.parseCats)
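# videoCategories.list supplies the localized category names that parseCats
# turns into '&videoCategoryId=<id>' search filters.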
def parseCats(self, data):
data = json.loads(data)
self.subCat = [(_('No Category'), '')]
self.subCat_L2 = [None]
for item in data.get('items', {}):
self.subCat.append((str(item['snippet']['title']), '&videoCategoryId=%s' % str(item['id'])))
self.subCat_L2.append(None)
self.mh_genreMenu = [
self.mainGenres,
[
self.subCat, None, None, self.subCatYourChannel, None, None, self.subCatUserChannel, self.subCatUserChannel, self.subCatUserChannel, self.subCatUserChannel, self.subCatMusicGenres, self.subCatUserChannel
],
[
self.subCat_L2,
None,
None,
[None,None,None,None,None,None,None,None,None],
None,
None,
[None,None,None,None],
[None, None, None, None],
[None, None, None, None],
[None, None, None, None],
[None, None, self.subCatMusicChannels, None],
[None, None, None,None]
]
]
self.mh_loadMenu()
def paraQuery(self):
self.session.openWithCallback(self.cb_paraQuery, VirtualKeyBoardExt, title = (_("Enter search criteria")), text = self.param_qr, is_dialog=True)
def cb_paraQuery(self, callback = None, entry = None):
if callback != None:
self.param_qr = callback.strip()
self.showParams()
def paraAuthor(self):
self.session.openWithCallback(self.cb_paraAuthor, VirtualKeyBoardExt, title = (_("Author")), text = self.param_author, is_dialog=True)
def cb_paraAuthor(self, callback = None, entry = None):
if callback != None:
self.param_author = callback.strip()
self.channelId = None
self.showParams()
def showParams(self):
try:
self['query'].setText(self.param_qr)
self['time'].setText(self.param_time[config.mediaportal.yt_param_time_idx.value][0])
self['reserve1'].setText(self.param_video_definition[config.mediaportal.yt_param_video_definition_idx.value][0])
self['reserve2'].setText(self.param_video_type[config.mediaportal.yt_param_video_type_idx.value][0])
self['metalang'].setText(self.param_metalang[config.mediaportal.yt_param_meta_idx.value][0])
self['regionid'].setText(self.param_regionid[config.mediaportal.yt_param_regionid_idx.value][0])
self['3d'].setText(self.param_3d[config.mediaportal.yt_param_3d_idx.value][0])
self['duration'].setText(self.param_duration[config.mediaportal.yt_param_duration_idx.value][0])
self['author'].setText(self.param_author)
self['keywords'].setText(self.param_event_types[config.mediaportal.yt_param_event_types_idx.value][0])
except:
pass
self.paramShowHide()
def paramShowHide(self):
if self.old_mainidx == self.mh_menuIdx[0]:
return
else:
self.old_mainidx = self.mh_menuIdx[0]
showCtr = 0
if self.mh_menuIdx[0] in self.paramList[0][2]:
self['query'].show()
self['Query'].show()
showCtr = 1
else:
self['query'].hide()
self['Query'].hide()
if self.mh_menuIdx[0] in self.paramList[1][2]:
self['keywords'].show()
self['Keywords'].show()
showCtr = 1
else:
self['keywords'].hide()
self['Keywords'].hide()
if self.mh_menuIdx[0] in self.paramList[2][2]:
self['time'].show()
self['Time'].show()
showCtr = 1
else:
self['time'].hide()
self['Time'].hide()
if self.mh_menuIdx[0] in self.paramList[3][2]:
self['metalang'].show()
self['Metalang'].show()
showCtr = 1
else:
self['metalang'].hide()
self['Metalang'].hide()
if self.mh_menuIdx[0] in self.paramList[4][2]:
self['regionid'].show()
self['Regionid'].show()
showCtr = 1
else:
self['regionid'].hide()
self['Regionid'].hide()
if self.mh_menuIdx[0] in self.paramList[5][2]:
self['author'].show()
self['Author'].show()
showCtr = 1
else:
self['author'].hide()
self['Author'].hide()
if self.mh_menuIdx[0] in self.paramList[6][2]:
self['3d'].show()
self['3D'].show()
showCtr = 1
else:
self['3d'].hide()
self['3D'].hide()
if self.mh_menuIdx[0] in self.paramList[7][2]:
self['duration'].show()
self['Duration'].show()
showCtr = 1
else:
self['duration'].hide()
self['Duration'].hide()
if self.mh_menuIdx[0] in self.paramList[8][2]:
self['reserve1'].show()
self['Reserve1'].show()
showCtr = 1
else:
self['reserve1'].hide()
self['Reserve1'].hide()
if self.mh_menuIdx[0] in self.paramList[9][2]:
self['reserve2'].show()
self['Reserve2'].show()
showCtr = 1
else:
self['reserve2'].hide()
self['Reserve2'].hide()
if showCtr:
self['F3'].show()
else:
self['F3'].hide()
def mh_loadMenu(self):
self.showParams()
self.mh_setMenu(0, True)
self.mh_keyLocked = False
def keyYellow(self, edit=1):
c = len(self.paramList)
list = []
if config.mediaportal.yt_paramListIdx.value not in range(0, c):
config.mediaportal.yt_paramListIdx.value = 0
old_idx = config.mediaportal.yt_paramListIdx.value
for i in range(c):
if self.mh_menuIdx[0] in self.paramList[i][2]:
list.append((self.paramList[i][0], i))
if list and edit:
self.session.openWithCallback(self.cb_handlekeyYellow, ChoiceBoxExt, title=_("Edit Parameter"), list = list, selection=old_idx)
else:
self.showParams()
def cb_handlekeyYellow(self, answer):
pidx = answer and answer[1]
if pidx != None:
config.mediaportal.yt_paramListIdx.value = pidx
if type(self.paramList[pidx][1][0]) == list:
self.changeListParam(self.paramList[pidx][0], *self.paramList[pidx][1])
else:
self.paramList[pidx][1][0]()
self.showParams()
def changeListParam(self, nm, l, idx):
if idx.value not in range(0, len(l)):
idx.value = 0
list = []
for i in range(len(l)):
list.append((l[i][0], (i, idx)))
if list:
self.session.openWithCallback(self.cb_handleListParam, ChoiceBoxExt, title=_("Edit Parameter") + " '%s'" % nm, list = list, selection=idx.value)
def cb_handleListParam(self, answer):
p = answer and answer[1]
if p != None:
p[1].value = p[0]
self.showParams()
def getUserChannelId(self, usernm, callback):
url = 'https://www.googleapis.com/youtube/v3/channels?part=id&forUsername=%s&key=%s' % (usernm, APIKEYV3)
twAgentGetPage(url).addCallback(self.parseChannelId).addCallback(lambda x: callback()).addErrback(self.parseChannelId, True)
def parseChannelId(self, data, err=False):
try:
data = json.loads(data)
self.channelId = str(data['items'][0]['id'])
except:
printl('No CID found.',self,'E')
self.channelId = 'none'
def openListScreen(self):
qr = '&q='+urllib.quote(self.param_qr)
tm = self.param_time[config.mediaportal.yt_param_time_idx.value][1]
lr = self.param_metalang[config.mediaportal.yt_param_meta_idx.value][1]
regionid = self.param_regionid[config.mediaportal.yt_param_regionid_idx.value][1]
_3d = self.param_3d[config.mediaportal.yt_param_3d_idx.value][1]
dura = self.param_duration[config.mediaportal.yt_param_duration_idx.value][1]
vid_def = self.param_video_definition[config.mediaportal.yt_param_video_definition_idx.value][1]
event_type = self.param_event_types[config.mediaportal.yt_param_event_types_idx.value][1]
genreurl = self.mh_genreUrl[0] + self.mh_genreUrl[1]
if 'googleapis.com' in genreurl:
if '/guideCategories' in genreurl or '/playlists' in genreurl:
lr = param_hl[config.mediaportal.yt_param_meta_idx.value]
if not '%ACCESSTOKEN%' in genreurl:
if self.param_author:
if not self.channelId:
return self.getUserChannelId(self.param_author, self.openListScreen)
else:
channel_id = '&channelId=%s' % self.channelId
else: channel_id = ''
genreurl = genreurl.replace('%QR%', urllib.quote_plus(self.param_qr))
genreurl += regionid + lr + tm + channel_id + self.param_safesearch[0]
if 'type=video' in genreurl:
vid_type = self.param_video_type[config.mediaportal.yt_param_video_type_idx.value][1]
genreurl += _3d + dura + vid_def + event_type + vid_type
elif 'Favoriten' in self.mh_genreTitle:
genreurl = ''
elif ':Genres' in self.mh_genreTitle:
genreurl = self.mh_genreUrl[1] % self.mh_genreUrl[2]
elif 'Sport:' in self.mh_genreTitle or 'Beliebt auf' in self.mh_genreTitle or 'Music:' in self.mh_genreTitle or 'KinoCheck' in self.mh_genreTitle or '#Live' in self.mh_genreTitle:
genreurl = self.mh_genreUrl[0] + self.mh_genreUrl[1] + self.mh_genreUrl[2]
self.session.open(YT_ListScreen, genreurl, self.mh_genreTitle)
def mh_callGenreListScreen(self):
if 'Your channel' in self.mh_genreTitle:
if not config.mediaportal.yt_refresh_token.value:
self.session.open(MessageBoxExt, _("You need to request a token to allow access to your YouTube account."), MessageBoxExt.TYPE_INFO)
return
self.openListScreen()
def login(self):
if not config.mediaportal.yt_refresh_token.value:
yt_oauth2.requestDevCode(self.session)
else:
self.session.openWithCallback(self.cb_login, MessageBoxExt, _("Did you revoke the access?"), type=MessageBoxExt.TYPE_YESNO, default=False)
def cb_login(self, answer):
if answer is True:
yt_oauth2.requestDevCode(self.session)
def saveIdx(self):
config.mediaportal.yt_param_meta_idx.save()
yt_oauth2._tokenExpired()
class YT_ListScreen(MPScreen, ThumbsHelper):
param_regionid = (
('&gl=US'),
('&gl=GB'),
('&gl=DE'),
('&gl=FR'),
('&gl=IT')
)
def __init__(self, session, stvLink, stvGenre, title=YT_Version):
self.stvLink = stvLink
self.genreName = stvGenre
self.headers = std_headers
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/dokuListScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/dokuListScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
ThumbsHelper.__init__(self)
self.favoGenre = self.genreName.startswith('Favoriten')
self.playlistGenre = 'Playlist feeds' == self.genreName or ':Playlists' in self.genreName
self.channelGenre = self.genreName in ('Channel feeds', 'Channel search')
self.subscriptionGenre = ':Subscriptions' in self.genreName
self.apiUrl = 'gdata.youtube.com' in self.stvLink
self.apiUrlv3 = 'googleapis.com' in self.stvLink
self.musicGenre = 'Music:' in self.genreName
self.ajaxUrl = '/c4_browse_ajax' in self.stvLink
self.c4_browse_ajax = ''
self.url_c4_browse_ajax_list = ['']
self["actions"] = ActionMap(["OkCancelActions", "ShortcutActions", "ColorActions", "SetupActions", "NumberActions", "MenuActions", "EPGSelectActions","DirectionActions"], {
"ok" : self.keyOK,
"red" : self.keyRed,
"cancel" : self.keyCancel,
"5" : self.keyShowThumb,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft,
"upUp" : self.key_repeatedUp,
"rightUp" : self.key_repeatedUp,
"leftUp" : self.key_repeatedUp,
"downUp" : self.key_repeatedUp,
"upRepeated" : self.keyUpRepeated,
"downRepeated" : self.keyDownRepeated,
"rightRepeated" : self.keyRightRepeated,
"leftRepeated" : self.keyLeftRepeated,
"nextBouquet" : self.keyPageUpFast,
"prevBouquet" : self.keyPageDownFast,
"yellow" : self.keyTxtPageUp,
"blue" : self.keyTxtPageDown,
"green" : self.keyGreen,
"0" : self.closeAll,
"1" : self.key_1,
"3" : self.key_3,
"4" : self.key_4,
"6" : self.key_6,
"7" : self.key_7,
"9" : self.key_9
}, -1)
self['title'] = Label(title)
self['ContentTitle'] = Label(self.genreName)
if not self.favoGenre:
self['F2'] = Label(_("Favorite"))
self['F3'] = Label(_("Text-"))
self['F4'] = Label(_("Text+"))
else:
self['F2'] = Label(_("Delete"))
self['F3'] = Label(_("Text-"))
self['F4'] = Label(_("Text+"))
if ('order=' in self.stvLink) and ('type=video' in self.stvLink) or (self.apiUrl and '/uploads' in self.stvLink):
self['F1'] = Label(_("Sort by"))
self.key_sort = True
else:
self['F1'] = Label(_("Exit"))
self.key_sort = False
self['Page'] = Label(_("Page:"))
self['coverArt'].hide()
self.coverHelper = CoverHelper(self['coverArt'])
self.propertyImageUrl = None
self.keyLocked = True
self.baseUrl = "https://www.youtube.com"
self.lastUrl = None
self.videoPrio = int(config.mediaportal.youtubeprio.value)
self.videoPrioS = ['L','M','H']
self.setVideoPrio()
self.favo_path = config.mediaportal.watchlistpath.value + "mp_yt_favorites.xml"
self.keckse = CookieJar()
self.filmliste = []
self.start_idx = 1
self.max_res = int(config.mediaportal.youtube_max_items_pp.value)
self.max_pages = 1000 / self.max_res
self.total_res = 0
self.pages = 0
self.page = 0
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.load_more_href = None
self.onClose.append(self.youtubeExit)
self.modeShowThumb = 1
self.playAll = True
self.showCover = False
self.actType = None
if not self.apiUrl:
self.onLayoutFinish.append(self.loadPageData)
else:
self.onLayoutFinish.append(self.checkAPICallv2)
def checkAPICallv2(self):
m = re.search('/api/users/(.*?)/uploads\?', self.stvLink)
if m:
if not m.group(1).startswith('UC'):
url = 'https://www.googleapis.com/youtube/v3/channels?part=contentDetails&forUsername=%s&key=%s' % (m.group(1), APIKEYV3)
return twAgentGetPage(url, agent=None, headers=self.headers).addCallback(self.parsePlaylistId).addErrback(self.dataError)
else:
self.apiUrl = False
self.apiUrlv3 = True
self.stvLink = 'https://www.googleapis.com/youtube/v3/search?part=snippet&order=date&channelId=%s&key=%s' % (m.group(1), APIKEYV3)
reactor.callLater(0, self.loadPageData)
def parsePlaylistId(self, data):
data = json.loads(data)
try:
plid = data['items'][0]['contentDetails']['relatedPlaylists']['uploads']
except:
printl('No PLID found.',self,'E')
else:
self.stvLink = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&order=date&playlistId=%s&key=%s' % (str(plid), APIKEYV3)
self.apiUrl = False
self.apiUrlv3 = True
reactor.callLater(0, self.loadPageData)
def loadPageData(self):
self.keyLocked = True
self.ml.setList(map(self.YT_ListEntry, [(_('Please wait...'),'','','','','','')]))
if self.favoGenre:
self.getFavos()
else:
url = self.stvLink
if self.apiUrlv3:
url = url.replace('%KEY%', APIKEYV3)
url += "&maxResults=%d" % (self.max_res,)
if self.c4_browse_ajax:
url += '&pageToken=' + self.c4_browse_ajax
elif self.ajaxUrl:
if not 'paging=' in url:
url += '&paging=%d' % max(1, self.page)
url = '%s%s' % (self.baseUrl, url)
elif self.c4_browse_ajax:
url = '%s%s' % (self.baseUrl, self.c4_browse_ajax)
else:
if url[-1] == '?' or url[-1] == '&':
url = '%sflow=list' % url
else:
url = '%s&flow=list' % url
if not '&gl=' in url:
url += self.param_regionid[config.mediaportal.yt_param_regionid_idx.value]
self.lastUrl = url
if self.apiUrlv3 and '%ACT-' in url:
self.actType = re.search('(%ACT-.*?%)', url).group(1)
url = url.replace(self.actType, '', 1)
self.actType = unicode(re.search('%ACT-(.*?)%', self.actType).group(1))
if '%ACCESSTOKEN%' in url:
token = yt_oauth2.getAccessToken()
if not token:
yt_oauth2.refreshToken(self.session).addCallback(self.getData, url).addErrback(self.dataError)
else:
self.getData(token, url)
else:
self.getData(None, url)
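# getData: substitute the OAuth access token into %ACCESSTOKEN% URLs; any
# %playlistId=...% placeholder is resolved first via the user's related playlists.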
def getData(self, token, url):
if token:
url = url.replace('%ACCESSTOKEN%', token, 1)
if '%playlistId=' in url:
return self.getRelatedUserPL(url, token)
twAgentGetPage(url, cookieJar=self.keckse, agent=None, headers=self.headers).addCallback(self.genreData).addErrback(self.dataError)
def getRelatedUserPL(self, url, token):
pl = re.search('%playlistId=(.*?)%', url).group(1)
yt_url = re.sub('%playlistId=.*?%', '', url, 1)
twAgentGetPage(yt_url, cookieJar=self.keckse, agent=None, headers=self.headers).addCallback(self.parseRelatedPL, token, pl).addErrback(self.dataError)
def parseRelatedPL(self, data, token, pl):
try:
data = json.loads(data)
except:
pass
else:
for item in data.get('items', {}):
playlist = item['contentDetails']['relatedPlaylists']
if pl in playlist:
yt_url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&playlistId=%s&access_token=%s&order=date' % (str(playlist[pl]), token)
return twAgentGetPage(yt_url, cookieJar=self.keckse, agent=None, headers=self.headers).addCallback(self.genreData).addErrback(self.dataError)
reactor.callLater(0, self.genreData, '')
def parsePagingUrl(self, data):
regex = re.compile('data-uix-load-more-href="(.*?)"')
m = regex.search(data)
if m:
if not self.page:
self.page = 1
self.c4_browse_ajax = m.group(1).replace('&amp;', '&')
else:
if not 'load-more-text' in data:
self.c4_browse_ajax = ''
self.pages = self.page
def parsePagingUrlv3(self, jdata):
if not self.page:
self.page = 1
self.c4_browse_ajax = str(jdata.get('nextPageToken', ''))
def genreData(self, data):
if self.apiUrlv3:
data = json.loads(data)
self.parsePagingUrlv3(data)
elif not self.apiUrl:
try:
if "load_more_widget_html" in data:
data = json.loads(data)
self.parsePagingUrl(data["load_more_widget_html"].replace("\\n","").replace("\\","").encode('utf-8'))
data = data["content_html"].replace("\\n","").replace("\\","").encode('utf-8')
else:
data = json.loads(data)["content_html"].replace("\\n","").replace("\\","").encode('utf-8')
self.parsePagingUrl(data)
except:
self.parsePagingUrl(data)
elif not self.pages:
m = re.search('totalResults>(.*?)</', data)
if m:
a = int(m.group(1))
self.pages = a // self.max_res
if a % self.max_res:
self.pages += 1
if self.pages > self.max_pages:
self.pages = self.max_pages
self.page = 1
a = 0
l = len(data)
self.filmliste = []
if self.apiUrlv3:
listType = re.search('ItemList|subscriptionList|activityList|playlistList|CategoryList', data['kind']) != None
for item in data.get('items', {}):
if not listType:
kind = item['id']['kind']
else:
kind = item['kind']
if kind:
if 'snippet' in item:
title = str(item['snippet']['title'])
if kind.endswith('#video'):
desc = str(item['snippet']['description'])
try:
url = str(item['id']['videoId'])
img = str(item['snippet']['thumbnails']['default']['url'])
except:
pass
else:
self.filmliste.append(('', title, url, img, desc, '', ''))
elif kind.endswith('#playlistItem'):
desc = str(item['snippet']['description'])
try:
url = str(item['snippet']['resourceId']['videoId'])
img = str(item['snippet']['thumbnails']['default']['url'])
except:
pass
else:
self.filmliste.append(('', title, url, img, desc, '', ''))
elif kind.endswith('#channel'):
desc = str(item['snippet']['description'])
url = str(item['id']['channelId'])
img = str(item['snippet']['thumbnails']['default']['url'])
self.filmliste.append(('', title, url, img, desc, 'CV3', ''))
elif kind.endswith('#playlist'):
desc = str(item['snippet']['description'])
if not listType:
url = str(item['id']['playlistId'])
else:
url = str(item['id'])
img = str(item['snippet']['thumbnails']['default']['url'])
self.filmliste.append(('', title, url, img, desc, 'PV3', ''))
elif kind.endswith('#subscription'):
desc = str(item['snippet']['description'])
url = str(item['snippet']['resourceId']['channelId'])
img = str(item['snippet']['thumbnails']['default']['url'])
self.filmliste.append(('', title, url, img, desc, 'CV3', ''))
elif kind.endswith('#guideCategory'):
desc = ''
url = str(item['snippet']['channelId'])
img = ''
self.filmliste.append(('', title, url, img, desc, 'GV3', ''))
elif kind.endswith('#activity'):
desc = str(item['snippet']['description'])
if item['snippet']['type'] == self.actType:
try:
if self.actType == u'upload':
url = str(item['contentDetails'][self.actType]['videoId'])
else:
url = str(item['contentDetails'][self.actType]['resourceId']['videoId'])
img = str(item['snippet']['thumbnails']['default']['url'])
except:
pass
else:
self.filmliste.append(('', title, url, img, desc, '', ''))
elif 'contentDetails' in item:
details = item['contentDetails']
if kind.endswith('#channel'):
if 'relatedPlaylists' in details:
for k, v in details['relatedPlaylists'].iteritems():
url = str(v)
img = ''
desc = ''
self.filmliste.append(('', str(k).title(), url, img, desc, 'PV3', ''))
else:
data = data.replace('\n', '')
entrys = None
list_item_cont = branded_item = shelf_item = yt_pl_thumb = list_item = pl_video_yt_uix_tile = yt_lockup_video = False
if self.genreName.endswith("Featured Channels") and "branded-page-related-channels-item" in data:
branded_item = True
entrys = data.split("branded-page-related-channels-item")
elif "channels-browse-content-list-item" in data:
list_item = True
entrys = data.split("channels-browse-content-list-item")
elif "browse-list-item-container" in data:
list_item_cont = True
entrys = data.split("browse-list-item-container")
elif re.search('[" ]+shelf-item[" ]+', data):
shelf_item = True
entrys = data.split("shelf-item ")
elif "yt-pl-thumb " in data:
yt_pl_thumb = True
entrys = data.split("yt-pl-thumb ")
elif "pl-video yt-uix-tile " in data:
pl_video_yt_uix_tile = True
entrys = data.split("pl-video yt-uix-tile ")
elif "yt-lockup-video " in data:
yt_lockup_video = True
entrys = data.split("yt-lockup-video ")
if entrys and not self.propertyImageUrl:
m = re.search('"appbar-nav-avatar" src="(.*?)"', entrys[0])
property_img = m and m.group(1)
if property_img:
if property_img.startswith('//'):
property_img = 'http:' + property_img
self.propertyImageUrl = property_img
if list_item_cont or branded_item or shelf_item or list_item or yt_pl_thumb or pl_video_yt_uix_tile or yt_lockup_video:
for entry in entrys[1:]:
if 'data-item-type="V"' in entry:
vidcnt = '[Paid Content] '
elif 'data-title="[Private' in entry:
vidcnt = '[private Video] '
else:
vidcnt = ''
gid = 'S'
m = re.search('href="(.*?)" class=', entry)
vid = m and m.group(1).replace('&amp;', '&')
if not vid:
continue
if branded_item and not '/SB' in vid:
continue
img = title = ''
if '<span class="" >' in entry:
m = re.search('<span class="" >(.*?)</span>', entry)
if m:
title += decodeHtml(m.group(1))
elif 'dir="ltr" title="' in entry:
m = re.search('dir="ltr" title="(.+?)"', entry, re.DOTALL)
if m:
title += decodeHtml(m.group(1).strip())
m = re.search('data-thumb="(.*?)"', entry)
img = m and m.group(1)
else:
m = re.search('dir="ltr".*?">(.+?)</a>', entry, re.DOTALL)
if m:
title += decodeHtml(m.group(1).strip())
m = re.search('data-thumb="(.*?)"', entry)
img = m and m.group(1)
if not img:
img = self.propertyImageUrl
if img and img.startswith('//'):
img = 'http:' + img
desc = ''
if not vidcnt and 'list=' in vid and not '/videos?' in self.stvLink:
m = re.search('formatted-video-count-label">\s+<b>(.*?)</b>', entry)
if m:
vidcnt = '[%s Videos] ' % m.group(1)
elif vid.startswith('/watch?'):
if not vidcnt:
vid = re.search('v=(.+)', vid).group(1)
gid = ''
m = re.search('video-time">(.+?)<', entry)
if m:
dura = m.group(1)
if len(dura)==4:
vtim = '0:0%s' % dura
elif len(dura)==5:
vtim = '0:%s' % dura
else:
vtim = dura
vidcnt = '[%s] ' % vtim
m = re.search('data-name=.*?>(.*?)</.*?<li>(.*?)</li>\s+</ul>', entry)
if m:
desc += 'by ' + decodeHtml(m.group(1)) + ' · ' + m.group(2).replace('</li>', ' ').replace('<li>', '· ') + '\n'
m = re.search('dir="ltr">(.+?)</div>', entry)
if (shelf_item or list_item_cont) and not desc and not m:
m = re.search('shelf-description.*?">(.+?)</div>', entry)
if m:
desc += decodeHtml(m.group(1).strip())
splits = desc.split('<br />')
desc = ''
for split in splits:
if not '<a href="' in split:
desc += split + '\n'
if list_item and not vidcnt:
m = re.search('yt-lockup-meta-info"><li>(.*?)</ul>', entry)
if m:
vidcnt = re.sub('<.*?>', '', m.group(1))
vidcnt = '[%s] ' % vidcnt
self.filmliste.append((vidcnt, str(title), vid, img, desc, gid, ''))
reactor.callLater(0, self.checkListe)
def checkListe(self):
if len(self.filmliste) == 0:
self.filmliste.append(('',_('No contents / results found!'),'','','','',''))
self.keyLocked = True
else:
if not self.page:
self.page = self.pages = 1
menu_len = len(self.filmliste)
self.keyLocked = False
self.ml.setList(map(self.YT_ListEntry, self.filmliste))
self.th_ThumbsQuery(self.filmliste, 1, 2, 3, None, None, self.page, self.pages, mode=self.modeShowThumb)
self.showInfos()
def dataError(self, error):
self.ml.setList(map(self.YT_ListEntry, [('',_('No contents / results found!'),'','','','','')]))
self['handlung'].setText("")
def showInfos(self):
if self.c4_browse_ajax and not self.pages:
self['page'].setText("%d" % self.page)
else:
self['page'].setText("%d / %d" % (self.page,max(self.page, self.pages)))
stvTitle = self['liste'].getCurrent()[0][1]
stvImage = self['liste'].getCurrent()[0][3]
desc = self['liste'].getCurrent()[0][4]
self['name'].setText(stvTitle)
self['handlung'].setText(desc)
self.coverHelper.getCover(stvImage)
def youtubeErr(self, error):
self['handlung'].setText(_("Unfortunately, this video can not be played!\n")+str(error))
def setVideoPrio(self):
self.videoPrio = int(config.mediaportal.youtubeprio.value)
self['vPrio'].setText(self.videoPrioS[self.videoPrio])
def delFavo(self):
i = self['liste'].getSelectedIndex()
c = j = 0
l = len(self.filmliste)
try:
f1 = open(self.favo_path, 'w')
while j < l:
if j != i:
c += 1
dura = self.filmliste[j][0]
dhTitle = self.filmliste[j][1]
dhVideoId = self.filmliste[j][2]
dhImg = self.filmliste[j][3]
desc = urllib.quote(self.filmliste[j][4])
gid = self.filmliste[j][5]
wdat = '<i>%d</i><n>%s</n><v>%s</v><im>%s</im><d>%s</d><g>%s</g><desc>%s</desc>\n' % (c, dhTitle, dhVideoId, dhImg, dura, gid, desc)
f1.write(wdat)
j += 1
f1.close()
self.getFavos()
except IOError, e:
print "Fehler:\n",e
print "eCode: ",e
self['handlung'].setText(_("Error!\n")+str(e))
f1.close()
def addFavo(self):
dhTitle = self['liste'].getCurrent()[0][1]
dura = self['liste'].getCurrent()[0][0]
dhImg = self['liste'].getCurrent()[0][3]
gid = self['liste'].getCurrent()[0][5]
desc = urllib.quote(self['liste'].getCurrent()[0][4])
dhVideoId = self['liste'].getCurrent()[0][2]
if not self.favoGenre and gid in ('S','P','C'):
dura = ''
dhTitle = self.genreName + ':' + dhTitle
try:
if not fileExists(self.favo_path):
f1 = open(self.favo_path, 'w')
f_new = True
else:
f_new = False
f1 = open(self.favo_path, 'a+')
max_i = 0
if not f_new:
data = f1.read()
for m in re.finditer('<i>(\d*?)</i>.*?<v>(.*?)</v>', data):
v_found = False
i, v = m.groups()
ix = int(i)
if ix > max_i:
max_i = ix
if v == dhVideoId:
v_found = True
if v_found:
f1.close()
self.session.open(MessageBoxExt, _("Favorite already exists"), MessageBoxExt.TYPE_INFO, timeout=5)
return
wdat = '<i>%d</i><n>%s</n><v>%s</v><im>%s</im><d>%s</d><g>%s</g><desc>%s</desc>\n' % (max_i + 1, dhTitle, dhVideoId, dhImg, dura, gid, desc)
f1.write(wdat)
f1.close()
self.session.open(MessageBoxExt, _("Favorite added"), MessageBoxExt.TYPE_INFO, timeout=5)
except IOError, e:
print "Fehler:\n",e
print "eCode: ",e
self['handlung'].setText(_("Error!\n")+str(e))
f1.close()
def getFavos(self):
self.filmliste = []
try:
if not fileExists(self.favo_path):
f_new = True
else:
f_new = False
f1 = open(self.favo_path, 'r')
if not f_new:
data = f1.read()
f1.close()
for m in re.finditer('<n>(.*?)</n><v>(.*?)</v><im>(.*?)</im><d>(.*?)</d><g>(.*?)</g><desc>(.*?)</desc>', data):
n, v, img, dura, gid, desc = m.groups()
if dura and not dura.startswith('['):
dura = '[%s] ' % dura.rstrip()
self.filmliste.append((dura, n, v, img, urllib.unquote(desc), gid, ''))
if len(self.filmliste) == 0:
self.pages = self.page = 0
self.filmliste.append((_('No videos found!'),'','','','','',''))
self.keyLocked = True
if not f_new and len(data) > 0:
os.remove(self.favo_path)
else:
self.pages = self.page = 1
self.keyLocked = False
self.ml.setList(map(self.YT_ListEntry, self.filmliste))
self.showInfos()
except IOError, e:
print "Fehler:\n",e
print "eCode: ",e
self['handlung'].setText(_("Error!\n")+str(e))
f1.close()
def changeSort(self):
choices = (
(_("Date"), ("date", 0)),
(_("Rating"), ("rating", 1)),
(_("Relevance"), ("", 2)),
(_("Title"), ("title", 3)),
(_("Video count"), ("videoCount", 4)),
(_("View count"), ("viewCount", 5))
)
self.session.openWithCallback(self.cb_handleSortParam, ChoiceBoxExt, title=_("Sort by"), list=choices, selection=config.mediaportal.yt_param_time_idx.value)
def cb_handleSortParam(self, answer):
p = answer and answer[1]
if p != None:
config.mediaportal.yt_param_time_idx.value = p[1]
self.stvLink = re.sub('order=([a-zA-Z]+)', 'order=%s' % p[0], self.stvLink)
self.loadPageData()
def keyRed(self):
if not self.key_sort:
self.keyCancel()
elif not self.keyLocked:
self.changeSort()
def keyUpRepeated(self):
if self.keyLocked:
return
self['liste'].up()
def keyDownRepeated(self):
if self.keyLocked:
return
self['liste'].down()
def key_repeatedUp(self):
if self.keyLocked:
return
self.showInfos()
def keyLeftRepeated(self):
if self.keyLocked:
return
self['liste'].pageUp()
def keyRightRepeated(self):
if self.keyLocked:
return
self['liste'].pageDown()
def keyUp(self):
if self.keyLocked:
return
i = self['liste'].getSelectedIndex()
if not i:
self.keyPageDownFast()
self['liste'].up()
self.showInfos()
def keyDown(self):
if self.keyLocked:
return
i = self['liste'].getSelectedIndex()
l = len(self.filmliste) - 1
if l == i:
self.keyPageUpFast()
self['liste'].down()
self.showInfos()
def keyTxtPageUp(self):
if self.keyLocked:
return
self['handlung'].pageUp()
def keyTxtPageDown(self):
if self.keyLocked:
return
self['handlung'].pageDown()
def keyPageUpFast(self,step=1):
if self.keyLocked:
return
oldpage = self.page
if not self.c4_browse_ajax and not self.apiUrlv3:
if not self.page or not self.pages:
return
if (self.page + step) <= self.pages:
self.page += step
self.start_idx += self.max_res * step
else:
self.page = 1
self.start_idx = 1
else:
self.url_c4_browse_ajax_list.append(self.c4_browse_ajax)
self.page += 1
if oldpage != self.page:
self.loadPageData()
def keyPageDownFast(self,step=1):
if self.keyLocked:
return
oldpage = self.page
if not self.c4_browse_ajax and not self.apiUrlv3:
if not self.page or not self.pages:
return
if (self.page - step) >= 1:
self.page -= step
self.start_idx -= self.max_res * step
else:
self.page = self.pages
self.start_idx = self.max_res * (self.pages - 1) + 1
else:
if self.page == 1:
return
self.url_c4_browse_ajax_list.pop()
self.c4_browse_ajax = self.url_c4_browse_ajax_list[-1]
self.page -= 1
if oldpage != self.page:
self.loadPageData()
def key_1(self):
self.keyPageDownFast(2)
def keyGreen(self):
if self.keyLocked:
return
if self.favoGenre:
self.delFavo()
else:
self.addFavo()
def key_4(self):
self.keyPageDownFast(5)
def key_7(self):
self.keyPageDownFast(10)
def key_3(self):
self.keyPageUpFast(2)
def key_6(self):
self.keyPageUpFast(5)
def key_9(self):
self.keyPageUpFast(10)
def keyOK(self):
if self.keyLocked:
return
url = self['liste'].getCurrent()[0][2]
gid = self['liste'].getCurrent()[0][5]
if gid == 'P' or gid == 'C':
dhTitle = 'Videos: ' + self['liste'].getCurrent()[0][1]
genreurl = self['liste'].getCurrent()[0][2]
if genreurl.startswith('http'):
genreurl = genreurl.replace('v=2', '')
else:
genreurl = 'https://gdata.youtube.com/feeds/api/playlists/'+self['liste'].getCurrent()[0][2]+'?'
if self.favoGenre:
self.session.openWithCallback(self.getFavos, YT_ListScreen, genreurl, dhTitle)
else:
self.session.open(YT_ListScreen, genreurl, dhTitle)
elif gid == 'CV3':
dhTitle = 'Results: ' + self['liste'].getCurrent()[0][1]
genreurl = self['liste'].getCurrent()[0][2]
genreurl = 'https://www.googleapis.com/youtube/v3/search?part=snippet%2Cid&type=video&order=date&channelId='+self['liste'].getCurrent()[0][2]+'&key=%KEY%'
if self.favoGenre:
self.session.openWithCallback(self.getFavos, YT_ListScreen, genreurl, dhTitle)
else:
self.session.open(YT_ListScreen, genreurl, dhTitle)
elif gid == 'GV3':
dhTitle = 'Results: ' + self['liste'].getCurrent()[0][1]
genreurl = self['liste'].getCurrent()[0][2]
hl = param_hl[config.mediaportal.yt_param_meta_idx.value]
genreurl = 'https://www.googleapis.com/youtube/v3/playlists?part=snippet&channelId='+self['liste'].getCurrent()[0][2]+hl+'&key=%KEY%'
if self.favoGenre:
self.session.openWithCallback(self.getFavos, YT_ListScreen, genreurl, dhTitle)
else:
self.session.open(YT_ListScreen, genreurl, dhTitle)
elif gid == 'PV3':
dhTitle = 'Videos: ' + self['liste'].getCurrent()[0][1]
genreurl = self['liste'].getCurrent()[0][2]
genreurl = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&order=date&playlistId='+self['liste'].getCurrent()[0][2]+'&key=%KEY%'
if self.favoGenre:
self.session.openWithCallback(self.getFavos, YT_ListScreen, genreurl, dhTitle)
else:
self.session.open(YT_ListScreen, genreurl, dhTitle)
elif not self.apiUrl or gid == 'S':
if url.startswith('/playlist?'):
m = re.search('list=(.+)', url)
if m:
url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&playlistId=%s&order=date&key=' % m.group(1)
url += '%KEY%'
dhTitle = 'Playlist: ' + self['liste'].getCurrent()[0][1]
self.session.open(YT_ListScreen, url, dhTitle)
elif url.startswith('/user/') or url.startswith('/channel/'):
url = url.replace('&amp;', '&')
if '?' in url:
url += '&'
else:
url += '?'
# url = self.baseUrl + url + '&flow=list&gl=US'
url = self.baseUrl + url
dhTitle = self.genreName + ':' + self['liste'].getCurrent()[0][1]
self.session.open(YT_ListScreen, url, dhTitle)
elif url.startswith('/watch?v='):
if not 'list=' in url or '/videos?' in self.stvLink:
url = re.search('v=(.+)', url).group(1)
listitem = self.filmliste[self['liste'].getSelectedIndex()]
liste = [(listitem[0], listitem[1], url, listitem[3], listitem[4], listitem[5], listitem[6])]
self.session.openWithCallback(
self.setVideoPrio,
YoutubePlayer,
liste,
0,
playAll = False,
listTitle = self.genreName,
plType='local',
title_inr=1,
showCover=self.showCover
)
else:
url = re.search('list=(.+)', url).group(1)
url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&playlistId=%s&order=date&key=' % url
url += '%KEY%'
dhTitle = 'Playlist: ' + self['liste'].getCurrent()[0][1]
self.session.open(YT_ListScreen, url, dhTitle)
else:
self.session.openWithCallback(
self.setVideoPrio,
YoutubePlayer,
self.filmliste,
self['liste'].getSelectedIndex(),
playAll = self.playAll,
listTitle = self.genreName,
plType='local',
title_inr=1,
showCover=self.showCover
)
elif not self['liste'].getCurrent()[0][6]:
self.session.openWithCallback(
self.setVideoPrio,
YoutubePlayer,
self.filmliste,
self['liste'].getSelectedIndex(),
playAll = self.playAll,
listTitle = self.genreName,
plType='local',
title_inr=1,
showCover=self.showCover
)
def youtubeExit(self):
self.keckse.clear()
del self.filmliste[:]
class YT_Oauth2:
OAUTH2_URL = 'https://accounts.google.com/o/oauth2'
CLIENT_ID = 'client_id=322644284204-umqj2oemlr7q2eofu0sv8dff9cvl7c9a.apps.googleusercontent.com'
CLIENT_SECRET = '&client_secret=dr5Lzk4-VWX7T6PK-dfb21Ic'
SCOPE = '&scope=https://www.googleapis.com/auth/youtube'
GRANT_TYPE = '&grant_type=http://oauth.net/grant_type/device/1.0'
TOKEN_PATH = '/etc/enigma2/mp_yt-access-tokens.json'
accessToken = None
def __init__(self):
import os.path
self._interval = None
self._code = None
self._expiresIn = None
self._refreshTimer = None
self.autoRefresh = False
self.abortPoll = False
self.waitingBox = None
self.session = None
if not config.mediaportal.yt_refresh_token.value:
self._recoverToken()
def _recoverToken(self):
if os.path.isfile(self.TOKEN_PATH):
with open(self.TOKEN_PATH) as data_file:
data = json.load(data_file)
config.mediaportal.yt_refresh_token.value = data['refresh_token'].encode('utf-8')
config.mediaportal.yt_refresh_token.save()
return True
def requestDevCode(self, session):
self.session = session
postData = self.CLIENT_ID + self.SCOPE
twAgentGetPage(self.OAUTH2_URL+'/device/code', method='POST', postdata=postData, headers={'Content-Type': 'application/x-www-form-urlencoded'}).addCallback(self._cb_requestDevCode, False).addErrback(self._cb_requestDevCode)
def _cb_requestDevCode(self, data, error=True):
if error:
self.session.open(MessageBoxExt, _("Error: Unable to request the Device code"), MessageBoxExt.TYPE_ERROR)
printl(_("Error: Unable to request the Device code"),self,'E')
print data
else:
googleData = json.loads(data)
self._interval = googleData['interval']
self._code = '&code=%s' % googleData['device_code'].encode('utf-8')
self._expiresIn = googleData['expires_in']
self.session.openWithCallback(self.cb_request, MessageBoxExt, _("You have to visit:\n{url}\nand enter the code: {code}\nCancel action?").format(url=googleData["verification_url"].encode('utf-8'), code=googleData["user_code"].encode('utf-8')), type = MessageBoxExt.TYPE_YESNO, default = False)
def cb_request(self, answer):
if answer is False:
self.waitingBox = self.session.openWithCallback(self.cb_cancelPoll, MessageBoxExt, _("Waiting for response from the server.\nCancel action?"), type = MessageBoxExt.TYPE_YESNO, default = True, timeout = self._expiresIn - 30)
self.abortPoll = False
reactor.callLater(self._interval, self._pollOauth2Server)
def cb_cancelPoll(self, answer):
if answer is True:
self.abortPoll = True
def _pollOauth2Server(self):
self._tokenExpired()
postData = self.CLIENT_ID + self.CLIENT_SECRET + self._code + self.GRANT_TYPE
twAgentGetPage(self.OAUTH2_URL+'/token', method='POST', postdata=postData, headers={'Content-Type': 'application/x-www-form-urlencoded'}).addCallback(self._cb_poll, False).addErrback(self._cb_poll)
def _cb_poll(self, data, error=True):
if error:
self.waitingBox.cancel()
self.session.open(MessageBoxExt, _('Error: Unable to get tokens!'), MessageBoxExt.TYPE_ERROR)
printl(_('Error: Unable to get tokens!'),self,'E')
print data
else:
try:
tokenData = json.loads(data)
except:
self.waitingBox.cancel()
self.session.open(MessageBoxExt, _('Error: Unable to get tokens!'), MessageBoxExt.TYPE_ERROR)
printl('json data error:%s' % str(data),self,'E')
else:
if not tokenData.get('error',''):
self.accessToken = tokenData['access_token'].encode('utf-8')
config.mediaportal.yt_refresh_token.value = tokenData['refresh_token'].encode('utf-8')
config.mediaportal.yt_refresh_token.save()
self._expiresIn = tokenData['expires_in']
self._startRefreshTimer()
f = open(self.TOKEN_PATH, 'w')
f.write(json.dumps(tokenData))
f.close()
self.waitingBox.cancel()
self.session.open(MessageBoxExt, _('Access granted :)\nFor safety you should create backups of enigma2 settings and \'/etc/enigma2/mp_yt-access-tokens.json\'.\nThe tokens are valid until they are revoked in your Google Account.'), MessageBoxExt.TYPE_INFO)
elif not self.abortPoll:
print tokenData.get('error','').encode('utf-8')
reactor.callLater(self._interval, self._pollOauth2Server)
def refreshToken(self, session, skip=False):
self.session = session
if not skip:
self._tokenExpired()
if config.mediaportal.yt_refresh_token.value:
postData = self.CLIENT_ID + self.CLIENT_SECRET + '&refresh_token=%s&grant_type=refresh_token' % config.mediaportal.yt_refresh_token.value
d = twAgentGetPage(self.OAUTH2_URL+'/token', method='POST', postdata=postData, headers={'Content-Type': 'application/x-www-form-urlencoded'}).addCallback(self._cb_refresh, False).addErrback(self._cb_refresh)
return d
def _cb_refresh(self, data, error=True):
if error:
printl(_('Error: Unable to refresh token!'),self,'E')
print data
return data
else:
try:
tokenData = json.loads(data)
self.accessToken = tokenData['access_token'].encode('utf-8')
self._expiresIn = tokenData['expires_in']
except:
printl('json data error!',self,'E')
print data
return ""
else:
self._startRefreshTimer()
return self.accessToken
def revokeToken(self):
if config.mediaportal.yt_refresh_token.value:
twAgentGetPage(self.OAUTH2_URL+'/revoke?token=%s' % config.mediaportal.yt_refresh_token.value).addCallback(self._cb_revoke, False).addErrback(self._cb_revoke)
def _cb_revoke(self, data, error=True):
if error:
printl('Error: Unable to revoke!',self,'E')
print data
def _startRefreshTimer(self):
if self._refreshTimer != None and self._refreshTimer.active():
self._refreshTimer.cancel()
self._refreshTimer = reactor.callLater(self._expiresIn - 10, self._tokenExpired)
def _tokenExpired(self):
if self._refreshTimer != None and self._refreshTimer.active():
self._refreshTimer.cancel()
self._expiresIn = 0
self.accessToken = None
def getAccessToken(self):
if self.accessToken == None:
return ""
else:
return self.accessToken
yt_oauth2 = YT_Oauth2()
|
n3wb13/OpenNfrGui-5.0-1
|
lib/python/Plugins/Extensions/MediaPortal/additions/fun/youtube.py
|
Python
|
gpl-2.0
| 53,786
|
[
"VisIt"
] |
2075d3f8525dfc103afa111db2cd3b981918a103903ea93a084e56fe51eb70d8
|
"""Common functionality shared across interfaces."""
# Copyright (c) 2016-2017 Andrew Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import numpy as np
from spharm import gaussian_lats_wts
def get_apiorder(ndim, latitude_dim, longitude_dim):
"""
Get the dimension ordering for a transpose to the required API
dimension ordering.
**Arguments:**
*ndim*
Total number of dimensions to consider.
*latitude_dim*
Index of the latitude dimension.
*longitude_dim*
Index of the longitude dimension.
**Returns:**
*apiorder*
A list of indices corresponding to the order required to
conform to the specified API order.
*reorder*
The inverse indices corresponding to *apiorder*.
"""
apiorder = list(range(ndim))
apiorder.remove(latitude_dim)
apiorder.remove(longitude_dim)
apiorder.insert(0, latitude_dim)
apiorder.insert(1, longitude_dim)
reorder = [apiorder.index(i) for i in range(ndim)]
return apiorder, reorder
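# Example (illustrative, hypothetical shapes): for a (time, level, lat, lon)
# array, ndim=4 with latitude_dim=2 and longitude_dim=3 gives
#   apiorder == [2, 3, 0, 1] and reorder == [2, 3, 0, 1],
# so array.transpose(apiorder) moves latitude/longitude to the front and
# transpose(reorder) restores the original dimension order.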
def inspect_gridtype(latitudes):
"""
Determine a grid type by examining the points of a latitude
dimension.
Raises a ValueError if the grid type cannot be determined.
**Argument:**
*latitudes*
An iterable of latitude point values.
**Returns:**
*gridtype*
Either 'gaussian' for a Gaussian grid or 'regular' for an
equally-spaced grid.
"""
# Define a tolerance value for differences; this value must be much
# smaller than expected grid spacings.
tolerance = 5e-4
# Get the number of latitude points in the dimension.
nlat = len(latitudes)
diffs = np.abs(np.diff(latitudes))
equally_spaced = (np.abs(diffs - diffs[0]) < tolerance).all()
if not equally_spaced:
# The latitudes are not equally-spaced, which suggests they might
# be gaussian. Construct sample gaussian latitudes and check if
# the two match.
gauss_reference, wts = gaussian_lats_wts(nlat)
difference = np.abs(latitudes - gauss_reference)
if (difference > tolerance).any():
raise ValueError('latitudes are neither equally-spaced '
'nor Gaussian')
gridtype = 'gaussian'
else:
# The latitudes are equally-spaced. Construct reference global
# equally spaced latitudes and check that the two match.
if nlat % 2:
# Odd number of latitudes includes the poles.
equal_reference = np.linspace(90, -90, nlat)
else:
# Even number of latitudes doesn't include the poles.
delta_latitude = 180. / nlat
equal_reference = np.linspace(90 - 0.5 * delta_latitude,
-90 + 0.5 * delta_latitude,
nlat)
difference = np.abs(latitudes - equal_reference)
if (difference > tolerance).any():
raise ValueError('equally-spaced latitudes are invalid '
'(they may be non-global)')
gridtype = 'regular'
return gridtype
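# Example (illustrative): a global 2.5-degree grid that includes the poles
# is classified as regular,
#   inspect_gridtype(np.linspace(90, -90, 73))  # -> 'regular'
# while the latitudes returned by gaussian_lats_wts(64)[0] are classified
# as 'gaussian'.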
def to3d(array):
new_shape = array.shape[:2] + (np.prod(array.shape[2:], dtype=np.int),)
return array.reshape(new_shape)
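# Example (illustrative): an array of shape (73, 144, 4, 12) is reshaped by
# to3d into shape (73, 144, 48); the first two dimensions are kept and all
# remaining dimensions are collapsed into one.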
|
ajdawson/windspharm
|
windspharm/_common.py
|
Python
|
mit
| 4,312
|
[
"Gaussian"
] |
48eb8583dc1f43da52ebd89f4876d9a64f4f8234ed04b79c92e44352143a6c4b
|
""" Modules will run collectl in playback mode and collect various process
statistics for a given pid's process and process ancestors.
"""
import collections
import csv
import tempfile
from galaxy import util
from ..collectl import stats
import logging
log = logging.getLogger( __name__ )
# Collectl process information cheat sheet:
#
# Record process information for current user.
# % collectl -sZ -f./__instrument_collectl -i 10:10 --procfilt U$USER
#
# TSV Replay of processing information in plottable mode...
#
# % collectl -sZ -P --sep=9 -p __instrument_collectl-jlaptop13-20140322-120919.raw.gz
#
# Has following columns:
# Date Time PID User PR PPID THRD S VmSize VmLck VmRSS VmData VmStk VmExe VmLib CPU SysT UsrT PCT AccumT RKB WKB RKBC WKBC RSYS WSYS CNCL MajF MinF Command
#
# Process data dumped one row per process per interval.
# http://collectl.sourceforge.net/Data-detail.html
PROCESS_COLUMNS = [
"#Date", # Date of interval - e.g. 20140322
"Time", # Time of interval - 12:18:58
"PID", # Process pid.
"User", # Process user.
"PR", # Priority of process.
"PPID", # Parent PID of process.
"THRD", # Thread???
"S", # Process state - S - Sleeping, D - Uninterruptable Sleep, R - Running, Z - Zombie or T - Stopped/Traced
# Memory options - http://ewx.livejournal.com/579283.html
"VmSize",
"VmLck",
"VmRSS",
"VmData",
"VmStk",
"VmExe",
"VmLib",
"CPU", # CPU number of process
"SysT", # Amount of system time consumed during interval
"UsrT", # Amount user time consumed during interval
"PCT", # Percentage of current interval consumed by task
"AccumT", # Total accumulated System and User time since the process began execution
# kilobytes read/written - requires I/O level monitoring to be enabled in kernel.
"RKB", # kilobytes read by process - requires I/O monitoring in kernel
"WKB",
"RKBC",
"WKBC",
"RSYS", # Number of read system calls
"WSYS", # Number of write system calls
"CNCL",
"MajF", # Number of major page faults
"MinF", # Number of minor page faults
"Command", # Command executed
]
# Types of statistics this module can summarize
STATISTIC_TYPES = [ "max", "min", "sum", "count", "avg" ]
COLUMN_INDICES = dict( [ ( col, i ) for i, col in enumerate( PROCESS_COLUMNS ) ] )
PID_INDEX = COLUMN_INDICES[ "PID" ]
PARENT_PID_INDEX = COLUMN_INDICES[ "PPID" ]
DEFAULT_STATISTICS = [
("max", "VmSize"),
("avg", "VmSize"),
("max", "VmRSS"),
("avg", "VmRSS"),
("sum", "SysT"),
("sum", "UsrT"),
("max", "PCT"),
("avg", "PCT"),
("max", "AccumT"),
("sum", "RSYS"),
("sum", "WSYS"),
]
def parse_process_statistics( statistics ):
""" Turn string or list of strings into list of tuples in format ( stat,
resource ) where stat is a value from STATISTIC_TYPES and resource is a
value from PROCESS_COLUMNS.
"""
if statistics is None:
statistics = DEFAULT_STATISTICS
statistics = util.listify( statistics )
statistics = map( _tuplize_statistic, statistics )
# Check for validity...
for statistic in statistics:
if statistic[ 0 ] not in STATISTIC_TYPES:
raise Exception( "Unknown statistic type encountered %s" % statistic[ 0 ] )
if statistic[ 1 ] not in PROCESS_COLUMNS:
raise Exception( "Unknown process column encountered %s" % statistic[ 1 ] )
return statistics
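# Example (illustrative): statistics may also be given as "<stat>_<Column>"
# strings, which _tuplize_statistic splits on the first underscore:
#   parse_process_statistics( [ "max_VmSize", "sum_SysT" ] )
#   # -> [ ( "max", "VmSize" ), ( "sum", "SysT" ) ]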
def generate_process_statistics( collectl_playback_cli, pid, statistics=DEFAULT_STATISTICS ):
""" Playback collectl file and generate summary statistics.
"""
with tempfile.NamedTemporaryFile( ) as tmp_tsv:
collectl_playback_cli.run( stdout=tmp_tsv )
with open( tmp_tsv.name, "r" ) as tsv_file:
return _read_process_statistics( tsv_file, pid, statistics )
def _read_process_statistics( tsv_file, pid, statistics ):
process_summarizer = CollectlProcessSummarizer( pid, statistics )
current_interval = None
for row in csv.reader( tsv_file, dialect="excel-tab" ):
if current_interval is None:
for header, expected_header in zip( row, PROCESS_COLUMNS ):
if header.lower() != expected_header.lower():
raise Exception( "Unknown header value encountered while processing collectl playback - %s" % header )
# First row, check contains correct header.
current_interval = CollectlProcessInterval()
continue
if current_interval.row_is_in( row ):
current_interval.add_row( row )
else:
process_summarizer.handle_interval( current_interval )
current_interval = CollectlProcessInterval()
# Do we have unsummarized rows...
if current_interval and current_interval.rows:
process_summarizer.handle_interval( current_interval )
return process_summarizer.get_statistics()
class CollectlProcessSummarizer( object ):
def __init__( self, pid, statistics ):
self.pid = pid
self.statistics = statistics
self.columns_of_interest = set( [ s[ 1 ] for s in statistics ] )
self.tree_statistics = collections.defaultdict( stats.StatisticsTracker )
self.process_accum_statistics = collections.defaultdict( stats.StatisticsTracker )
self.interval_count = 0
def handle_interval( self, interval ):
self.interval_count += 1
rows = self.__rows_for_process( interval.rows, self.pid )
for column_name in self.columns_of_interest:
column_index = COLUMN_INDICES[ column_name ]
if column_name == "AccumT":
# Should not sum this across pids each interval, sum max at end...
for r in rows:
pid_seconds = self.__time_to_seconds( r[ column_index ] )
self.process_accum_statistics[ r[ PID_INDEX ] ].track( pid_seconds )
else:
# All other statistics should be summed across whole process tree
# at each interval I guess.
if column_name in [ "SysT", "UsrT", "PCT" ]:
to_num = float
else:
to_num = long
interval_stat = sum( to_num( r[ column_index ] ) for r in rows )
self.tree_statistics[ column_name ].track( interval_stat )
def get_statistics( self ):
if self.interval_count == 0:
return []
computed_statistics = []
for statistic in self.statistics:
statistic_type, column = statistic
if column == "AccumT":
# Only statistic that makes sense is max
if statistic_type != "max":
log.warn( "Only statistic max makes sense for AccumT" )
continue
value = sum( [ v.max for v in self.process_accum_statistics.itervalues() ] )
else:
statistics_tracker = self.tree_statistics[ column ]
value = getattr( statistics_tracker, statistic_type )
computed_statistic = ( statistic, value )
computed_statistics.append( computed_statistic )
return computed_statistics
def __rows_for_process( self, rows, pid ):
process_rows = []
pids = self.__all_child_pids( rows, pid )
for row in rows:
if row[ PID_INDEX ] in pids:
process_rows.append( row )
return process_rows
def __all_child_pids( self, rows, pid ):
pids_in_process_tree = set( [ str( self.pid ) ] )
added = True
while added:
added = False
for row in rows:
row_pid = row[ PID_INDEX ]
parent_pid = row[ PARENT_PID_INDEX ]
if parent_pid in pids_in_process_tree and row_pid not in pids_in_process_tree:
pids_in_process_tree.add( row_pid )
added = True
return pids_in_process_tree
def __time_to_seconds( self, minutes_str ):
parts = minutes_str.split( ":" )
seconds = 0.0
for i, val in enumerate( parts ):
seconds += float(val) * ( 60 ** ( len( parts ) - ( i + 1 ) ) )
return seconds
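# Example (illustrative): fields are weighted right-to-left as seconds,
# minutes, hours, so
#   __time_to_seconds( "1:02:03" )  # -> 1*3600 + 2*60 + 3 = 3723.0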
class CollectlProcessInterval( object ):
""" Represent all rows in collectl playback file for given time slice with
ability to filter out just rows corresponding to the process tree
corresponding to a given pid.
"""
def __init__( self ):
self.rows = []
def row_is_in( self, row ):
if not self.rows: # No rows, this row defines interval.
return True
first_row = self.rows[ 0 ]
return first_row[ 0 ] == row[ 0 ] and first_row[ 1 ] == row[ 1 ]
def add_row( self, row ):
self.rows.append( row )
def _tuplize_statistic( statistic ):
if not isinstance( statistic, tuple ):
statistic_split = statistic.split( "_", 1 )
statistic = ( statistic_split[ 0 ].lower(), statistic_split[ 1 ] )
return statistic
__all__ = [ 'generate_process_statistics' ]
|
ssorgatem/pulsar
|
galaxy/jobs/metrics/collectl/processes.py
|
Python
|
apache-2.0
| 9,204
|
[
"Galaxy"
] |
e8042e91c73ed012c89bb08b20f043d6207192befe25f9cd5a84a4af9bc8c813
|
"""
Bootstrap Embedding
"""
import time
import os
import numpy as np
import h5py
from pyscf import ao2mo
from frankenstein import molecule, scf
from frankenstein.be.sd import SD
from frankenstein.pyscf_be.pysd import pySD
from frankenstein.optimizer import NRQN
from frankenstein.tools.io_utils import prtvar
def initialize_solver(mb):
# use fci by default
if mb.imp_sol is None:
raise ValueError("imp_sol must be given! (choose one from 'FCI'/'MP2'/'CCSD'/'CISD'/'RHF')")
solver = mb.imp_sol.upper()
if not solver in ["FCI", "MP2", "CCSD", "CISD", "RHF"]:
raise ValueError("Unknown solver %s." % mb.imp_sol)
# build/check solver parameters
if mb.sol_params is None:
if solver == "FCI":
mb.sol_params = {"state": 0, "method": "davidson", "S2": 0.}
elif solver == "MP2":
mb.sol_params = {"lr_rdm": False}
elif solver == "CCSD":
# 'rdm' could be 'relaxed', 'unrelaxed1', 'unrelaxed2'
mb.sol_params = {"rdm": "relaxed"}
elif solver == "CISD":
mb.sol_params = dict()
elif solver == "RHF":
mb.sol_params = dict()
elif isinstance(mb.sol_params, dict):
if solver == "FCI":
if not ("state" in mb.sol_params and "method" in mb.sol_params
and "S2" in mb.sol_params):
raise ValueError("Invalid 'sol_params' for FCI. Must give 'state', 'method', and 'S2'.")
elif solver == "MP2":
if not "lr_rdm" in mb.sol_params:
raise ValueError("Invalid 'sol_params' for MP2. Must give 'lr_rdm'.")
elif solver == "CCSD":
if not "rdm" in mb.sol_params:
raise ValueError("Invalid 'sol_params' for CCSD. Must give 'rdm'.")
elif solver == "CISD":
pass
elif solver == "RHF":
pass
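# Example (illustrative): for an instance with mb.imp_sol = "MP2" and
# mb.sol_params = None, initialize_solver(mb) fills in the default
# mb.sol_params = {"lr_rdm": False}; an unknown solver name raises ValueError.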
def has_2econ(cons):
has_ = False
for con in cons:
if len(con[0]) == 4:
has_ = True
break
return has_
def make_pot(nsao, cons, u, heff_extra):
"""Generate effective potentials in Schmidt space
Attributes:
nsao (int):
dimension of Schmidt space
cons (3d list of ints):
A list of indices to be bootstrapped
u (list of floats):
A list of potential values
Returns:
A tuple of heff and Veff
"""
if len(cons) != len(u):
raise RuntimeError("# of constraints ({:d}) does not match the length of u ({:d}).".format(len(cons), len(u)))
heff = None
Veff = None
for I, deg_con in enumerate(cons):
for con in deg_con:
if len(con) == 2:
if heff is None:
heff = np.zeros([nsao,nsao])
i, j = con[0], con[1]
heff[i, j] = heff[j, i] = u[I]
elif len(con) == 4:
if Veff is None:
Veff = np.zeros([nsao, nsao, nsao, nsao])
mutate_all(Veff, con, u[I])
else:
raise RuntimeError("Arg2 (cons) has incorrect shape.")
if not heff_extra is None:
if heff is None:
heff = heff_extra
else:
heff += heff_extra
return heff, Veff
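# Example (illustrative): a single one-electron constraint on element (0, 1)
# with potential 0.5,
#   heff, Veff = make_pot(4, [[[0, 1]]], [0.5], None)
# returns a symmetric 4x4 heff with heff[0, 1] == heff[1, 0] == 0.5, and
# Veff is None since no four-index (two-electron) constraints were given.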
def solve_impurity(u, msd, cons, imp_sol, sol_params, rdm_level, heff_extra):
heff, Veff = make_pot(msd.nsao, cons, u, heff_extra)
mc = msd.solve_impurity(imp_sol, rdm_level=rdm_level, heff=heff,
sol_params=sol_params)
return mc
def get_curest(cons, rdm1, rdm2):
"""Compute current estimation of desired matrix elements
(Specified by cons; To be bootstrapped to targets)
Attributes:
cons (3d list of ints):
A list of indices to be bootstrapped
cons[i][j][k] is the k-th index of the j-th degenerate
component of the i-th constraints
rdm1, rdm2 (np.ndarray):
current estimation of rdm's
Returns:
objective function (np.ndarray)
"""
curest = []
for deg_con in cons:
if len(deg_con[0]) == 2:
con = deg_con[0]
curest.append(rdm1[con[0], con[1]])
elif len(deg_con[0]) == 4:
con = deg_con[0]
curest.append(rdm2[con[0], con[1], con[2], con[3]])
else:
raise RuntimeError("Arg1 (cons) has incorrect shape.")
return np.array(curest)
def get_targets(good_con, rdm1, rdm2):
"""Get targets values from good_con
Attributes:
good_con (list of list of ints of good_vals):
A list of indices that correspond to "good" values
good_con[i][k] is the k-th index of the i-th good entry
"""
ncon = len(good_con)
targets = []
for gcon in good_con:
if isinstance(gcon, float):
targets.append(gcon)
elif isinstance(gcon, list):
if len(gcon) == 2:
targets.append(rdm1[gcon[0], gcon[1]])
elif len(gcon) == 4:
targets.append(rdm2[gcon[0], gcon[1], gcon[2], gcon[3]])
else:
raise RuntimeError("Arg1 (good_con) has incorrect shape.")
else:
raise TypeError("Arg1 (good_con) has incompatible type.")
return np.array(targets)
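# Example (illustrative): with cons = [[[0, 1]]] and good_con = [[2, 3]],
# get_curest returns np.array([rdm1[0, 1]]) and get_targets returns
# np.array([rdm1[2, 3]]), so the matching residual formed in get_be_obj is
# rdm1[0, 1] - rdm1[2, 3].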
def get_be_obj(u, msd, cons, good_con, imp_sol, sol_params, heff_extra):
"""Objective function for Bootstrap Embedding
Attributes:
u (list of floats):
A list of potential values.
Args:
cons (3d list of ints):
A list of indices to be bootstrapped
good_con (2d list of ints or np.ndarray):
if 2d list:
bootstrapped to matrix elements specified by good_con
if np.ndarray:
bootstrapped to values specified by good_con
Returns:
loss (float)
"""
if has_2econ(cons):
raise ValueError("Currently we do not support 2e constraints.")
rdm_level = 1
mc = solve_impurity(u, msd, cons, imp_sol, sol_params, rdm_level,
heff_extra)
rdm1s = msd.make_rdm1(mc)
rdm2s = None
curest = get_curest(cons, rdm1s, rdm2s)
targets = get_targets(good_con, rdm1s, rdm2s)
obj = curest - targets
return obj
def get_be_obj_jac_lr_scf(u, msd, cons, good_con, heff_extra):
"""Compute the SCF orbital relaxation contribution to Jacobian using linear response
"""
if has_2econ(cons):
raise RuntimeError("Currently lr-jac only supports 1e constraints.")
heff, Veff = make_pot(msd.nsao, cons, u, heff_extra)
ncon = u.shape[0]
# rhf
if type(msd) is SD:
mols = msd.get_mol(heff=heff)
elif type(msd) is pySD:
mols = msd.get_frankmol(heff=heff)
else:
raise ValueError("Unknown type of msd.")
mf = scf.RHF(mols, verbose=0)
mf.kernel()
C = mf.mo_coeff
# get perturbation list
vs = []
for I, deg_con in enumerate(cons):
v = np.zeros([msd.nsao,msd.nsao])
for con in deg_con:
i, j = con
v[i, j] = v[j, i] = 1.
vs.append(v)
# cphf
from frankenstein.tools.cphf_utils import (cphf_kernel_batch,
get_full_u_batch, uvo_as_full_u_batch)
us = cphf_kernel_batch(mf, vs)
Us = uvo_as_full_u_batch(mf, us)
# compute density matrix in MO basis
dm1_mo = np.diag([1 if i < mf.nocc else 0 for i in range(mf.nao)])
# collect terms
J = np.zeros([ncon,ncon])
for i in range(ncon):
dm1_ao_lr = C @ (Us[i]@dm1_mo +dm1_mo@Us[i].T) @ C.T
J[:,i] = get_curest(cons, dm1_ao_lr, None)
for j in range(ncon):
if not isinstance(good_con[j], float):
p,q = good_con[j]
J[j,i] -= dm1_ao_lr[p,q]
return J
def get_be_obj_jac_lr(u, msd, cons, good_con, heff_extra, lr_rdm=False):
"""Compute Jacobian using linear response
"""
if has_2econ(cons):
raise RuntimeError("Currently lr-jac only supports 1e constraints.")
heff, Veff = make_pot(msd.nsao, cons, u, heff_extra)
# rhf
if type(msd) is SD:
mols = msd.get_mol(heff=heff)
elif type(msd) is pySD:
mols = msd.get_frankmol(heff=heff)
else:
raise ValueError("Unknown type of msd.")
mf = scf.RHF(mols, verbose=0)
mf.kernel()
C = mf.mo_coeff
# get perturbation list
vs = []
for I, deg_con in enumerate(cons):
v = np.zeros([msd.nsao,msd.nsao])
for con in deg_con:
i, j = con
v[i, j] = v[j, i] = 1.
vs.append(v)
# full lr rdm1 or unrelaxed
if lr_rdm:
from frankenstein.tools.mp2_utils import MP2_ERIS, mp2_rdm
eris = MP2_ERIS(mf)
dm1_ao, dm1s_mo_lr = mp2_rdm(mf, eris, vs=vs)
del eris
else:
# cphf
from frankenstein.tools.cphf_utils import cphf_kernel_batch, get_full_u_batch
us = cphf_kernel_batch(mf, vs)
Us = get_full_u_batch(mf, vs, us)
# lr mp2
from frankenstein.tools.mp2_utils import get_tot_rdm1_mo_an_batch
dm1s_mo_lr = get_tot_rdm1_mo_an_batch(mf, vs, Us)
# collect terms
ncon = u.shape[0]
J = np.zeros([ncon,ncon])
for i in range(ncon):
dm1_ao_lr = C @ dm1s_mo_lr[i] @ C.T
J[:,i] = get_curest(cons, dm1_ao_lr, None)
for j in range(ncon):
if not isinstance(good_con[j], float):
p,q = good_con[j]
J[j,i] -= dm1_ao_lr[p,q]
return J
class BE:
"""Basic class for Bootstrap Embedding
Properties that can be set upon initialization
verbose (int, default: msd.verbose)
See MOL.__doc__ for details.
obj_conv (int, default: 7)
Deemed converged when 2-norm of BE matching error < 10**-obj_conv.
du_conv (int, default: 6)
Deemed converged when 2-norm of NR/QN step length < 10**-du_conv.
max_iter (int, default: 50)
Maximum number of NR/QN steps.
imp_sol (default: "fci")
Could be "fci", "ccsd", "cisd", "mp2", "rhf", case insensitive
sol_params (dict, default: depends on imp_sol)
For FCI, it needs
"state" : 0 for ground, 1 for 1st excited, etc
"method" : "davidson" or "bf" (brute-force)
"S2" : 0. for singlet, 2. for triplet, etc.
For MP2, it needs
"lr_rdm" : if True, relaxed density is used
For other solvers, nothing is needed for now.
jac (str or int, default: None)
None : no jacobian is computed --> Quasi-Newton
2 : 2nd-order numerical jacobian --> Newton-Raphson
4 : 4th-order numerical jacobian --> Newton-Raphson
"lr" : linear response (analytical) jacobian --> Newton-Raphson
[NOTE] currently "lr" only supports imp_sol = RHF and MP2.
u0 (np.array, ncon, default: all zero)
Initial guess for BE potential.
B0 (str or np.array, ncon*ncon, default: eye)
Initial guess for jacobian (for NR) or inverted jacobian (for QN).
It could be either a ncon-by-ncon matrix, or string "scf". If the latter, SCF jacobian is used.
bad_con/good_con (list):
These are better explained by example.
E.g., we want to match P_11, P_33 to P_22 and P_12 to P_23, and we know P_11 = P_33 always holds for symmetry reason (i.e., degenerate), we have
bad_con = [[[1,1],[3,3]], [[1,2]]]
good_con = [[2,2], [2,3]]
This could also be achieved via
>>> mb.add_constraint([[1,1],[3,3]], [2,2])
>>> mb.add_constraint([[2,2]], [2,3])
Properties that are generated once "mb.kernel" is called
is_converged (bool):
Convergence status of the BE iteration algorithm.
u (np.array, ncon):
BE matching potential
mc (solver instance):
An instance of the solver, evaluated at the optimized potential
rdm1s (np.array, msd.nsao*msd.nsao):
rdm1 in Schmidt basis (the first msd.nf bases are fragments)
rdm2s (np.array, msd.nsao*msd.nsao*msd.nsao*msd.nsao):
Same as above, but for rdm2
e_persite ([float]*msd.nf):
Electronic energy by fragment sites
e1/2_persite ([float]*msd.nf):
Same as above, but for one-/two-electron energy.
"""
def __init__(self, msd, **kwargs):
"""Initialize a BE instance from input parameters
Attributes:
see BE.__doc__ for more information
Notes:
1. bad_con/good_con can be set either through kwargs
in initialization or :func:`add_constraint`.
2. We highly recommend to call :func:`check_constraints` after
setting constraints.
"""
if not isinstance(msd, SD):
raise TypeError("Arg1 (mf) of BE.__init__ must be a SD instance.")
self.msd = msd
# these properties can be set via initialization
self.verbose = msd.verbose
self.obj_conv = 7
self.du_conv = 6
self.max_iter = 50
self.imp_sol = None
self.sol_params = None
self.jac = None
self.u0 = None
self.B0 = None
self.u0_status = None
self.B0_status = None
self.bad_con = []
self.good_con = []
self.heff_extra = None # extra effective potentials (e.g., chempot)
self.skip_postprocessing = False
self.__dict__.update(kwargs)
self.u = None
self.mc = None
self.e_persite = None
self.e1_persite = None
self.e2_persite = None
self.fc_tot = 0
self.jc_tot = 0
self.is_converged = False
# determine solver
self.initialize_solver()
# properties
@property
def ncon(self):
"""Note that "ncon = 0" only requires "good_con is None". Thus, "bad_con" can still have non-None values.
"""
return 0 if self.bad_con is None else len(self.bad_con)
@property
def dry_run(self):
return len(self.bad_con)*len(self.good_con) == 0
# methods for adding and checking constraints
def add_constraint(self, bad_con=None, good_con=None):
"""Add constraints for Bootstrap Embedding
Inp:
bad_con (2d list of ints):
bad_con[j][k] is the k-th index of the j-th degenerate
constraints
good_con (list of ints or float):
if list:
good_con[k] is the k-th index
if float:
good value
Notes:
1. If either of bad_con or good_con is None, this function does
nothing. This feature is useful when doing FBE.
2. We highly recommend to call :func:`check_constraints` after
adding all constraints.
Examples:
>>> # chose 0, 1, 3, 4 to be fragment sites
>>> msd = SD(mf, [0,1,3,4])
>>> mb = be.BE(msd)
>>> # require P_00 to be 0.4
>>> mb.add_constraint([[0, 0]], 0.4)
>>> # require P_01 and P_34 (degenerate) to match P_13
>>> mb.add_constraint([[0, 1], [3, 4]], [1, 3])
"""
if bad_con is None or good_con is None:
return
if isinstance(bad_con, list) and isinstance(bad_con[0], list) \
and isinstance(bad_con[0][0], int):
self.bad_con.append(bad_con)
else:
raise TypeError("Arg1 (bad_con) must be a 2d list of ints.")
if (isinstance(good_con, list) and isinstance(good_con[0], int)) \
or isinstance(good_con, float):
self.good_con.append(good_con)
else:
raise TypeError("Arg2 (good_con) must be either list of ints or float.")
def check_constraints(self):
"""Check consistency of user input bootstrap constraints
"""
if not (len(self.bad_con) == len(self.good_con)):
raise ValueError("bad_con and good_con must be of same length.")
# printing
@staticmethod
def get_name():
return "BE"
def print_be(self, mode, *args):
"""
"""
nspace = 41
name = self.get_name()
if mode == 0:
print(">>> Entering {:s} kernel\n".format(name))
prtvar("BE conv tol for obj", self.obj_conv, "{:d}")
prtvar("BE conv tol for du", self.du_conv, "{:d}")
prtvar("BE max iteration", self.max_iter, "{:d}")
prtvar("BE # of constraints", self.ncon, "{:d}")
if self.ncon == 0:
print("No constraints are detected --> dry run.", flush=True)
return
prtvar("BE bad inds", str(self.bad_con), "{:s}")
prtvar("BE target inds/vals", str(self.good_con), "{:s}")
prtvar("BE impurity solver", self.imp_sol, "{:s}")
prtvar("BE sol_params", str(self.sol_params), "{:s}")
solver = self.imp_sol.upper()
if solver == "FCI":
prtvar("BE embedding state", self.sol_params["state"], "{:d}")
if solver == "MP2":
prtvar("BE rdm type", "relaxed" if self.sol_params["lr_rdm"]
else "unrelaxed", "{:s}")
prtvar("BE opt algorithm", str(self.optimizer.alg), "{:s}")
prtvar("BE jac method", str(self.jac), "{:s}")
prtvar("BE init u", self.u0_status, "{:s}")
prtvar("BE init B", self.B0_status, "{:s}")
print("\n Starting BE iteration", flush=True)
print(flush=True)
elif mode == 1:
print("\t"+"-"*nspace, flush=True)
print("\t"+" {:4s} {:9s} {:9s} {:s}".format("iter",
"err_obj".rjust(9), "err_du".rjust(9), "comment"))
print("\t"+"-"*nspace, flush=True)
elif mode == 2:
iteration = args[0]
print("\t"+" {:4d} {:.3E} {:.3E} {:s}".format(iteration,
self.optimizer.err_f, self.optimizer.err_dx,
self.optimizer.comment), flush=True)
elif mode == 3:
print("\t"+"-"*nspace, flush=True)
stat_msg = "converged!" if self.is_converged \
else "failed to converge."
msg = self.optimizer.alg + " {:s}".format(stat_msg)
print("\t"+" "*(nspace-len(msg))+msg+"\n", flush=True)
elif mode == 4:
t_init, t_iter, t_post = args
prtvar("# of solver calls", self.optimizer.fc_tot, "{:d}")
prtvar("# of jacobian calls", self.optimizer.jc_tot, "{:d}")
prtvar("t_wall (init)", "{:.3f} sec".format(t_init), "{:s}")
prtvar("t_wall (BE iter)", "{:.3f} sec".format(t_iter), "{:s}")
prtvar("t_wall (postproc)", "{:.3f} sec".format(t_post), "{:s}")
prtvar("Final BE error", self.optimizer.err_f, "{:.3E}")
prtvar("Final BE potentials", " ".join(["{: .6E}".format(ui)
for ui in self.u]), "{:s}")
if not self.e_persite is None:
prtvar("BE energy persite", " ".join(["{: .10f}".format(ei)
for ei in self.e_persite]), "{:s}")
print("\n<<< Leaving {:s} kernel\n".format(name))
else:
raise ValueError("Unknown mode {:s}.".format(str(mode)))
# methods for initialization
initialize_solver = initialize_solver
def initialize_optimizer(self):
m = self.msd
args = (m, self.bad_con, self.good_con, self.imp_sol, self.sol_params,
self.heff_extra)
args_ = (m, self.bad_con, self.good_con, self.heff_extra)
# wrapper function for BE error
def get_be_obj_wrapper(u):
return get_be_obj(u, *args)
# determine jacobian type
if self.jac in [2,4]:
jac = self.jac
elif self.jac == "lr":
solver = self.imp_sol.upper()
if solver == "RHF":
def jac(u):
return get_be_obj_jac_lr_scf(u, *args_)
elif solver == "MP2":
def jac(u):
m = self.msd
return get_be_obj_jac_lr(u, *args_,
lr_rdm=self.sol_params["lr_rdm"])
else:
raise ValueError("""Currently {:s} solver does not support analytical gradient. Use "jac" = 2 or 4 for Newton-Raphson algorithm with second/fouth-order numerical gradient or "jac" = None for quasi-Newton algorithm.""".format(solver))
elif self.jac is None:
jac = None
else:
raise ValueError("""Unknown value for "jac" (must be 2, 4, callable, or None).""")
# determine initial guess for u
if self.u0 is None:
self.u0 = np.zeros(self.ncon)
self.u0_status = "zeros"
elif isinstance(self.u0, np.ndarray):
if not (self.u0.ndim == 1 and self.u0.size == self.ncon):
raise ValueError("Input u0 has invalid shape.")
self.u0_status = "input"
else:
raise ValueError("Input u0 must be either None or numpy array.")
# determine initial guess for B
if jac is None: # Broyden
if self.B0 is None:
B0 = np.eye(self.ncon)
self.B0_status = "eye"
elif isinstance(self.B0, np.ndarray):
if not (self.B0.ndim == 2 and self.B0.size == self.ncon**2):
raise ValueError("Input B0 has invalid shape.")
B0 = self.B0.copy()
self.B0 = None # input B0 is only good for once
self.B0_status = "input"
elif isinstance(self.B0, str):
B0_str = self.B0.upper()
if B0_str in ["SCF", "RHF"]:
B0 = get_be_obj_jac_lr_scf(self.u0, *args_)
elif B0_str == "MP2":
B0 = get_be_obj_jac_lr(self.u0, *args_)
else:
raise ValueError("Unknown B0 type {:s}".format(self.B0))
B0 = np.linalg.inv(B0) # QN needs inv Hess
self.B0_status = B0_str
else:
raise ValueError("Input B0 is invalid.")
else: # Newton
if isinstance(self.B0, np.ndarray):
if not (self.B0.ndim == 2 and self.B0.size == self.ncon**2):
raise ValueError("Input B0 has invalid shape.")
B0 = self.B0.copy()
self.B0 = None # input B0 is only good for once
self.B0_status = "input"
else:
B0 = None
self.optimizer = NRQN(get_be_obj_wrapper, self.ncon, jac=jac,
x0=self.u0, B0=B0, conv_f=self.obj_conv, conv_dx=self.du_conv)
def postprocessing(self):
m = self.msd
self.mc = self.solve_impurity(rdm_level=2)
self.rdm1s = self.msd.make_rdm1(self.mc)
self.rdm2s = self.msd.make_rdm2(self.mc)
self.e1_persite, self.e2_persite, self.e_persite = \
self.msd.get_SD_energy(self.rdm1s, self.rdm2s)
# postprocessing
def solve_impurity(self, rdm_level=0):
m = self.msd
mc = solve_impurity(self.u, m, self.bad_con, self.imp_sol,
self.sol_params, rdm_level, self.heff_extra)
return mc
def make_pot(self, u=None, heff_extra=None):
if u is None: u = self.u
if heff_extra is None: heff_extra = self.heff_extra
return make_pot(self.msd.nsao, self.bad_con, u, heff_extra)
# kernel
def kernel(self):
# if no constraints, simply return
if self.dry_run:
self.is_converged = True
self.u = np.array([]) if self.u0 is None else self.u0
return
# get optimizer
start = time.time()
self.initialize_optimizer()
end = time.time()
t_init = end - start
# print basic job info
if self.verbose > 1:
self.print_be(0)
# print header
if self.verbose > 0:
self.print_be(1)
# BE iteration
start = time.time()
self.is_converged = False
for iteration in range(1,self.max_iter+1):
if self.optimizer.next_step():
self.is_converged = True
if self.verbose > 0:
self.print_be(2, iteration)
if self.is_converged:
break
end = time.time()
t_iter = end - start
self.fc_tot = self.optimizer.fc_tot
self.jc_tot = self.optimizer.jc_tot
if self.verbose > 0:
self.print_be(3)
# postprocessing
self.u = self.optimizer.xnew
start = time.time()
if not self.skip_postprocessing:
self.postprocessing()
end = time.time()
t_post = end - start
if self.verbose > 0:
self.print_be(4, t_init, t_iter, t_post)
def delete_eri(self):
self.msd.delete_eri()
def delete_erifile(self):
self.msd.delete_erifile()
if __name__ == "__main__":
pass
|
hongzhouye/frankenstein
|
be/be.py
|
Python
|
bsd-3-clause
| 25,296
|
[
"PySCF"
] |
24fe803c9ec03019df48b89915876815d4ba8f5e66a2c35bd78d51f28b089b1a
|
#!/usr/bin/env python
"""Module to generically wrap modules for usage from within Galaxy.
First command line argument is a module, e.g: "translate".
Second command line argument is a file to log any output to.
Further arguments are passed to the named module's main method."""
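# Example invocation (hypothetical module and file names):
#   python wrapper.py translate run.log input.fasta output.fasta
# imports the module "translate", writes log output to run.log, and calls
# translate.main(["input.fasta", "output.fasta"]).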
__author__ = "Tim te Beek"
__copyright__ = "Copyright 2011, Netherlands Bioinformatics Centre"
__license__ = "MIT"
import ftplib
# First argument contains fully qualified name of module to be imported
import logging
import sys
NAME = sys.argv[1]
try:
MODULE = __import__(NAME)
except ImportError as ie:
print('Could not import {0}'.format(NAME))
raise
# Second argument contains name of logging output file to use
FILE_HANDLER = logging.FileHandler(sys.argv[2], mode='w')
FILE_HANDLER.setFormatter(logging.Formatter())
FILE_HANDLER.setLevel(logging.INFO)
logging.root.addHandler(FILE_HANDLER)
try:
# Run main method within module with remaining arguments
if sys.argv[3:]:
MODULE.main(sys.argv[3:])
else:
MODULE.main()
except SystemExit:
# Do not report SystemExit errors to FogBugz: Just exit
raise
except AssertionError:
# Do not report AssertionErrors to FogBugz: Not a bug we care about
logging.exception('An assumption failed')
raise
except ftplib.error_temp:
logging.exception('NCBI FTP timed out')
raise
except:
# Should any other error occur, report it to FogBugz automatically
from bugzscout import report_error_to_email
MESSAGE = report_error_to_email()
logging.info('Automatic bug submission reported: %s', MESSAGE)
logging.exception('An error occurred')
raise
finally:
# Always remove logging handler from root
logging.root.removeHandler(FILE_HANDLER)
# Snippet to log available environment variables from inside a Galaxy tool:
# for key in sorted($searchList[2].keys())
# silent sys.stderr.write("\t{0} = {1} ({2})\n".format(str(key), str($searchList[2][key]), type($searchList[2][key])))
# end for
|
ODoSE/odose.nl
|
wrapper.py
|
Python
|
mit
| 1,997
|
[
"Galaxy"
] |
8b1eab58b61122c9261d67d9eb2849c4906c5e29b42388dd54961df44d05da7b
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dosewidget_QtDesign.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_DoseWidget(object):
def setupUi(self, DoseWidget):
DoseWidget.setObjectName("DoseWidget")
DoseWidget.resize(937, 690)
self.gridLayout = QtWidgets.QGridLayout(DoseWidget)
self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.gridLayout.setObjectName("gridLayout")
self.splitter = QtWidgets.QSplitter(DoseWidget)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.layoutWidget_2 = QtWidgets.QWidget(self.splitter)
self.layoutWidget_2.setObjectName("layoutWidget_2")
self.imageLayout = QtWidgets.QVBoxLayout(self.layoutWidget_2)
self.imageLayout.setContentsMargins(0, 0, 0, 0)
self.imageLayout.setObjectName("imageLayout")
self.tabWidget = QtWidgets.QTabWidget(self.splitter)
self.tabWidget.setToolTip("")
self.tabWidget.setObjectName("tabWidget")
self.ViewTab = QtWidgets.QWidget()
self.ViewTab.setObjectName("ViewTab")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.ViewTab)
self.verticalLayout_2.setContentsMargins(10, 10, 10, 10)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label = QtWidgets.QLabel(self.ViewTab)
self.label.setObjectName("label")
self.verticalLayout_2.addWidget(self.label)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_2 = QtWidgets.QLabel(self.ViewTab)
self.label_2.setObjectName("label_2")
self.horizontalLayout.addWidget(self.label_2)
self.doseMin = QtWidgets.QDoubleSpinBox(self.ViewTab)
self.doseMin.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.doseMin.setDecimals(4)
self.doseMin.setObjectName("doseMin")
self.horizontalLayout.addWidget(self.doseMin)
spacerItem = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.label_3 = QtWidgets.QLabel(self.ViewTab)
self.label_3.setObjectName("label_3")
self.horizontalLayout.addWidget(self.label_3)
self.doseMax = QtWidgets.QDoubleSpinBox(self.ViewTab)
self.doseMax.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.doseMax.setDecimals(4)
self.doseMax.setObjectName("doseMax")
self.horizontalLayout.addWidget(self.doseMax)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.bestLimits = QtWidgets.QPushButton(self.ViewTab)
self.bestLimits.setObjectName("bestLimits")
self.verticalLayout_2.addWidget(self.bestLimits)
self.line = QtWidgets.QFrame(self.ViewTab)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout_2.addWidget(self.line)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setContentsMargins(0, -1, -1, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setContentsMargins(-1, 0, 0, -1)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.showIsoLines = QtWidgets.QCheckBox(self.ViewTab)
self.showIsoLines.setObjectName("showIsoLines")
self.horizontalLayout_6.addWidget(self.showIsoLines)
self.verticalLayout_3.addLayout(self.horizontalLayout_6)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_15 = QtWidgets.QLabel(self.ViewTab)
self.label_15.setObjectName("label_15")
self.horizontalLayout_7.addWidget(self.label_15)
self.nominalDose = QtWidgets.QDoubleSpinBox(self.ViewTab)
self.nominalDose.setEnabled(False)
self.nominalDose.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.nominalDose.setDecimals(4)
self.nominalDose.setObjectName("nominalDose")
self.horizontalLayout_7.addWidget(self.nominalDose)
self.verticalLayout_3.addLayout(self.horizontalLayout_7)
spacerItem2 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem2)
self.horizontalLayout_2.addLayout(self.verticalLayout_3)
self.isoListField = QtWidgets.QTextEdit(self.ViewTab)
self.isoListField.setEnabled(False)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.isoListField.sizePolicy().hasHeightForWidth())
self.isoListField.setSizePolicy(sizePolicy)
self.isoListField.setObjectName("isoListField")
self.horizontalLayout_2.addWidget(self.isoListField)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem3)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.line_3 = QtWidgets.QFrame(self.ViewTab)
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.verticalLayout_2.addWidget(self.line_3)
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setContentsMargins(-1, 0, -1, -1)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
self.horizontalLayout_12.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
self.smooth = QtWidgets.QCheckBox(self.ViewTab)
self.smooth.setObjectName("smooth")
self.horizontalLayout_12.addWidget(self.smooth)
self.smoothFunction = QtWidgets.QComboBox(self.ViewTab)
self.smoothFunction.setObjectName("smoothFunction")
self.horizontalLayout_12.addWidget(self.smoothFunction)
self.verticalLayout_5.addLayout(self.horizontalLayout_12)
self.gaussSettingsLayout = QtWidgets.QHBoxLayout()
self.gaussSettingsLayout.setContentsMargins(-1, 0, -1, -1)
self.gaussSettingsLayout.setObjectName("gaussSettingsLayout")
spacerItem4 = QtWidgets.QSpacerItem(40, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gaussSettingsLayout.addItem(spacerItem4)
self.label_23 = QtWidgets.QLabel(self.ViewTab)
self.label_23.setObjectName("label_23")
self.gaussSettingsLayout.addWidget(self.label_23)
self.smoothSigma = QtWidgets.QDoubleSpinBox(self.ViewTab)
self.smoothSigma.setProperty("value", 1.0)
self.smoothSigma.setObjectName("smoothSigma")
self.gaussSettingsLayout.addWidget(self.smoothSigma)
self.verticalLayout_5.addLayout(self.gaussSettingsLayout)
self.sgSettingsLayout = QtWidgets.QHBoxLayout()
self.sgSettingsLayout.setContentsMargins(-1, 0, -1, -1)
self.sgSettingsLayout.setObjectName("sgSettingsLayout")
spacerItem5 = QtWidgets.QSpacerItem(40, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.sgSettingsLayout.addItem(spacerItem5)
self.smoothLabel1 = QtWidgets.QLabel(self.ViewTab)
self.smoothLabel1.setObjectName("smoothLabel1")
self.sgSettingsLayout.addWidget(self.smoothLabel1)
self.smoothWindowSize = QtWidgets.QSpinBox(self.ViewTab)
self.smoothWindowSize.setMinimum(3)
self.smoothWindowSize.setMaximum(100)
self.smoothWindowSize.setSingleStep(2)
self.smoothWindowSize.setProperty("value", 3)
self.smoothWindowSize.setProperty("toolTipDuration", -4)
self.smoothWindowSize.setObjectName("smoothWindowSize")
self.sgSettingsLayout.addWidget(self.smoothWindowSize)
self.smoothLabel2 = QtWidgets.QLabel(self.ViewTab)
self.smoothLabel2.setObjectName("smoothLabel2")
self.sgSettingsLayout.addWidget(self.smoothLabel2)
self.smoothOrder = QtWidgets.QSpinBox(self.ViewTab)
self.smoothOrder.setMinimum(0)
self.smoothOrder.setMaximum(100)
self.smoothOrder.setProperty("value", 2)
self.smoothOrder.setObjectName("smoothOrder")
self.sgSettingsLayout.addWidget(self.smoothOrder)
self.verticalLayout_5.addLayout(self.sgSettingsLayout)
self.verticalLayout_2.addLayout(self.verticalLayout_5)
self.line_6 = QtWidgets.QFrame(self.ViewTab)
self.line_6.setFrameShape(QtWidgets.QFrame.HLine)
self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_6.setObjectName("line_6")
self.verticalLayout_2.addWidget(self.line_6)
self.refreshButton = QtWidgets.QPushButton(self.ViewTab)
self.refreshButton.setObjectName("refreshButton")
self.verticalLayout_2.addWidget(self.refreshButton)
spacerItem6 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem6)
self.line_2 = QtWidgets.QFrame(self.ViewTab)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.verticalLayout_2.addWidget(self.line_2)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.exportTxtButton = QtWidgets.QPushButton(self.ViewTab)
self.exportTxtButton.setObjectName("exportTxtButton")
self.horizontalLayout_5.addWidget(self.exportTxtButton)
self.exportNpButton = QtWidgets.QPushButton(self.ViewTab)
self.exportNpButton.setObjectName("exportNpButton")
self.horizontalLayout_5.addWidget(self.exportNpButton)
self.verticalLayout_2.addLayout(self.horizontalLayout_5)
self.tabWidget.addTab(self.ViewTab, "")
self.CalcTab = QtWidgets.QWidget()
self.CalcTab.setObjectName("CalcTab")
self.verticalLayout = QtWidgets.QVBoxLayout(self.CalcTab)
self.verticalLayout.setContentsMargins(10, 10, 10, 10)
self.verticalLayout.setObjectName("verticalLayout")
self.label_21 = QtWidgets.QLabel(self.CalcTab)
self.label_21.setObjectName("label_21")
self.verticalLayout.addWidget(self.label_21)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_9 = QtWidgets.QLabel(self.CalcTab)
self.label_9.setObjectName("label_9")
self.horizontalLayout_3.addWidget(self.label_9)
spacerItem7 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem7)
self.evalFunction = QtWidgets.QComboBox(self.CalcTab)
self.evalFunction.setObjectName("evalFunction")
self.horizontalLayout_3.addWidget(self.evalFunction)
spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem8)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.label_10 = QtWidgets.QLabel(self.CalcTab)
self.label_10.setObjectName("label_10")
self.verticalLayout.addWidget(self.label_10)
self.newInputGrid = QtWidgets.QGridLayout()
self.newInputGrid.setContentsMargins(0, 0, -1, -1)
self.newInputGrid.setObjectName("newInputGrid")
self.label_5 = QtWidgets.QLabel(self.CalcTab)
self.label_5.setObjectName("label_5")
self.newInputGrid.addWidget(self.label_5, 1, 1, 1, 1)
self.height = QtWidgets.QDoubleSpinBox(self.CalcTab)
self.height.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.height.setDecimals(4)
self.height.setSingleStep(0.01)
self.height.setObjectName("height")
self.newInputGrid.addWidget(self.height, 1, 5, 1, 1)
self.label_4 = QtWidgets.QLabel(self.CalcTab)
self.label_4.setObjectName("label_4")
self.newInputGrid.addWidget(self.label_4, 0, 1, 1, 1)
self.label_7 = QtWidgets.QLabel(self.CalcTab)
self.label_7.setObjectName("label_7")
self.newInputGrid.addWidget(self.label_7, 1, 4, 1, 1)
self.yCenter = QtWidgets.QDoubleSpinBox(self.CalcTab)
self.yCenter.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.yCenter.setDecimals(4)
self.yCenter.setSingleStep(0.01)
self.yCenter.setObjectName("yCenter")
self.newInputGrid.addWidget(self.yCenter, 0, 5, 1, 1)
self.label_6 = QtWidgets.QLabel(self.CalcTab)
self.label_6.setObjectName("label_6")
self.newInputGrid.addWidget(self.label_6, 0, 4, 1, 1)
self.width = QtWidgets.QDoubleSpinBox(self.CalcTab)
self.width.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.width.setDecimals(4)
self.width.setSingleStep(0.01)
self.width.setObjectName("width")
self.newInputGrid.addWidget(self.width, 1, 2, 1, 1)
self.xCenter = QtWidgets.QDoubleSpinBox(self.CalcTab)
self.xCenter.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.xCenter.setDecimals(4)
self.xCenter.setSingleStep(0.01)
self.xCenter.setObjectName("xCenter")
self.newInputGrid.addWidget(self.xCenter, 0, 2, 1, 1)
self.angle = QtWidgets.QDoubleSpinBox(self.CalcTab)
self.angle.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.angle.setDecimals(4)
self.angle.setMinimum(-360.0)
self.angle.setMaximum(360.0)
self.angle.setObjectName("angle")
self.newInputGrid.addWidget(self.angle, 2, 5, 1, 1)
self.label_8 = QtWidgets.QLabel(self.CalcTab)
self.label_8.setObjectName("label_8")
self.newInputGrid.addWidget(self.label_8, 2, 4, 1, 1)
spacerItem9 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
self.newInputGrid.addItem(spacerItem9, 0, 3, 1, 1)
spacerItem10 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.newInputGrid.addItem(spacerItem10, 0, 6, 1, 1)
self.verticalLayout.addLayout(self.newInputGrid)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.alternateSpecToggle = QtWidgets.QCheckBox(self.CalcTab)
self.alternateSpecToggle.setObjectName("alternateSpecToggle")
self.horizontalLayout_4.addWidget(self.alternateSpecToggle)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.oldInputGrid = QtWidgets.QGridLayout()
self.oldInputGrid.setContentsMargins(-1, 10, -1, -1)
self.oldInputGrid.setObjectName("oldInputGrid")
self.x0 = QtWidgets.QDoubleSpinBox(self.CalcTab)
self.x0.setEnabled(False)
self.x0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.x0.setDecimals(4)
self.x0.setSingleStep(0.01)
self.x0.setObjectName("x0")
self.oldInputGrid.addWidget(self.x0, 0, 2, 1, 1)
self.x1 = QtWidgets.QDoubleSpinBox(self.CalcTab)
self.x1.setEnabled(False)
self.x1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.x1.setDecimals(4)
self.x1.setSingleStep(0.01)
self.x1.setObjectName("x1")
self.oldInputGrid.addWidget(self.x1, 1, 2, 1, 1)
self.y1 = QtWidgets.QDoubleSpinBox(self.CalcTab)
self.y1.setEnabled(False)
self.y1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.y1.setDecimals(4)
self.y1.setSingleStep(0.01)
self.y1.setObjectName("y1")
self.oldInputGrid.addWidget(self.y1, 1, 5, 1, 1)
self.y0 = QtWidgets.QDoubleSpinBox(self.CalcTab)
self.y0.setEnabled(False)
self.y0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.y0.setDecimals(4)
self.y0.setSingleStep(0.01)
self.y0.setObjectName("y0")
self.oldInputGrid.addWidget(self.y0, 0, 5, 1, 1)
self.label_11 = QtWidgets.QLabel(self.CalcTab)
self.label_11.setObjectName("label_11")
self.oldInputGrid.addWidget(self.label_11, 0, 1, 1, 1)
self.label_12 = QtWidgets.QLabel(self.CalcTab)
self.label_12.setObjectName("label_12")
self.oldInputGrid.addWidget(self.label_12, 1, 1, 1, 1)
self.label_13 = QtWidgets.QLabel(self.CalcTab)
self.label_13.setObjectName("label_13")
self.oldInputGrid.addWidget(self.label_13, 0, 4, 1, 1)
self.label_14 = QtWidgets.QLabel(self.CalcTab)
self.label_14.setObjectName("label_14")
self.oldInputGrid.addWidget(self.label_14, 1, 4, 1, 1)
spacerItem11 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
self.oldInputGrid.addItem(spacerItem11, 0, 3, 1, 1)
spacerItem12 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.oldInputGrid.addItem(spacerItem12, 0, 6, 1, 1)
self.verticalLayout.addLayout(self.oldInputGrid)
self.horizontalLayout_13 = QtWidgets.QHBoxLayout()
self.horizontalLayout_13.setContentsMargins(-1, 10, -1, -1)
self.horizontalLayout_13.setObjectName("horizontalLayout_13")
self.useAsCenter = QtWidgets.QCheckBox(self.CalcTab)
self.useAsCenter.setObjectName("useAsCenter")
self.horizontalLayout_13.addWidget(self.useAsCenter)
self.useAsMax = QtWidgets.QCheckBox(self.CalcTab)
self.useAsMax.setObjectName("useAsMax")
self.horizontalLayout_13.addWidget(self.useAsMax)
spacerItem13 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_13.addItem(spacerItem13)
self.verticalLayout.addLayout(self.horizontalLayout_13)
self.horizontalLayout_11 = QtWidgets.QHBoxLayout()
self.horizontalLayout_11.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
self.calculateButton = QtWidgets.QPushButton(self.CalcTab)
self.calculateButton.setObjectName("calculateButton")
self.horizontalLayout_11.addWidget(self.calculateButton)
self.verticalLayout.addLayout(self.horizontalLayout_11)
self.clearFitButton = QtWidgets.QPushButton(self.CalcTab)
self.clearFitButton.setObjectName("clearFitButton")
self.verticalLayout.addWidget(self.clearFitButton)
self.line_4 = QtWidgets.QFrame(self.CalcTab)
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.verticalLayout.addWidget(self.line_4)
self.label_16 = QtWidgets.QLabel(self.CalcTab)
self.label_16.setObjectName("label_16")
self.verticalLayout.addWidget(self.label_16)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.label_17 = QtWidgets.QLabel(self.CalcTab)
self.label_17.setObjectName("label_17")
self.horizontalLayout_8.addWidget(self.label_17)
self.saveTablePath = QtWidgets.QLineEdit(self.CalcTab)
self.saveTablePath.setObjectName("saveTablePath")
self.horizontalLayout_8.addWidget(self.saveTablePath)
self.browseSaveTable = QtWidgets.QPushButton(self.CalcTab)
self.browseSaveTable.setObjectName("browseSaveTable")
self.horizontalLayout_8.addWidget(self.browseSaveTable)
self.verticalLayout.addLayout(self.horizontalLayout_8)
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
spacerItem14 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_9.addItem(spacerItem14)
self.label_18 = QtWidgets.QLabel(self.CalcTab)
self.label_18.setObjectName("label_18")
self.horizontalLayout_9.addWidget(self.label_18)
self.filmNumber = QtWidgets.QLineEdit(self.CalcTab)
self.filmNumber.setObjectName("filmNumber")
self.horizontalLayout_9.addWidget(self.filmNumber)
self.saveCalculationData = QtWidgets.QPushButton(self.CalcTab)
self.saveCalculationData.setObjectName("saveCalculationData")
self.horizontalLayout_9.addWidget(self.saveCalculationData)
self.verticalLayout.addLayout(self.horizontalLayout_9)
spacerItem15 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem15)
self.tabWidget.addTab(self.CalcTab, "")
self.ExtraTab = QtWidgets.QWidget()
self.ExtraTab.setObjectName("ExtraTab")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.ExtraTab)
self.verticalLayout_4.setContentsMargins(10, 10, 10, 10)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_22 = QtWidgets.QLabel(self.ExtraTab)
self.label_22.setObjectName("label_22")
self.verticalLayout_4.addWidget(self.label_22)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.label_19 = QtWidgets.QLabel(self.ExtraTab)
self.label_19.setObjectName("label_19")
self.horizontalLayout_10.addWidget(self.label_19)
self.doubleSpinBox = QtWidgets.QDoubleSpinBox(self.ExtraTab)
self.doubleSpinBox.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.doubleSpinBox.setObjectName("doubleSpinBox")
self.horizontalLayout_10.addWidget(self.doubleSpinBox)
self.label_20 = QtWidgets.QLabel(self.ExtraTab)
self.label_20.setObjectName("label_20")
self.horizontalLayout_10.addWidget(self.label_20)
spacerItem16 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem16)
self.depthDoseButton = QtWidgets.QPushButton(self.ExtraTab)
self.depthDoseButton.setObjectName("depthDoseButton")
self.horizontalLayout_10.addWidget(self.depthDoseButton)
spacerItem17 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem17)
self.verticalLayout_4.addLayout(self.horizontalLayout_10)
self.line_5 = QtWidgets.QFrame(self.ExtraTab)
self.line_5.setFrameShape(QtWidgets.QFrame.HLine)
self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_5.setObjectName("line_5")
self.verticalLayout_4.addWidget(self.line_5)
spacerItem18 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem18)
self.tabWidget.addTab(self.ExtraTab, "")
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
self.retranslateUi(DoseWidget)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(DoseWidget)
DoseWidget.setTabOrder(self.tabWidget, self.doseMin)
DoseWidget.setTabOrder(self.doseMin, self.doseMax)
DoseWidget.setTabOrder(self.doseMax, self.bestLimits)
DoseWidget.setTabOrder(self.bestLimits, self.showIsoLines)
DoseWidget.setTabOrder(self.showIsoLines, self.nominalDose)
DoseWidget.setTabOrder(self.nominalDose, self.isoListField)
DoseWidget.setTabOrder(self.isoListField, self.refreshButton)
DoseWidget.setTabOrder(self.refreshButton, self.exportTxtButton)
DoseWidget.setTabOrder(self.exportTxtButton, self.exportNpButton)
DoseWidget.setTabOrder(self.exportNpButton, self.evalFunction)
DoseWidget.setTabOrder(self.evalFunction, self.xCenter)
DoseWidget.setTabOrder(self.xCenter, self.yCenter)
DoseWidget.setTabOrder(self.yCenter, self.width)
DoseWidget.setTabOrder(self.width, self.height)
DoseWidget.setTabOrder(self.height, self.angle)
DoseWidget.setTabOrder(self.angle, self.alternateSpecToggle)
DoseWidget.setTabOrder(self.alternateSpecToggle, self.x0)
DoseWidget.setTabOrder(self.x0, self.y0)
DoseWidget.setTabOrder(self.y0, self.x1)
DoseWidget.setTabOrder(self.x1, self.y1)
DoseWidget.setTabOrder(self.y1, self.calculateButton)
DoseWidget.setTabOrder(self.calculateButton, self.clearFitButton)
DoseWidget.setTabOrder(self.clearFitButton, self.saveTablePath)
DoseWidget.setTabOrder(self.saveTablePath, self.browseSaveTable)
DoseWidget.setTabOrder(self.browseSaveTable, self.filmNumber)
DoseWidget.setTabOrder(self.filmNumber, self.saveCalculationData)
DoseWidget.setTabOrder(self.saveCalculationData, self.doubleSpinBox)
DoseWidget.setTabOrder(self.doubleSpinBox, self.depthDoseButton)
def retranslateUi(self, DoseWidget):
_translate = QtCore.QCoreApplication.translate
DoseWidget.setWindowTitle(_translate("DoseWidget", "Form"))
self.label.setText(_translate("DoseWidget", "dose scale limits:"))
self.label_2.setText(_translate("DoseWidget", "min"))
self.label_3.setText(_translate("DoseWidget", "max"))
self.bestLimits.setText(_translate("DoseWidget", "restore default limits"))
        self.showIsoLines.setToolTip(_translate("DoseWidget", "Check to show iso dose lines; requires a refresh"))
self.showIsoLines.setText(_translate("DoseWidget", "show iso dose lines"))
self.label_15.setText(_translate("DoseWidget", "nominal dose"))
        self.nominalDose.setToolTip(_translate("DoseWidget", "The percentages used to draw the iso dose lines are calculated from this dose"))
self.isoListField.setHtml(_translate("DoseWidget", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Sans Serif\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\'; font-size:8pt;\">80</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\'; font-size:8pt;\">60</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\'; font-size:8pt;\">40</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\'; font-size:8pt;\">20</span></p></body></html>"))
self.smooth.setText(_translate("DoseWidget", "smooth data with"))
self.label_23.setText(_translate("DoseWidget", "sigma"))
        self.smoothSigma.setToolTip(_translate("DoseWidget", "Sigma of the Gaussian smoothing in pixels"))
self.smoothLabel1.setText(_translate("DoseWidget", "window size"))
        self.smoothWindowSize.setToolTip(_translate("DoseWidget", "Window size for the Savitzky-Golay filter. A larger window smooths more strongly; the size must be odd."))
self.smoothLabel2.setText(_translate("DoseWidget", "order"))
        self.smoothOrder.setToolTip(_translate("DoseWidget", "Order of the polynomial fitted in the Savitzky-Golay filter; must be at most windowSize-1. Smaller orders smooth more strongly, and an order of 0 is equivalent to a moving average."))
self.refreshButton.setText(_translate("DoseWidget", "refresh dose plot"))
        self.exportTxtButton.setToolTip(_translate("DoseWidget", "export the dose distribution into a txt file to use elsewhere (separator is tab)"))
self.exportTxtButton.setText(_translate("DoseWidget", "export as txt"))
        self.exportNpButton.setToolTip(_translate("DoseWidget", "export the dose distribution into an npy file which can be loaded by numpy.load() in Python (smaller than txt)"))
self.exportNpButton.setText(_translate("DoseWidget", "export as numpy"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.ViewTab), _translate("DoseWidget", "View and Export"))
self.label_21.setText(_translate("DoseWidget", "analyze dose distribution:"))
        self.label_9.setText(_translate("DoseWidget", "evaluation method"))
self.evalFunction.setToolTip(_translate("DoseWidget", "select what to do and over what area"))
self.label_10.setText(_translate("DoseWidget", "region of interest for evaluation"))
self.label_5.setText(_translate("DoseWidget", "width"))
self.height.setToolTip(_translate("DoseWidget", "height (y-direction), not used for profile"))
self.label_4.setText(_translate("DoseWidget", "x-center"))
self.label_7.setText(_translate("DoseWidget", "height"))
self.yCenter.setToolTip(_translate("DoseWidget", "y coordinate of the center of the ROI"))
self.label_6.setText(_translate("DoseWidget", "y-center"))
self.width.setToolTip(_translate("DoseWidget", "width (x-direction) of ROI or length of profile"))
self.xCenter.setToolTip(_translate("DoseWidget", "x coordinate of the center of the ROI"))
        self.angle.setToolTip(_translate("DoseWidget", "rotation angle of the ROI (counterclockwise)"))
self.label_8.setText(_translate("DoseWidget", "angle"))
self.alternateSpecToggle.setText(_translate("DoseWidget", "use alternative/old region specification"))
self.label_11.setText(_translate("DoseWidget", "x0"))
self.label_12.setText(_translate("DoseWidget", "x1"))
self.label_13.setText(_translate("DoseWidget", "y0"))
self.label_14.setText(_translate("DoseWidget", "y1"))
self.useAsCenter.setToolTip(_translate("DoseWidget", "Use calculation results as input for x-center and y-center of ROI. Works only with methods that calculate a center-like coordinate, e.g. 2D Gauss or Max."))
self.useAsCenter.setText(_translate("DoseWidget", "use result as center"))
        self.useAsMax.setToolTip(_translate("DoseWidget", "Use the calculation result as max for the dose limits in the visualization. Only applicable to methods that output a max-like value, e.g. 2D Gauss or center of mass."))
self.useAsMax.setText(_translate("DoseWidget", "use result as max"))
self.calculateButton.setText(_translate("DoseWidget", "calculate"))
self.clearFitButton.setToolTip(_translate("DoseWidget", "remove the contour plot and the center marker created by 2D fit from the dose plot"))
self.clearFitButton.setText(_translate("DoseWidget", "clear 2D fit"))
self.label_16.setText(_translate("DoseWidget", "save calculation results to file:"))
self.label_17.setText(_translate("DoseWidget", "save results to:"))
        self.saveTablePath.setToolTip(_translate("DoseWidget", "Path to save the data to. Each save operation appends a new line."))
self.browseSaveTable.setToolTip(_translate("DoseWidget", "browse for the file instead of typing the path"))
self.browseSaveTable.setText(_translate("DoseWidget", "browse"))
self.label_18.setText(_translate("DoseWidget", "film no."))
self.filmNumber.setToolTip(_translate("DoseWidget", "Give the number of the film. This is written as the first column to the save file."))
        self.saveCalculationData.setToolTip(_translate("DoseWidget", "Calculate and save the results, along with the settings needed to reproduce them, to a new line in the save file."))
self.saveCalculationData.setText(_translate("DoseWidget", "save"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.CalcTab), _translate("DoseWidget", "Calculate"))
self.label_22.setText(_translate("DoseWidget", "calculate the depth dose"))
self.label_19.setText(_translate("DoseWidget", "integration radius"))
self.doubleSpinBox.setToolTip(_translate("DoseWidget", "Lateral distance from the maximum that is considered in the integration. Should be large enough to include the entire beam at the distal edge."))
self.label_20.setText(_translate("DoseWidget", "cm"))
        self.depthDoseButton.setToolTip(_translate("DoseWidget", "Determine the maximum in y-direction for each slice in x-direction. Integrate over a circular area around the maximum and display this as the depth dose curve."))
self.depthDoseButton.setText(_translate("DoseWidget", "show depth dose"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.ExtraTab), _translate("DoseWidget", "Extras"))
|
mgotz/EBT_evaluation
|
ebttools/gui/dosewidget_ui_qt5.py
|
Python
|
mit
| 34,994
|
[
"Gaussian"
] |
56e3486d986ef26c2f9fffca43f775e7ec0830904673086648dfa0b7bbd07893
|
"""
Package for Gaussian Process Optimization
=========================================
This package provides optimization functionality
for the hyperparameters of the covariance functions
in :py:class:`pygp.covar`.
"""
# import scipy:
import scipy as SP
import scipy.optimize as OPT
import logging as LG
import pdb
# LG.basicConfig(level=LG.INFO)
def param_dict_to_list(dict,skeys=None):
    """convert from param dictionary to a flat parameter list"""
    # concatenate the flattened entries in the order given by skeys
    RV = SP.concatenate([dict[key].flatten() for key in skeys])
    return RV
def param_list_to_dict(list,param_struct,skeys):
    """convert a flat parameter list back into a param dictionary
    param_struct: maps each key to the shape of its parameter array
    """
    RV = []
    i0 = 0
    for key in skeys:
        shape = SP.array(param_struct[key])
        nparams = shape.prod()
        i1 = i0 + nparams
        params = list[i0:i1].reshape(shape)
        RV.append((key, params))
        i0 = i1
    return dict(RV)
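# A minimal sketch of the dict <-> list round trip implemented above, with
# hypothetical hyperparameter values (for illustration only):
#
#   hyperparams = {'covar': SP.array([0.5, 1.0]), 'lik': SP.array([0.1])}
#   skeys = sorted(hyperparams.keys())
#   struct = dict((k, hyperparams[k].shape) for k in skeys)
#   flat = param_dict_to_list(hyperparams, skeys)   # -> array([0.5, 1., 0.1])
#   back = param_list_to_dict(flat, struct, skeys)  # -> recovers hyperparams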
def checkgrad(f, fprime, x, *args,**kw_args):
    """
    Numerical gradient check: compare the analytical gradient returned by
    `fprime` with a central finite-difference approximation of `f`,
    one dimension of `x` at a time.
    """
    import numpy as np
    # using machine precision to choose the finite-difference step
    eps = np.finfo(float).eps
    step = np.sqrt(eps)*(x.min())
    # perturb one dimension at a time and compare the two gradients
    h = np.zeros_like(x)
    for i in range(len(x)):
        h[i] = step
        f_ph = f(x+h, *args, **kw_args)
        f_mh = f(x-h, *args, **kw_args)
        numerical_gradient = (f_ph - f_mh)/(2*step)
        analytical_gradient = fprime(x, *args, **kw_args)[i]
        ratio = (f_ph - f_mh)/(2*step*analytical_gradient)
        h[i] = 0
        print "[%d] numerical: %f, analytical: %f, ratio: %f" % (i, numerical_gradient,
                                                                 analytical_gradient,
                                                                 ratio)
def opt_hyper(gpr,hyperparams,Ifilter=None,maxiter=1000,gradcheck=False,bounds = None,optimizer=OPT.fmin_tnc,gradient_tolerance=1E-4,*args,**kw_args):
"""
    Optimize hyperparameters of :py:class:`pygp.gp.basic_gp.GP` ``gpr`` starting from given hyperparameters ``hyperparams``.
**Parameters:**
gpr : :py:class:`pygp.gp.basic_gp`
GP regression class
hyperparams : {'covar':logtheta, ...}
Dictionary filled with starting hyperparameters
for optimization. logtheta are the CF hyperparameters.
Ifilter : [boolean]
Index vector, indicating which hyperparameters shall
be optimized. For instance::
logtheta = [1,2,3]
Ifilter = [0,1,0]
means that only the second entry (which equals 2 in
this example) of logtheta will be optimized
and the others remain untouched.
bounds : [[min,max]]
Array with min and max value that can be attained for any hyperparameter
maxiter: int
maximum number of function evaluations
gradcheck: boolean
check gradients comparing the analytical gradients to their approximations
    optimizer: :py:class:`scipy.optimize`
        which scipy optimizer to use (default: :py:func:`scipy.optimize.fmin_tnc`)
    **Further arguments are passed on to LML:**
priors : [:py:class:`pygp.priors`]
        non-default priors; otherwise the first index is assumed to be the
        amplitude, the last the noise, and the rest lengthscales
"""
def f(x):
x_ = X0
x_[Ifilter_x] = x
rv = gpr.LML(param_list_to_dict(x_,param_struct,skeys),*args,**kw_args)
#LG.debug("L("+str(x_)+")=="+str(rv))
if SP.isnan(rv):
return 1E6
return rv
def df(x):
x_ = X0
x_[Ifilter_x] = x
rv = gpr.LMLgrad(param_list_to_dict(x_,param_struct,skeys),*args,**kw_args)
rv = param_dict_to_list(rv,skeys)
#LG.debug("dL("+str(x_)+")=="+str(rv))
if not SP.isfinite(rv).all(): #SP.isnan(rv).any():
In = SP.isnan(rv)
rv[In] = 1E6
return rv[Ifilter_x]
#0. store parameter structure
skeys = SP.sort(hyperparams.keys())
param_struct = dict([(name,hyperparams[name].shape) for name in skeys])
#1. convert the dictionaries to parameter lists
X0 = param_dict_to_list(hyperparams,skeys)
if Ifilter is not None:
Ifilter_x = SP.array(param_dict_to_list(Ifilter,skeys),dtype='bool')
else:
Ifilter_x = SP.ones(len(X0),dtype='bool')
#2. bounds
if bounds is not None:
#go through all hyperparams and build bound array (flattened)
_b = []
for key in skeys:
if key in bounds.keys():
_b.extend(bounds[key])
else:
_b.extend([(-SP.inf,+SP.inf)]*hyperparams[key].size)
bounds = SP.array(_b)
bounds = bounds[Ifilter_x]
pass
    #3. set starting point of optimization, truncate the non-used dimensions
x = X0.copy()[Ifilter_x]
LG.debug("startparameters for opt:"+str(x))
if gradcheck:
checkgrad(f, df, x)
LG.info("check_grad (pre) (Enter to continue):" + str(OPT.check_grad(f,df,x)))
raw_input()
LG.debug("start optimization")
#general optimizer interface
#note: x is a subset of X, indexing the parameters that are optimized over
    #       Ifilter_x picks the subset of X, yielding x
opt_RV=optimizer(f, x, fprime=df, maxfun=int(maxiter),pgtol=gradient_tolerance, messages=False, bounds=bounds)
# optimizer = OPT.fmin_l_bfgs_b
# opt_RV=optimizer(f, x, fprime=df, maxfun=int(maxiter),iprint =1, bounds=bounds, factr=10.0, pgtol=1e-10)
opt_x = opt_RV[0]
#relate back to X
Xopt = X0.copy()
Xopt[Ifilter_x] = opt_x
#convert into dictionary
opt_hyperparams = param_list_to_dict(Xopt,param_struct,skeys)
#get the log marginal likelihood at the optimum:
opt_lml = gpr.LML(opt_hyperparams,**kw_args)
if gradcheck:
checkgrad(f, df, opt_RV[0])
LG.info("check_grad (post) (Enter to continue):" + str(OPT.check_grad(f,df,opt_RV[0])))
pdb.set_trace()
# raw_input()
LG.debug("old parameters:")
LG.debug(str(hyperparams))
LG.debug("optimized parameters:")
LG.debug(str(opt_hyperparams))
LG.debug("grad:"+str(df(opt_x)))
return [opt_hyperparams,opt_lml]
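# A minimal usage sketch of opt_hyper; the GP object `gpr` and the starting
# hyperparameters below are hypothetical and only illustrate the calling
# convention:
#
#   hyperparams = {'covar': SP.log(SP.array([1.0, 1.0, 0.1]))}
#   opt_params, opt_lml = opt_hyper(gpr, hyperparams, maxiter=1000)
#
# opt_params has the same dictionary structure as `hyperparams`; opt_lml is
# the log marginal likelihood at the optimum.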
|
PMBio/pygp
|
pygp/optimize/optimize_base.py
|
Python
|
gpl-2.0
| 6,442
|
[
"Gaussian"
] |
ba2dc90b4e777924b1d39a7b0f98bd7332b42f8d5a3228f21a4d46b04b1e0383
|
from __future__ import absolute_import
import torch
import numpy as np
import pandas as pd
import scipy
import os
import copy
from pysurvival import utils
from pysurvival.utils import optimization as opt
from pysurvival.models import BaseModel
from pysurvival.models._svm import _SVMModel
# Available Kernel functions
KERNELS = { 'Linear': 0, 'Polynomial': 1, 'Gaussian':2, 'Normal':2,
'Exponential':3, 'Tanh':4, 'Sigmoid': 5, 'Rational Quadratic':6,
'Inverse Multiquadratic': 7, 'Multiquadratic': 8}
REVERSE_KERNELS = {value:key for (key, value) in KERNELS.items() }
class SurvivalSVMModel(BaseModel):
""" Survival Support Vector Machine model:
--------------------------------------
The purpose of the model is to help us look at Survival Analysis
as a Ranking Problem.
    Indeed, the idea behind formulating the survival problem as a ranking
    problem is that in some applications, such as clinical applications,
    one is not interested in predicting the survival time itself, but only
    in defining risk groups and whether a unit has a high or low risk of
    the event occurring.
The current implementation is based on the "Rank Support Vector Machines
(RankSVMs)" developed by Van Belle et al. This allows us to compute a
convex quadratic loss function, so that we can use the Newton
optimization to minimize it.
References:
* Fast Training of Support Vector Machines for Survival Analysis
        from Sebastian Pölsterl, Nassir Navab, and Amin Katouzian
https://link.springer.com/chapter/10.1007/978-3-319-23525-7_15
* An Efficient Training Algorithm for Kernel Survival Support Vector
        Machines from Sebastian Pölsterl, Nassir Navab, and Amin Katouzian
https://arxiv.org/abs/1611.07054
* Support vector machines for survival analysis.
Van Belle, V., Pelckmans, K., Suykens, J.A., Van Huffel, S.
ftp://ftp.esat.kuleuven.be/sista/kpelckma/kp07-70.pdf
Parameters:
-----------
* kernel: str (default="linear")
The type of kernel used to fit the model. Here's the list
of available kernels:
* linear
* polynomial
* gaussian
* exponential
* tanh
* sigmoid
* rational_quadratic
* inverse_multiquadratic
* multiquadratic
* scale: float (default=1)
Scale parameter of the kernel function
* offset: float (default=0)
Offset parameter of the kernel function
* degree: float (default=1)
Degree parameter of the polynomial/kernel function
"""
def __init__(self, kernel = "linear", scale=1., offset=0., degree=1.,
auto_scaler = True):
# Ensuring that the provided kernel is available
valid_kernel = [key for key in KERNELS.keys() \
if kernel.lower().replace('_', ' ') in key.lower().replace('_', ' ')]
if len(valid_kernel) == 0:
raise NotImplementedError('{} is not a valid kernel function.'
.format(kernel))
else:
kernel_type = KERNELS[valid_kernel[0]]
kernel = valid_kernel[0]
# Checking the kernel parameters
if not (degree >= 0. and \
(isinstance(degree, float) or isinstance(degree, int)) ):
error = "degree parameter is not valid. degree is a >= 0 value"
raise ValueError(error)
if not (isinstance(scale, float) or isinstance(scale, int)):
error = "scale parameter is not valid."
raise ValueError(error)
if not (isinstance(offset, float) or isinstance(offset, int)):
error = "offset parameter is not valid."
raise ValueError(error)
# Saving the attributes
self.kernel = kernel
self.kernel_type = kernel_type
self.scale = scale
self.offset = offset
self.degree = degree
# Initializing the C++ object
self.model = _SVMModel( self.kernel_type, self.scale, self.offset,
self.degree)
# Initializing the elements from BaseModel
super(SurvivalSVMModel, self).__init__(auto_scaler)
def __repr__(self):
""" Creates the representation of the Object """
self.name = self.__class__.__name__
        # only kernel-based models report their kernel in the repr
        if 'Kernel' in self.name:
            self.name += "(kernel: '{}')".format(self.kernel)
return self.name
def save(self, path_file):
""" Save the model paremeters of the model (.params) and Compress
them into a zip file
"""
# Ensuring the file has the proper name
folder_name = os.path.dirname(path_file) + '/'
file_name = os.path.basename(path_file)
# Checking if the folder is accessible
if not os.access(folder_name, os.W_OK):
error_msg = '{} is not an accessible directory.'.format(folder_name)
raise OSError(error_msg)
# Delete the C++ object before saving
del self.model
# Saving the model
super(SurvivalSVMModel, self).save(path_file)
# Re-introduce the C++ object
self.model = _SVMModel( self.kernel_type, self.scale, self.offset,
self.degree)
self.load_properties()
def load(self, path_file):
""" Load the model parameters from a zip file into a C++ external
model
"""
# Loading the model
super(SurvivalSVMModel, self).load(path_file)
# Re-introduce the C++ object
self.model = _SVMModel( self.kernel_type, self.scale, self.offset,
self.degree)
self.load_properties()
def fit(self, X, T, E, with_bias = True, init_method='glorot_normal',
lr = 1e-2, max_iter = 100, l2_reg = 1e-4, tol = 1e-3,
verbose = True):
"""
Fitting a Survival Support Vector Machine model.
As the Hessian matrix of the log-likelihood can be
calculated without too much effort, the model parameters are
        computed using the Newton-Raphson optimization scheme:
W_new = W_old - lr*<Hessian^(-1), gradient>
Arguments:
---------
* `X` : array-like, shape=(n_samples, n_features)
The input samples.
* `T` : array-like, shape = [n_samples]
The target values describing when the event of interest or censoring
occurred
* `E` : array-like, shape = [n_samples]
The Event indicator array such that E = 1. if the event occurred
E = 0. if censoring occurred
* `with_bias`: bool (default=True)
Whether a bias should be added
        * `init_method` : str (default = 'glorot_normal')
Initialization method to use. Here are the possible options:
* 'glorot_uniform': Glorot/Xavier uniform initializer,
* 'he_uniform': He uniform variance scaling initializer
* 'uniform': Initializing tensors with uniform (-1, 1) distribution
* 'glorot_normal': Glorot normal initializer,
* 'he_normal': He normal initializer.
* 'normal': Initializing tensors with standard normal distribution
* 'ones': Initializing tensors to 1
* 'zeros': Initializing tensors to 0
* 'orthogonal': Initializing tensors with a orthogonal matrix,
        * `lr`: float (default=1e-2)
learning rate used in the optimization
* `max_iter`: int (default=100)
The maximum number of iterations in the Newton optimization
* `l2_reg`: float (default=1e-4)
L2 regularization parameter for the model coefficients
* `tol`: float (default=1e-3)
Tolerance for stopping criteria
* `verbose`: bool (default=True)
Whether or not producing detailed logging about the modeling
Example:
--------
#### 1 - Importing packages
import numpy as np
import pandas as pd
from pysurvival.models.svm import LinearSVMModel
from pysurvival.models.svm import KernelSVMModel
from pysurvival.models.simulations import SimulationModel
from pysurvival.utils.metrics import concordance_index
from sklearn.model_selection import train_test_split
from scipy.stats.stats import pearsonr
# %pylab inline # to use in jupyter notebooks
#### 2 - Generating the dataset from the parametric model
# Initializing the simulation model
sim = SimulationModel( survival_distribution = 'Log-Logistic',
risk_type = 'linear',
censored_parameter = 1.1,
alpha = 1.5, beta = 4)
# Generating N Random samples
N = 1000
dataset = sim.generate_data(num_samples = N, num_features = 4)
#### 3 - Splitting the dataset into training and testing sets
# Defining the features
features = sim.features
# Building training and testing sets #
index_train, index_test = train_test_split( range(N), test_size = 0.2)
data_train = dataset.loc[index_train].reset_index( drop = True )
data_test = dataset.loc[index_test].reset_index( drop = True )
# Creating the X, T and E input
X_train, X_test = data_train[features], data_test[features]
T_train, T_test = data_train['time'].values, data_test['time'].values
E_train, E_test = data_train['event'].values, data_test['event'].values
#### 4 - Creating an instance of the SVM model and fitting the data.
svm_model = LinearSVMModel()
svm_model = KernelSVMModel(kernel='Gaussian', scale=0.25)
svm_model.fit(X_train, T_train, E_train, init_method='he_uniform',
with_bias = True, lr = 0.5, tol = 1e-3, l2_reg = 1e-3)
#### 5 - Cross Validation / Model Performances
c_index = concordance_index(svm_model, X_test, T_test, E_test) #0.93
print('C-index: {:.2f}'.format(c_index))
#### 6 - Comparing the model predictions to Actual risk score
# Comparing risk scores
svm_risks = svm_model.predict_risk(X_test)
actual_risks = sim.predict_risk(X_test).flatten()
print("corr={:.4f}, p_value={:.5f}".format(*pearsonr(svm_risks,
actual_risks)))# corr=-0.9992, p_value=0.00000
"""
# Collecting features names
N, self.num_vars = X.shape
if isinstance(X, pd.DataFrame):
self.variables = X.columns.tolist()
else:
self.variables = ['x_{}'.format(i) for i in range(self.num_vars)]
# Adding a bias or not
self.with_bias = with_bias
if with_bias:
self.variables += ['intercept']
p = int(self.num_vars + 1.*with_bias)
# Checking the format of the data
X, T, E = utils.check_data(X, T, E)
if with_bias:
# Adding the intercept
X = np.c_[X, [1.]*N]
X = self.scaler.fit_transform( X )
# Initializing the parameters
if self.kernel_type == 0:
W = np.zeros((p, 1))
else:
W = np.zeros((N, 1))
W = opt.initialization(init_method, W, False).flatten()
W = W.astype(np.float64)
# Optimizing to find best parameters
self.model.newton_optimization(X, T, E, W, lr, l2_reg,
tol, max_iter, verbose)
self.save_properties()
return self
def save_properties(self):
""" Loading the properties of the model """
self.weights = np.array( self.model.W )
self.Kernel_Matrix = np.array( self.model.Kernel_Matrix )
self.kernel_type = self.model.kernel_type
self.scale = self.model.scale
self.offset = self.model.offset
self.degree = self.model.degree
self.loss = np.array( self.model.loss )
self.inv_Hessian = np.array( self.model.inv_Hessian )
self.loss_values = np.array( self.model.loss_values )
self.grad2_values = np.array( self.model.grad2_values )
self.internal_X = np.array( self.model.internal_X )
def load_properties(self):
""" Loading the properties of the model """
self.model.W = self.weights
self.model.Kernel_Matrix = self.Kernel_Matrix
self.model.kernel_type = self.kernel_type
self.model.scale = self.scale
self.model.offset = self.offset
self.model.degree = self.degree
self.model.loss = self.loss
self.model.inv_Hessian = self.inv_Hessian
self.model.loss_values = self.loss_values
self.model.grad2_values = self.grad2_values
self.model.internal_X = self.internal_X
self.kernel = REVERSE_KERNELS[self.kernel_type]
def predict_risk(self, x, use_log = False):
""" Predicts the Risk Score
Parameter
----------
* `x`, np.ndarray
array-like representing the datapoints
* `use_log`: bool - (default=False)
Applies the log function to the risk values
Returns
-------
* `risk_score`, np.ndarray
array-like representing the prediction of Risk Score function
"""
# Ensuring that the C++ model has the fitted parameters
self.load_properties()
# Convert x into the right format
x = utils.check_data(x)
# Scaling the dataset
if x.ndim == 1:
if self.with_bias:
x = np.r_[x, 1.]
x = self.scaler.transform( x.reshape(1, -1) )
elif x.ndim == 2:
n = x.shape[0]
if self.with_bias:
x = np.c_[x, [1.]*n]
x = self.scaler.transform( x )
        # Calculating prediction
risk = np.exp( self.model.get_score(x) )
if use_log:
return np.log( risk )
else:
return risk
def predict_cumulative_hazard(self, *args, **kargs):
raise NotImplementedError(self.not_implemented_error)
def predict_cdf(self, *args, **kargs):
raise NotImplementedError(self.not_implemented_error)
def predict_survival(self, *args, **kargs):
raise NotImplementedError(self.not_implemented_error)
def predict_density(self, *args, **kargs):
raise NotImplementedError(self.not_implemented_error)
def predict_hazard(self, *args, **kargs):
raise NotImplementedError(self.not_implemented_error)
class LinearSVMModel(SurvivalSVMModel):
    def __init__(self, auto_scaler = True):
        # forward the user-supplied auto_scaler instead of hard-coding True
        super(LinearSVMModel, self).__init__(kernel = "linear", scale=1.,
            offset=0., degree=1., auto_scaler = auto_scaler)
class KernelSVMModel(SurvivalSVMModel):
def __init__(self, kernel = "gaussian", scale=1., offset=0., degree=1.,
auto_scaler = True):
if "linear" in kernel.lower():
error = "To use a 'linear' svm model, create an instance of"
error += "pysurvival.models.svm.LinearSVMModel"
raise ValueError(error)
super(KernelSVMModel, self).__init__(kernel = kernel, scale=scale,
offset=offset, degree=degree, auto_scaler = auto_scaler)
|
square/pysurvival
|
pysurvival/models/svm.py
|
Python
|
apache-2.0
| 15,908
|
[
"Gaussian"
] |
6378b5b1d0d76e2cbefd6ed4ba838e019d12ba31a1276e7f998adab24dadb654
|
"""Tools for model-based motion correction
Some more text here.
"""
import os.path as op
import numpy as np
import ipywidgets as wdg
import IPython.display as display
from IPython.display import Image
import matplotlib.pyplot as plt
from skimage import measure  # used by make_widget to draw contour overlays
import nibabel as nib
import dipy.core.gradients as dpg
from dipy.align.metrics import CCMetric, EMMetric, SSDMetric
from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
from dipy.align.imaffine import (transform_centers_of_mass,
AffineMap,
MutualInformationMetric,
AffineRegistration)
from dipy.align.transforms import (TranslationTransform3D,
RigidTransform3D,
AffineTransform3D)
syn_metric_dict = {'CC': CCMetric,
'EM': EMMetric,
'SSD': SSDMetric}
def syn_registration(moving, static,
moving_grid2world=None,
static_grid2world=None,
step_length=0.25,
metric='CC',
dim=3,
level_iters=[10, 10, 5],
sigma_diff=2.0,
prealign=None):
"""Register a source image (moving) to a target image (static).
Parameters
----------
moving : ndarray
The source image data to be registered
moving_grid2world : array, shape (4,4)
The affine matrix associated with the moving (source) data.
static : ndarray
The target image data for registration
static_grid2world : array, shape (4,4)
The affine matrix associated with the static (target) data
metric : string, optional
The metric to be optimized. One of `CC`, `EM`, `SSD`,
Default: CCMetric.
dim: int (either 2 or 3), optional
The dimensions of the image domain. Default: 3
    level_iters : list of int, optional
        the number of iterations at each level of the Gaussian Pyramid (the
        length of the list defines the number of pyramid levels to be
        used).
    step_length : float, optional
        the length of the maximum displacement vector of the update
        displacement field at each iteration. Default: 0.25
    sigma_diff : float, optional
        standard deviation of the Gaussian kernel used to regularize the
        update field. Default: 2.0
    prealign : array, shape (4,4), optional
        an affine transformation used to pre-align `moving` to `static`
        before the diffeomorphic optimization starts.
    Returns
    -------
    warped_moving : ndarray
        The data in `moving`, warped towards the `static` data.
    mapping : DiffeomorphicMap
        The mapping object holding the forward and backward vector fields
        between the source and the target.
    """
use_metric = syn_metric_dict[metric](dim, sigma_diff=sigma_diff)
sdr = SymmetricDiffeomorphicRegistration(use_metric, level_iters,
step_length=step_length)
mapping = sdr.optimize(static, moving,
static_grid2world=static_grid2world,
moving_grid2world=moving_grid2world,
prealign=prealign)
warped_moving = mapping.transform(moving)
return warped_moving, mapping
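# A minimal usage sketch, assuming hypothetical 3D arrays and 4x4 affines
# (for illustration only):
#
#   warped, mapping = syn_registration(moving_data, static_data,
#                                      moving_grid2world=moving_affine,
#                                      static_grid2world=static_affine,
#                                      metric='CC')
#   pulled_back = mapping.transform_inverse(static_data)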
def resample(moving, static, moving_grid2world, static_grid2world):
    """Resample an image from one space to another."""
    identity = np.eye(4)
    affine_map = AffineMap(identity,
                           static.shape, static_grid2world,
                           moving.shape, moving_grid2world)
    resampled = affine_map.transform(moving)
    return resampled
# Affine registration pipeline:
affine_metric_dict = {'MI': MutualInformationMetric}
def c_of_mass(moving, static, static_grid2world, moving_grid2world,
reg, starting_affine, params0=None):
transform = transform_centers_of_mass(static, static_grid2world,
moving, moving_grid2world)
transformed = transform.transform(moving)
return transformed, transform.affine
def translation(moving, static, static_grid2world, moving_grid2world,
reg, starting_affine, params0=None):
transform = TranslationTransform3D()
translation = reg.optimize(static, moving, transform, params0,
static_grid2world, moving_grid2world,
starting_affine=starting_affine)
return translation.transform(moving), translation.affine
def rigid(moving, static, static_grid2world, moving_grid2world,
reg, starting_affine, params0=None):
transform = RigidTransform3D()
rigid = reg.optimize(static, moving, transform, params0,
static_grid2world, moving_grid2world,
starting_affine=starting_affine)
return rigid.transform(moving), rigid.affine
def affine(moving, static, static_grid2world, moving_grid2world,
reg, starting_affine, params0=None):
transform = AffineTransform3D()
affine = reg.optimize(static, moving, transform, params0,
static_grid2world, moving_grid2world,
starting_affine=starting_affine)
return affine.transform(moving), affine.affine
def affine_registration(moving, static,
moving_grid2world=None,
static_grid2world=None,
nbins=32,
sampling_prop=None,
metric='MI',
pipeline=[c_of_mass, translation, rigid, affine],
level_iters=[10000, 1000, 100],
sigmas=[5.0, 2.5, 0.0],
factors=[4, 2, 1],
params0=None):
"""
Find the affine transformation between two 3D images.
"""
# Define the Affine registration object we'll use with the chosen metric:
use_metric = affine_metric_dict[metric](nbins, sampling_prop)
affreg = AffineRegistration(metric=use_metric,
level_iters=level_iters,
sigmas=sigmas,
factors=factors)
# Bootstrap this thing with the identity:
starting_affine = np.eye(4)
# Go through the selected transformation:
for func in pipeline:
transformed, starting_affine = func(moving, static,
static_grid2world,
moving_grid2world,
affreg, starting_affine,
params0)
return transformed, starting_affine
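# A minimal usage sketch (hypothetical volumes, for illustration only); the
# `pipeline` argument selects which of the transforms defined above to chain:
#
#   transformed, affine = affine_registration(moving_vol, static_vol,
#                                             pipeline=[c_of_mass, rigid])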
def register_series(series, ref, pipeline):
""" Register a series to a reference image.
Parameters
----------
series : Nifti1Image object
The data is 4D with the last dimension separating different 3D volumes
ref : Nifti1Image or integer or iterable
"""
if isinstance(ref, nib.Nifti1Image):
static = ref
static_data = static.get_data()
s_g2w = static.get_affine()
moving = series
moving_data = moving.get_data()
m_g2w = moving.get_affine()
elif isinstance(ref, int) or np.iterable(ref):
data = series.get_data()
idxer = np.zeros(data.shape[-1]).astype(bool)
idxer[ref] = True
static_data = data[..., idxer]
if len(static_data.shape) > 3:
static_data = np.mean(static_data, -1)
moving_data = data[..., ~idxer]
m_g2w = s_g2w = series.affine
affine_list = []
transformed_list = []
for ii in range(moving_data.shape[-1]):
this_moving = moving_data[..., ii]
transformed, affine = affine_registration(this_moving, static_data,
moving_grid2world=m_g2w,
static_grid2world=s_g2w,
pipeline=pipeline)
transformed_list.append(transformed)
affine_list.append(affine)
return transformed_list, affine_list
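# A minimal usage sketch, assuming a hypothetical 4D NIfTI file
# (for illustration only):
#
#   series = nib.load('dwi.nii.gz')
#   xformed, affines = register_series(series, ref=0,
#                                      pipeline=[c_of_mass, translation, rigid])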
def make_widget(data, cmap='bone', dims=4, contours=False):
"""Create an ipython widget for displaying 3D/4D data."""
def plot_image3d(z=data.shape[-1]//2):
fig, ax = plt.subplots(1)
im = ax.imshow(data[:, :, z], cmap=cmap, vmax=np.max(data),
vmin=np.min(data))
if contours:
cc = measure.find_contours(data[:, :, z], contours)
for n, c in enumerate(cc):
ax.plot(c[:, 1], c[:, 0], linewidth=2)
plt.colorbar(im)
fig.set_size_inches([10, 10])
plt.show()
def plot_image4d(z=data.shape[-2]//2, b=data.shape[-1]//2):
fig, ax = plt.subplots(1)
im = ax.imshow(data[:, :, z, b], cmap=cmap, vmax=np.max(data),
vmin=np.min(data))
fig.set_size_inches([10, 10])
plt.colorbar(im)
plt.show()
if dims == 4:
pb_widget = wdg.interactive(plot_image4d,
z=wdg.IntSlider(min=0,
max=data.shape[-2]-1,
value=data.shape[-2]//2),
b=wdg.IntSlider(min=0,
max=data.shape[-1]-1,
value=0))
elif dims == 3:
if len(data.shape) == 4:
# RGB images:
zidx = -2
else:
zidx = -1
zmax = data.shape[zidx] - 1
zval = data.shape[zidx] // 2
pb_widget = wdg.interactive(plot_image3d,
z=wdg.IntSlider(min=0,
max=zmax,
value=zval))
display.display(pb_widget)
|
arokem/model_mc
|
tools.py
|
Python
|
bsd-3-clause
| 9,697
|
[
"Gaussian"
] |
558a70e520aac479dfc57f1e7c0331d077fb881b2f8583731e10c51b3f066e40
|
from ..codes import CodeOutput
import os
import re
import json
class SiestaOutput(CodeOutput):
def __init__(self, outputfile='siesta.out'):
CodeOutput.__init__(self)
self.outputfile = None
self.output_values = None
self.data = None
if os.path.isfile(outputfile):
self.outputfile = outputfile
if self.is_finished:
self.read()
@property
def is_finished(self):
if self.outputfile is None:
return False
rf = open(self.outputfile)
data = rf.read()
rf.close()
        return data.endswith('Job completed\n')
def read(self):
if not os.path.isfile(self.outputfile):
raise ValueError("ERROR: Siesta outputfile not found: %s" % self.outputfile)
rf = open(self.outputfile)
self.data = rf.read()
rf.close()
subdata = re.findall("siesta: Final energy \(eV\):[\s\d\w\W]*\n\n", self.data)
# print(subdata)
if len(subdata) == 0:
raise ValueError('No Final data could be retrieved')
elif len(subdata) > 1:
raise ValueError('ERROR: Wrong parsing of data')
        ret = {}
        master = None  # current section header; None until the first header line is seen
for i in subdata[0].split('\n'):
# Debugging parser
# print('Line => %s' % i)
if 'siesta:' in i:
line = i.replace('siesta:', '').strip()
else:
line = i.strip()
if 'Final energy' in line:
master = line[:-1].strip()
elif 'Atomic forces' in line:
master = line[:-1].strip()
elif 'Stress tensor' in line:
master = line[:-1].strip()
elif 'Cell volume' in line:
ret['Cell volume'] = self.parse_line(line.split('=')[1])
elif 'Pressure' in line:
master = line[:-1].strip()
elif '(Free)E+ p_basis*V_orbitals' in line:
ret['(Free)E+ p_basis*V_orbitals'] = self.parse_line(line.split('=')[1])
elif '(Free)Eharris+ p_basis*V_orbitals' in line:
ret['(Free)Eharris+ p_basis*V_orbitals'] = self.parse_line(line.split('=')[1])
elif 'Electric dipole (a.u.)' in line:
ret['Electric dipole (a.u.)'] = self.parse_line(line.split('=')[1])
elif 'Electric dipole (Debye)' in line:
ret['Electric dipole (Debye)'] = self.parse_line(line.split('=')[1])
elif 'Vacuum level (max, mean)' in line:
ret['Vacuum level (max, mean)'] = self.parse_line(line.split('=')[1])
elif 'Elapsed wall time (sec)' in line:
ret['Elapsed wall time (sec)'] = self.parse_line(line.split('=')[1])
elif line.strip() == '':
continue
elif line.strip()[:3] == '---' and line.strip()[-3:] == '---':
continue
elif "CPU execution times" in line:
break
elif master is not None:
if '=' in line:
if master not in ret:
ret[master] = {}
key = line.split('=')[0].strip()
value = line.split('=')[1]
ret[master][key] = self.parse_line(value)
else:
if master not in ret:
ret[master] = []
ret[master].append(self.parse_line(line))
self.output_values = ret
def parse_line(self, line):
ret = []
for i in line.split():
try:
value = int(i)
except ValueError:
try:
value = float(i)
except ValueError:
value = i
ret.append(value)
if len(ret) == 1:
ret = ret[0]
return ret
def show_parsed_data(self):
if self.output_values is None:
raise ValueError('No data has been parsed')
else:
print(json.dumps(self.output_values, sort_keys=True, indent=4, separators=(',', ': ')))
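# Usage sketch (assumes a finished run whose output sits in 'siesta.out'):
#
#   out = SiestaOutput('siesta.out')
#   if out.is_finished:
#       out.show_parsed_data()                       # pretty-print everything
#       volume = out.output_values['Cell volume']    # or pick single values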
|
MaterialsDiscovery/PyChemia
|
pychemia/code/siesta/output.py
|
Python
|
mit
| 4,168
|
[
"SIESTA"
] |
43fbab8fd0ee8bcf6fac3b20d774e82c9883ff07a09e917c86b6b09fe484b7eb
|
import collections as coll
import numpy as np
from scipy import ndimage
import warnings
from ..util import img_as_float
from ..color import guess_spatial_dimensions
__all__ = ['gaussian_filter']
def gaussian_filter(image, sigma, output=None, mode='nearest', cval=0,
multichannel=None):
"""
Multi-dimensional Gaussian filter
Parameters
----------
image : array-like
input image (grayscale or color) to filter.
sigma : scalar or sequence of scalars
standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'nearest'.
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
multichannel : bool, optional (default: None)
Whether the last axis of the image is to be interpreted as multiple
channels. If True, each channel is filtered separately (channels are
not mixed together). Only 3 channels are supported. If `None`,
the function will attempt to guess this, and raise a warning if
ambiguous, when the array has shape (M, N, 3).
Returns
-------
filtered_image : ndarray
the filtered array
Notes
-----
This function is a wrapper around :func:`scipy.ndimage.gaussian_filter`.
Integer arrays are converted to float.
The multi-dimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
Examples
--------
>>> a = np.zeros((3, 3))
>>> a[1, 1] = 1
>>> a
array([[ 0., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 0.]])
>>> gaussian_filter(a, sigma=0.4) # mild smoothing
array([[ 0.00163116, 0.03712502, 0.00163116],
[ 0.03712502, 0.84496158, 0.03712502],
[ 0.00163116, 0.03712502, 0.00163116]])
    >>> gaussian_filter(a, sigma=1) # more smoothing
array([[ 0.05855018, 0.09653293, 0.05855018],
[ 0.09653293, 0.15915589, 0.09653293],
[ 0.05855018, 0.09653293, 0.05855018]])
>>> # Several modes are possible for handling boundaries
>>> gaussian_filter(a, sigma=1, mode='reflect')
array([[ 0.08767308, 0.12075024, 0.08767308],
[ 0.12075024, 0.16630671, 0.12075024],
[ 0.08767308, 0.12075024, 0.08767308]])
>>> # For RGB images, each is filtered separately
>>> from skimage.data import lena
>>> image = lena()
>>> filtered_lena = gaussian_filter(image, sigma=1, multichannel=True)
"""
spatial_dims = guess_spatial_dimensions(image)
if spatial_dims is None and multichannel is None:
msg = ("Images with dimensions (M, N, 3) are interpreted as 2D+RGB" +
" by default. Use `multichannel=False` to interpret as " +
" 3D image with last dimension of length 3.")
warnings.warn(RuntimeWarning(msg))
multichannel = True
if multichannel:
# do not filter across channels
if not isinstance(sigma, coll.Iterable):
sigma = [sigma] * (image.ndim - 1)
if len(sigma) != image.ndim:
sigma = np.concatenate((np.asarray(sigma), [0]))
image = img_as_float(image)
    return ndimage.gaussian_filter(image, sigma, output=output, mode=mode,
                                   cval=cval)
|
chintak/scikit-image
|
skimage/filter/_gaussian.py
|
Python
|
bsd-3-clause
| 4,008
|
[
"Gaussian"
] |
c9701e2b7b27109afed217a916878903a19573ed74f14ddc09b1e8c8e10e7fc4
|
# -*- coding: utf-8 -*-
# Author: Braden Czapla (2019)
# Last modified: 2019-04-29
# Original data: Kaiser et al. 1962, https://doi.org/10.1103/PhysRev.127.1950
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
###############################################################################
# Determine wavelengths to sample
def w(w_max, w_min, step):
linspace_lower = (np.floor_divide(w_min, step)+1)*step
N = np.floor_divide(w_max-w_min, step)
linspace_upper = linspace_lower + N*step
w = np.linspace(linspace_lower, linspace_upper, int(N)+1)
if not np.isclose(w[0], w_min, atol=step/5.):
w = np.concatenate((np.array([w_min]), w))
if not np.isclose(w[-1], w_max, atol=step/5.):
w = np.concatenate((w,np.array([w_max])))
return w, len(w)
# Compute dielectric function using Lorentzian model.
# Units of w and ResFreq must match and must be directly proportional to angular frequency. All other parameters are unitless.
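# Concretely, the model implemented below is:
#   eps(w) = Eps_Inf + sum_i Strength_i / (1 - (w/ResFreq_i)**2 - 1j*Damping_i*(w/ResFreq_i))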
def Lorentzian(w, ResFreq, Strength, Damping, Eps_Inf):
    Permittivity = Eps_Inf*np.ones(len(w), dtype=complex)
for ii in range(len(ResFreq)):
Permittivity += Strength[ii]/( 1. - (w/ResFreq[ii])**2 - 1j*Damping[ii]*(w/ResFreq[ii]) )
return Permittivity
# Save w, n, k to YML file
def SaveYML(w_um, RefInd, filename, references='', comments=''):
header = np.empty(9, dtype=object)
header[0] = '# this file is part of refractiveindex.info database'
header[1] = '# refractiveindex.info database is in the public domain'
header[2] = '# copyright and related rights waived via CC0 1.0'
header[3] = ''
header[4] = 'REFERENCES:' + references
header[5] = 'COMMENTS:' + comments
header[6] = 'DATA:'
header[7] = ' - type: tabulated nk'
header[8] = ' data: |'
export = np.column_stack((w_um, np.real(RefInd), np.imag(RefInd)))
np.savetxt(filename, export, fmt='%4.2f %#.4g %#.4g', delimiter=' ', header='\n'.join(header), comments='',newline='\n ')
return
###############################################################################
## Wavelengths to sample ##
w_um_max = 80. # [um]
w_um_min = 10. # [um]
step_um = 0.05 # [um]
w_um, N_freq = w(w_um_max, w_um_min, step_um)
w_invcm = 10000./w_um
## ##
## Model Parameters ##
# See Table I
ResFreq = np.array([184., 278.]) # [cm^-1]
Strength = np.array([4.50, 0.07])
Damping = np.array([0.020, 0.30])
Eps_Inf = 2.16
## ##
## Generate and Save Data ##
eps = Lorentzian(w_invcm, ResFreq, Strength, Damping, Eps_Inf)
RefInd = np.sqrt(eps)
references = ' "W. Kaiser, W. G. Spitzer, R. H. Kaiser, and L. E. Howarth. Infrared Properties of CaF2, SrF2, and BaF2, <a href=\"https://doi.org/10.1103/PhysRev.127.1950\"><i>Phys. Rev.</i> <b>127</b>, 1950 (1962)</a>"'
comments = ' "Single crystal; Room temperature; Lorentz oscillator model parameters provided."'
SaveYML(w_um, RefInd, 'Kaiser-BaF2.yml', references, comments)
## ##
## Plotting ##
plt.figure('Figure 7 - n')
plt.plot(w_um, np.real(RefInd), label='BaF$_{2}$')
plt.legend(loc=1)
plt.xlim(10,80)
plt.ylim(0,14)
plt.figure('Figure 8 - k')
plt.plot(w_um, np.imag(RefInd), label='BaF$_{2}$')
plt.legend(loc=1)
plt.xlim(10,80)
plt.ylim(0,14)
## ##
|
polyanskiy/refractiveindex.info-scripts
|
scripts/Kaiser 1962 - BaF2.py
|
Python
|
gpl-3.0
| 3,299
|
[
"CRYSTAL"
] |
a4b6bfbbe8d6691f3d106412902a477887df85318ebf571eedb3d7c047d9f5ec
|
"""
snowbird
--------
Tools for migrating data
"""
from setuptools import setup
setup(
name='snowbird',
version='0.1',
url='http://github.com/unbracketed/snowbird/',
license='BSD',
author='Brian Luft',
packages=['snowbird'],
#namespace_packages=['snowbird'],
zip_safe=False,
platforms='any',
install_requires=[],
classifiers=[
'Environment :: Console',
'Framework :: Django',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Database',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'
]
)
|
unbracketed/snowbird
|
setup.py
|
Python
|
mit
| 797
|
[
"Brian"
] |
841a5667f539367b5f616bbba316cedce7c2c1a14ab258d0f3d7263776e2fff3
|
# okay, let's try out some very basic stuff here to see if it works
import numpy as np
import matplotlib.pyplot as plt
#first let's draw points from standard bivariate normal
mu = [0,0]
sigma = [[1,0],[0,1]]
samples = np.random.multivariate_normal(mu, sigma, 1000000)
#now we need to plot said samples on 2d graph
print type(samples)
print samples.shape
x = samples[:,0]
y = samples[:,1]
#fig = plt.figure()
#ax1 = fig.add_subplot(121)
#plt.hist(x)
#plt.title('x dimension')
#ax2 = fig.add_subplot(122)
#plt.hist(y)
#plt.title('y dimension')
#plt.show(fig)
# now we get the conditional distributions
def get_cond(samps, vals, val, sample_width):
    """Collect vals[i] for samples within sample_width of val (an empirical conditional)."""
    rets = []
    for i in xrange(len(samps)):
        samp = samps[i]
        if val - sample_width <= samp <= val + sample_width:
            rets.append(vals[i])
    return np.array(rets)
conds = get_cond(x,y,-2,0.3)
fig = plt.figure()
ax1 = fig.add_subplot(121)
plt.hist(y)
plt.title('y dimension by itself')
ax2 = fig.add_subplot(122)
plt.hist(conds)
plt.title('y | x ~ -2')
plt.show(fig)
# yeah, the trouble is it changes. we can plot KLs for all of them and average them if you like
# or do some other kind of thing. there are probably more reasonable ways to test this
# but the trouble is, in mcmc we won't get samples for these points realistically anyhow
# and it'll take ages to assume this generally, and I'm not sure how to get general conditionals
# and I'm not sure it will help!!! dagnabbit. it is a cool method though!
# to be fair, gibbs sampling is basically what I'm inventing here, but done much better and more sensibly than my way, which is insane and tries to peel off dimensions at random without much success to be honest - but it might be good as a quick and dirty approximation, who knows?
# gaussian processes also
# but it's still hard to compete with piecewise linear
# I suppose the hope is it won't scale to high dimensions at all, but we honestly don't know
# and for arbitrary nonconcave functions
# we could try using gaussian processes, or something. the trouble with NNs is really just that they don't do well without lots of data, and that is difficult. also, what is the objective function? is it differentiable? so we could try GPs first if we can come up with an adaptive algorithm here. I wonder what theirs is - it could be cool for all I know!? yeah, their algorithm makes a whole load of better steps, and that's cool.. who knew the ARS was so good?
# yeah, so far I think they're mostly for univariate, which is useless for any kind of serious inference in the real world, unless you have a dimensional decomposition
# we could try a GP to hold up the method, but really we just want fast and effective function approximation to do it rapidly. the piecewise linear is some kind of taylor thing perhaps? which I doubt works in high dimensions?
# I'm sure somebody has already tried to simulate the rejection-sampling envelope distribution with a neural network. I'm not sure how much better it is, but NNs are definitely awesome function approximators, so it seems reasonable and possible, and definitely a cool paper perhaps, if I can figure out a reasonable objective function and there isn't a straight-up result already in the literature. it could be seriously useful and cool, and I just don't know - it would be good to have some papers around!
## I suppose the problem would be guaranteeing that the NN learns a function that completely covers it, although I don't understand why you wouldn't just calculate f(x) for every y you want if you want to do that, vs rejection sampling - it makes no sense to me!
# we should also be able to use ANNs to help us model the distributions to be approximated via mcmc, to speed up that sampling process. should be very important generally, hopefully!
# also, people can use piecewise linear functions for this, and apparently it works well. I suppose the challenge is showing that NNs work better, which is nowhere near guaranteed!?
# the conditional distribution is another normal, but a different one
# but it doesn't really matter. what matters is how we approximate it empirically
## that will be very slow and error prone presumably
# okay, so what we do is standard mcmc sampling without serious issues
# then after we've accumulated a bit, we basically get samples of all the conditional distributions - not sure how we avoid the problem of them needing the conditional for a specific value. I think that's the hangup where we're going to fail tbh
# but if it's conditional in a region, we could argue that it should be conditional everywhere
# at least for a couple of regions
# so if we have common points, it should hold generally, hopefully
# at least that's the aim, and we can use bayesian opt to do that hopefully
# let's try it
# we could gaussian process it
# but that still doesn't give us the actual conditional distribution, right
# unless once again we GP it
# but that introduces yet another source of error into the works
# and another overhead... dagnabbit!
# wait a second, this doesn't actually help us at all, as basically we're just trying to calculate p(x,y), which is what we wanted in the first place!
# okay, great, so that works. now let's try to figure out the conditional distribution empirically # well, an obvious thing to do is to sample a bunch of points from the full posterior - we can do that, right??? via mcmc sampling or something - and from there calculate the conditionals and whatnot, and from there calculate dimensional coupling. the overhead won't actually be that large, as you'll be doing mcmc sampling in the first place; it will just be an approximation method which will hopefully improve the speed of the mcmc samplers, and that's quite nice - a fairly standard overhead to be honest. so let's think about this
|
Bmillidgework/Misc-Maths
|
Misc/dimension_correlation.py
|
Python
|
mit
| 5,888
|
[
"Gaussian"
] |
4a4402fd9751e537506abfdb9c98a7567ed559cdebcacc42764a36bdfc579952
|
#!/usr/bin/python
# coding:utf-8
'''The downloader module takes links from the url queue and downloads them;
once a download finishes, the html page is wrapped in the internal data
format and put on the html queue, where it waits for the parser threads.

Relationship between the downloader and the parser: each is the other's
producer and consumer. The downloader consumes from the url queue and
produces html pages for the html queue; the parser consumes from the html
queue and produces url links for the url queue.

Because of this division of responsibilities, the downloader and the parser
are kept separate in the code; their only interface is the two containers,
the url queue and the html queue.
'''
import sys
import time
import random
import sqlite3  # used by the test() helper at the bottom of this file
import requests
from splinter import Browser
from mylogger import logger
from dataModel import HtmlModel, UrlModel  # UrlModel assumed to live in dataModel alongside HtmlModel (needed by test())
from helper import timestamp
from threadPool import WorkRequest
from config import *
reload(sys)
sys.setdefaultencoding("utf-8")
class Downloader(WorkRequest):
    '''Subclass of the thread pool's WorkRequest class; implements the worker
    function. Purpose: take links from the url queue, download them, and put
    the results on the html queue.
    '''
def __init__(self, dlQueue, downloadMode, htmlQueue, exitEvent, downloadingFlag):
self.__htmlQueue = htmlQueue
        # Download queue holding the url nodes assigned to this worker by the main thread
self.__dlQueue = dlQueue
self.__downloadMode = downloadMode
self.__exitEvent = exitEvent
self.__downloadingFlag = downloadingFlag
def __isBigPage(self, url):
        '''Check the page (file) size and filter out pages (files) that are too large.'''
try:
response = requests.head(url)
contentLen = response.headers['content-length']
contentLen = int(contentLen)
if contentLen > MAX_PAGE_SIZE:
logger.warning('This is big page, Length : %d, URL : %s', contentLen, url)
return True
return False
except Exception,e:
return False
def __staticDownload(self, url):
        '''Static download: fetch the page with the requests module.'''
if self.__isBigPage(url):
return ""
user_agent = random.choice(USER_AGENTS)
headers = {'User-Agent': user_agent}
try:
# logger.debug('Downloading url : %s', url)
response = requests.get(url, timeout=CONNECT_TIME_OUT, headers=headers)
if response.status_code == 200:
try:
                    # Check the file size again, to handle redirected links
contentLen = response.headers['content-length']
contentLen = int(contentLen)
if contentLen > MAX_PAGE_SIZE:
logger.warning('This is redirect page, before URL : %s, after URL : %s', url, response.url)
return ""
except Exception,e:
pass
page = response.text
                # Check the actual body size, in case content-length disagrees with the true file size
if len(page) > MAX_PAGE_SIZE:
logger.warning('Downloaded big file, Length : %d , URL : %s', len(page), url)
return ""
return page
else:
logger.warning('Download failed. status code : %d', response.status_code)
return ""
except Exception, e:
logger.warning('Download exception (static): %s', str(e))
return ""
def __dynamicDownload(self, url):
        '''Dynamic download: render the page with splinter and phantomjs (installed separately).'''
try:
# logger.debug('Downloading url : %s', url)
browser = Browser('phantomjs')
browser.visit(url)
html = browser.html
browser.quit()
return html
except Exception, e:
logger.warning('Download exception (dynamic): %s', str(e))
return ""
def __downloadPage(self, url):
        '''Dispatch on the configured download mode: static or dynamic.'''
if self.__downloadMode == 0:
return self.__staticDownload(url)
elif self.__downloadMode == 1:
return self.__dynamicDownload(url)
def doWork(self):
        '''Override of WorkRequest's worker function; it runs inside the thread pool.
        Purpose: take urls from the download queue assigned to this worker and
        download them.
        '''
logger.debug('Start downloader`s doWork...')
# self.test()
while True:
if self.__dlQueue.qsize() > 0:
urlNode = self.__dlQueue.get()
self.__downloadingFlag += 1
page = self.__downloadPage(urlNode.url)
if len(page) == 0:
self.__downloadingFlag -= 1
continue
# logger.debug('download page success, url: %s', urlNode.url)
                # Wrap the downloaded html page in the internal data format and
                # put it on the html queue for the parser module
htmlNode = HtmlModel(urlNode.url, page, timestamp(), urlNode.depth)
self.__htmlQueue.put(htmlNode)
self.__downloadingFlag -= 1
            # Check for the exit event
if self.__exitEvent.is_set():
logger.info('Download model quit...')
return
            # Throttle between downloads
time.sleep(FETCH_TIME_INTERVAL)
def test(self):
conn = sqlite3.connect('test/test.db')
cur = conn.cursor()
sql = 'select url from zspider'
cur.execute(sql)
r = cur.fetchall()
for i in range(len(r)):
url = r[i][0]
urlNode = UrlModel(url, 'parenturl', '2013-12-12 12:12:12' , 0)
            self.__dlQueue.put(urlNode)
cur.close()
conn.close()
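# Usage sketch (hypothetical wiring, commented out; the queues, the exit event
# and the thread pool are owned by the main module, which is not shown here):
#
#   from Queue import Queue
#   from threading import Event
#   url_queue, html_queue = Queue(), Queue()
#   exit_event = Event()
#   worker = Downloader(url_queue, 0, html_queue, exit_event, 0)
#   worker.doWork()    # normally scheduled on the thread pool, not called inline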
|
zhjl120/ZSpider
|
src/downloader.py
|
Python
|
mit
| 5,793
|
[
"VisIt"
] |
fd485a18bfb8b79b8d7b7ec9e09ca94ec57bbade79cc95dd8e0629a7efad9bb7
|
################################################################################
# Copyright (C) 2011-2013 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
General numerical functions and methods.
"""
import functools
import itertools
import operator
import sys
import getopt
import numpy as np
import scipy as sp
import scipy.linalg as linalg
import scipy.special as special
import scipy.optimize as optimize
import scipy.sparse as sparse
import tempfile as tmp
import unittest
from numpy import testing
def flatten_axes(X, *ndims):
ndim = sum(ndims)
if np.ndim(X) < ndim:
raise ValueError("Not enough ndims in the array")
if len(ndims) == 0:
return X
shape = np.shape(X)
i = np.ndim(X) - ndim
plates = shape[:i]
nd_sums = i + np.cumsum((0,) + ndims)
sizes = tuple(
np.prod(shape[i:j])
for (i, j) in zip(nd_sums[:-1], nd_sums[1:])
)
return np.reshape(X, plates + sizes)
def reshape_axes(X, *shapes):
ndim = len(shapes)
if np.ndim(X) < ndim:
raise ValueError("Not enough ndims in the array")
i = np.ndim(X) - ndim
sizes = tuple(np.prod(sh) for sh in shapes)
if np.shape(X)[i:] != sizes:
raise ValueError("Shapes inconsistent with sizes")
shape = tuple(i for sh in shapes for i in sh)
return np.reshape(X, np.shape(X)[:i] + shape)
def find_set_index(index, set_lengths):
"""
Given set sizes and an index, returns the index of the set
The given index is for the concatenated list of the sets.
"""
# Negative indices to positive
if index < 0:
index += np.sum(set_lengths)
# Indices must be on range (0, N-1)
if index >= np.sum(set_lengths) or index < 0:
raise Exception("Index out bounds")
return np.searchsorted(np.cumsum(set_lengths), index, side='right')
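# For example, with set lengths [3, 2, 4] the concatenated indices 0-2 fall in
# set 0 and indices 3-4 in set 1, so:
#   find_set_index(4, [3, 2, 4])  ->  1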
def parse_command_line_arguments(mandatory_args, *optional_args_list, argv=None):
"""
Parse command line arguments of style "--parameter=value".
Parameter specification is tuple: (name, converter, description).
Some special handling:
* If converter is None, the command line does not accept any value
for it, but instead use either "--option" to enable or
"--no-option" to disable.
* If argument name contains hyphens, those are converted to
underscores in the keys of the returned dictionaries.
Parameters
----------
mandatory_args : list of tuples
Specs for mandatory arguments
optional_args_list : list of lists of tuples
Specs for each optional arguments set
argv : list of strings (optional)
The command line arguments. By default, read sys.argv.
Returns
-------
args : dictionary
The parsed mandatory arguments
kwargs : dictionary
The parsed optional arguments
Examples
--------
>>> from pprint import pprint as print
>>> from bayespy.utils import misc
>>> (args, kwargs) = misc.parse_command_line_arguments(
... # Mandatory arguments
... [
... ('name', str, "Full name"),
... ('age', int, "Age (years)"),
... ('employed', None, "Working"),
... ],
... # Optional arguments
... [
... ('phone', str, "Phone number"),
... ('favorite-color', str, "Favorite color")
... ],
... argv=['--name=John Doe',
... '--age=42',
... '--no-employed',
... '--favorite-color=pink']
... )
>>> print(args)
{'age': 42, 'employed': False, 'name': 'John Doe'}
>>> print(kwargs)
{'favorite_color': 'pink'}
It is possible to have several optional argument sets:
>>> (args, kw_info, kw_fav) = misc.parse_command_line_arguments(
... # Mandatory arguments
... [
... ('name', str, "Full name"),
... ],
... # Optional arguments (contact information)
... [
... ('phone', str, "Phone number"),
... ('email', str, "E-mail address")
... ],
... # Optional arguments (preferences)
... [
... ('favorite-color', str, "Favorite color"),
... ('favorite-food', str, "Favorite food")
... ],
... argv=['--name=John Doe',
... '--favorite-color=pink',
... '--email=john.doe@email.com',
... '--favorite-food=spaghetti']
... )
>>> print(args)
{'name': 'John Doe'}
>>> print(kw_info)
{'email': 'john.doe@email.com'}
>>> print(kw_fav)
{'favorite_color': 'pink', 'favorite_food': 'spaghetti'}
"""
if argv is None:
argv = sys.argv[1:]
mandatory_arg_names = [arg[0] for arg in mandatory_args]
# Sizes of each optional argument list
optional_args_lengths = [len(opt_args) for opt_args in optional_args_list]
all_args = mandatory_args + functools.reduce(operator.add, optional_args_list, [])
# Create a list of arg names for the getopt parser
arg_list = []
for arg in all_args:
arg_name = arg[0].lower()
if arg[1] is None:
arg_list.append(arg_name)
arg_list.append("no-" + arg_name)
else:
arg_list.append(arg_name + "=")
if len(set(arg_list)) < len(arg_list):
raise Exception("Argument names are not unique")
# Use getopt parser
try:
(cl_opts, cl_args) = getopt.getopt(argv, "", arg_list)
except getopt.GetoptError as err:
print(err)
print("Usage:")
for arg in all_args:
if arg[1] is None:
print("--{0}\t{1}".format(arg[0].lower(),
arg[2]))
else:
print("--{0}=<{1}>\t{2}".format(arg[0].lower(),
str(arg[1].__name__).upper(),
arg[2]))
sys.exit(2)
# A list of all valid flag names: ["--first-argument", "--another-argument"]
valid_flags = []
valid_flag_arg_indices = []
for (ind, arg) in enumerate(all_args):
valid_flags.append("--" + arg[0].lower())
valid_flag_arg_indices.append(ind)
if arg[1] is None:
valid_flags.append("--no-" + arg[0].lower())
valid_flag_arg_indices.append(ind)
# Go through all the given command line arguments and store them in the
# correct dictionaries
args = dict()
kwargs_list = [dict() for i in range(len(optional_args_list))]
handled_arg_names = []
for (cl_opt, cl_arg) in cl_opts:
# Get the index of the argument
try:
ind = valid_flag_arg_indices[valid_flags.index(cl_opt.lower())]
except ValueError:
print("Invalid command line argument: {0}".format(cl_opt))
raise Exception("Invalid argument given")
# Check that the argument wasn't already given and then mark the
# argument as handled
if all_args[ind][0] in handled_arg_names:
raise Exception("Same argument given multiple times")
else:
handled_arg_names.append(all_args[ind][0])
# Check whether to add the argument to the mandatory or optional
# argument dictionary
if ind < len(mandatory_args):
dict_to = args
else:
dict_index = find_set_index(ind - len(mandatory_args),
optional_args_lengths)
dict_to = kwargs_list[dict_index]
# Convert and store the argument
convert_function = all_args[ind][1]
arg_name = all_args[ind][0].replace('-', '_')
if convert_function is None:
if cl_opt[:5] == "--no-":
dict_to[arg_name] = False
else:
dict_to[arg_name] = True
else:
dict_to[arg_name] = convert_function(cl_arg)
# Check if some mandatory argument was not given
for arg_name in mandatory_arg_names:
if arg_name not in handled_arg_names:
raise Exception("Mandatory argument --{0} not given".format(arg_name))
return tuple([args] + kwargs_list)
def composite_function(function_list):
"""
Construct a function composition from a list of functions.
Given a list of functions [f,g,h], constructs a function :math:`h \circ g
\circ f`. That is, returns a function :math:`z`, for which :math:`z(x) =
h(g(f(x)))`.
"""
def composite(X):
for function in function_list:
X = function(X)
return X
return composite
def ceildiv(a, b):
"""
Compute a divided by b and rounded up.
"""
return -(-a // b)
def rmse(y1, y2, axis=None):
return np.sqrt(np.mean((y1-y2)**2, axis=axis))
def is_callable(f):
return hasattr(f, '__call__')
def atleast_nd(X, d):
if np.ndim(X) < d:
sh = (d-np.ndim(X))*(1,) + np.shape(X)
X = np.reshape(X, sh)
return X
def T(X):
"""
Transpose the matrix.
"""
return np.swapaxes(X, -1, -2)
class TestCase(unittest.TestCase):
"""
Simple base class for unit testing.
Adds NumPy's features to Python's unittest.
"""
def assertAllClose(self, A, B,
msg="Arrays not almost equal",
rtol=1e-4,
atol=0):
self.assertEqual(np.shape(A), np.shape(B), msg=msg)
testing.assert_allclose(A, B, err_msg=msg, rtol=rtol, atol=atol)
pass
def assertArrayEqual(self, A, B, msg="Arrays not equal"):
self.assertEqual(np.shape(A), np.shape(B), msg=msg)
testing.assert_array_equal(A, B, err_msg=msg)
pass
def assertMessage(self, M1, M2):
if len(M1) != len(M2):
self.fail("Message lists have different lengths")
for (m1, m2) in zip(M1, M2):
self.assertAllClose(m1, m2)
pass
def assertMessageToChild(self, X, u):
self.assertMessage(X._message_to_child(), u)
pass
def symm(X):
"""
Make X symmetric.
"""
return 0.5 * (X + np.swapaxes(X, -1, -2))
def unique(l):
"""
Remove duplicate items from a list while preserving order.
"""
seen = set()
seen_add = seen.add
return [ x for x in l if x not in seen and not seen_add(x)]
def tempfile(prefix='', suffix=''):
return tmp.NamedTemporaryFile(prefix=prefix, suffix=suffix).name
def write_to_hdf5(group, data, name):
"""
Writes the given array into the HDF5 file.
"""
try:
# Try using compression. It doesn't work for scalars.
group.create_dataset(name,
data=data,
compression='gzip')
except TypeError:
group.create_dataset(name,
data=data)
except ValueError:
raise ValueError('Could not write %s' % data)
def nans(size=()):
return np.tile(np.nan, size)
def trues(shape):
return np.ones(shape, dtype=np.bool)
def identity(*shape):
return np.reshape(np.identity(np.prod(shape)), shape+shape)
def array_to_scalar(x):
# This transforms an N-dimensional array to a scalar. It's most
# useful when you know that the array has only one element and you
# want it out as a scalar.
return np.ravel(x)[0]
#def diag(x):
def put(x, indices, y, axis=-1, ufunc=np.add):
"""A kind of inverse mapping of `np.take`
    In simple terms, the operation can be thought of as:
    .. code-block:: python
        x[indices] += y
    with the exception that all entries of `y` are used instead of just the
    first occurrence corresponding to a particular element. That is, the results
are accumulated, and the accumulation function can be changed by providing
`ufunc`. For instance, `np.multiply` corresponds to:
.. code-block:: python
x[indices] *= y
Whereas `np.take` picks indices along an axis and returns the resulting
array, `put` similarly picks indices along an axis but accumulates the
given values to those entries.
Example
-------
.. code-block:: python
>>> x = np.zeros(3)
>>> put(x, [2, 2, 0, 2, 2], 1)
array([ 1., 0., 4.])
`y` must broadcast to the shape of `np.take(x, indices)`:
.. code-block:: python
>>> x = np.zeros((3,4))
>>> put(x, [[2, 2, 0, 2, 2], [1, 2, 1, 2, 1]], np.ones((2,1,4)), axis=0)
array([[ 1., 1., 1., 1.],
[ 3., 3., 3., 3.],
[ 6., 6., 6., 6.]])
"""
#x = np.copy(x)
ndim = np.ndim(x)
if not isinstance(axis, int):
raise ValueError("Axis must be an integer")
# Make axis index positive: [0, ..., ndim-1]
if axis < 0:
axis = axis + ndim
if axis < 0 or axis >= ndim:
raise ValueError("Axis out of bounds")
indices = axis*(slice(None),) + (indices,) + (ndim-axis-1)*(slice(None),)
#y = add_trailing_axes(y, ndim-axis-1)
ufunc.at(x, indices, y)
return x
def put_simple(y, indices, axis=-1, length=None):
"""An inverse operation of `np.take` with accumulation and broadcasting.
Compared to `put`, the difference is that the result array is initialized
with an array of zeros whose shape is determined automatically and `np.add`
is used as the accumulator.
"""
if length is None:
# Try to determine the original length of the axis by finding the
# largest index. It is more robust to give the length explicitly.
indices = np.copy(indices)
indices[indices<0] = np.abs(indices[indices<0]) - 1
length = np.amax(indices) + 1
if not isinstance(axis, int):
raise ValueError("Axis must be an integer")
# Make axis index negative: [-ndim, ..., -1]
if axis >= 0:
raise ValueError("Axis index must be negative")
y = atleast_nd(y, abs(axis)-1)
shape_y = np.shape(y)
end_before = axis - np.ndim(indices) + 1
start_after = axis + 1
if end_before == 0:
shape_x = shape_y + (length,)
elif start_after == 0:
shape_x = shape_y[:end_before] + (length,)
else:
shape_x = shape_y[:end_before] + (length,) + shape_y[start_after:]
x = np.zeros(shape_x)
return put(x, indices, y, axis=axis)
def grid(x1, x2):
""" Returns meshgrid as a (M*N,2)-shape array. """
(X1, X2) = np.meshgrid(x1, x2)
return np.hstack((X1.reshape((-1,1)),X2.reshape((-1,1))))
# class CholeskyDense():
# def __init__(self, K):
# self.U = linalg.cho_factor(K)
# def solve(self, b):
# if sparse.issparse(b):
# b = b.toarray()
# return linalg.cho_solve(self.U, b)
# def logdet(self):
# return 2*np.sum(np.log(np.diag(self.U[0])))
# def trace_solve_gradient(self, dK):
# return np.trace(self.solve(dK))
# class CholeskySparse():
# def __init__(self, K):
# self.LD = cholmod.cholesky(K)
# def solve(self, b):
# if sparse.issparse(b):
# b = b.toarray()
# return self.LD.solve_A(b)
# def logdet(self):
# return self.LD.logdet()
# #np.sum(np.log(LD.D()))
# def trace_solve_gradient(self, dK):
# # WTF?! numpy.multiply doesn't work for two sparse
# # matrices.. It returns a result but it is incorrect!
# # Use the identity trace(K\dK)=sum(inv(K).*dK) by computing
# # the sparse inverse (lower triangular part)
# iK = self.LD.spinv(form='lower')
# return (2*iK.multiply(dK).sum()
# - iK.diagonal().dot(dK.diagonal()))
# # Multiply by two because of symmetry (remove diagonal once
# # because it was taken into account twice)
# #return np.multiply(self.LD.inv().todense(),dK.todense()).sum()
# #return self.LD.inv().multiply(dK).sum() # THIS WORKS
# #return np.multiply(self.LD.inv(),dK).sum() # THIS NOT WORK!! WTF??
# iK = self.LD.spinv()
# return iK.multiply(dK).sum()
# #return (2*iK.multiply(dK).sum()
# # - iK.diagonal().dot(dK.diagonal()))
# #return (2*np.multiply(iK, dK).sum()
# # - iK.diagonal().dot(dK.diagonal())) # THIS NOT WORK!!
# #return np.trace(self.solve(dK))
# def cholesky(K):
# if isinstance(K, np.ndarray):
# return CholeskyDense(K)
# elif sparse.issparse(K):
# return CholeskySparse(K)
# else:
# raise Exception("Unsupported covariance matrix type")
# Computes log probability density function of the Gaussian
# distribution
def gaussian_logpdf(y_invcov_y,
y_invcov_mu,
mu_invcov_mu,
logdetcov,
D):
return (-0.5*D*np.log(2*np.pi)
-0.5*logdetcov
-0.5*y_invcov_y
+y_invcov_mu
-0.5*mu_invcov_mu)
def zipper_merge(*lists):
"""
Combines lists by alternating elements from them.
Combining lists [1,2,3], ['a','b','c'] and [42,666,99] results in
[1,'a',42,2,'b',666,3,'c',99]
    The lists should have equal length; otherwise, the result is truncated to
    the length of the shortest list.
This is known as alternating merge or zipper merge.
"""
return list(sum(zip(*lists), ()))
def remove_whitespace(s):
return ''.join(s.split())
def is_numeric(a):
return (np.isscalar(a) or
isinstance(a, list) or
isinstance(a, np.ndarray))
def is_scalar_integer(x):
t = np.asanyarray(x).dtype.type
return np.ndim(x) == 0 and issubclass(t, np.integer)
def isinteger(x):
t = np.asanyarray(x).dtype.type
return ( issubclass(t, np.integer) or issubclass(t, np.bool_) )
def is_string(s):
return isinstance(s, str)
def multiply_shapes(*shapes):
"""
Compute element-wise product of lists/tuples.
Shorter lists are concatenated with leading 1s in order to get lists with
the same length.
"""
# Make the shapes equal length
shapes = make_equal_length(*shapes)
# Compute element-wise product
f = lambda X,Y: (x*y for (x,y) in zip(X,Y))
shape = functools.reduce(f, shapes)
return tuple(shape)
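# For example, after left-padding the shorter shape with 1s:
#   multiply_shapes((2, 3), (3,))  ->  (2, 9)    # (2*1, 3*3)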
def make_equal_length(*shapes):
"""
Make tuples equal length.
Add leading 1s to shorter tuples.
"""
# Get maximum length
max_len = max((len(shape) for shape in shapes))
# Make the shapes equal length
shapes = ((1,)*(max_len-len(shape)) + tuple(shape) for shape in shapes)
return shapes
def make_equal_ndim(*arrays):
"""
Add trailing unit axes so that arrays have equal ndim
"""
shapes = [np.shape(array) for array in arrays]
shapes = make_equal_length(*shapes)
arrays = [np.reshape(array, shape)
for (array, shape) in zip(arrays, shapes)]
return arrays
def sum_to_dim(A, dim):
"""
Sum leading axes of A such that A has dim dimensions.
"""
dimdiff = np.ndim(A) - dim
if dimdiff > 0:
axes = np.arange(dimdiff)
A = np.sum(A, axis=axes)
return A
def broadcasting_multiplier(plates, *args):
"""
Compute the plate multiplier for given shapes.
The first shape is compared to all other shapes (using NumPy
broadcasting rules). All the elements which are non-unit in the first
shape but 1 in all other shapes are multiplied together.
This method is used, for instance, for computing a correction factor for
messages to parents: If this node has non-unit plates that are unit
plates in the parent, those plates are summed. However, if the message
has unit axis for that plate, it should be first broadcasted to the
plates of this node and then summed to the plates of the parent. In
order to avoid this broadcasting and summing, it is more efficient to
just multiply by the correct factor. This method computes that
factor. The first argument is the full plate shape of this node (with
respect to the parent). The other arguments are the shape of the message
array and the plates of the parent (with respect to this node).
"""
# Check broadcasting of the shapes
for arg in args:
broadcasted_shape(plates, arg)
# Check that each arg-plates are a subset of plates?
for arg in args:
if not is_shape_subset(arg, plates):
print("Plates:", plates)
print("Args:", args)
raise ValueError("The shapes in args are not a sub-shape of "
"plates")
r = 1
for j in range(-len(plates),0):
mult = True
for arg in args:
# if -j <= len(arg) and arg[j] != 1:
if not (-j > len(arg) or arg[j] == 1):
mult = False
if mult:
r *= plates[j]
return r
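# For example, with plates (5, 3) and argument shapes (3,) and (1, 3), only the
# leading plate of length 5 is unit (or missing) in every argument, so:
#   broadcasting_multiplier((5, 3), (3,), (1, 3))  ->  5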
def sum_multiply_to_plates(*arrays, to_plates=(), from_plates=None, ndim=0):
"""
Compute the product of the arguments and sum to the target shape.
"""
arrays = list(arrays)
def get_plates(x):
if ndim == 0:
return x
else:
return x[:-ndim]
plates_arrays = [get_plates(np.shape(array)) for array in arrays]
product_plates = broadcasted_shape(*plates_arrays)
if from_plates is None:
from_plates = product_plates
r = 1
else:
r = broadcasting_multiplier(from_plates, product_plates, to_plates)
for ind in range(len(arrays)):
plates_others = plates_arrays[:ind] + plates_arrays[(ind+1):]
plates_without = broadcasted_shape(to_plates, *plates_others)
ax = axes_to_collapse(plates_arrays[ind], #get_plates(np.shape(arrays[ind])),
plates_without)
if ax:
ax = tuple([a-ndim for a in ax])
arrays[ind] = np.sum(arrays[ind], axis=ax, keepdims=True)
plates_arrays = [get_plates(np.shape(array)) for array in arrays]
product_plates = broadcasted_shape(*plates_arrays)
ax = axes_to_collapse(product_plates, to_plates)
if ax:
ax = tuple([a-ndim for a in ax])
y = sum_multiply(*arrays, axis=ax, keepdims=True)
else:
y = functools.reduce(np.multiply, arrays)
y = squeeze_to_dim(y, len(to_plates) + ndim)
return r * y
def sum_multiply(*args, axis=None, sumaxis=True, keepdims=False):
# Computes sum(arg[0]*arg[1]*arg[2]*..., axis=axes_to_sum) without
# explicitly computing the intermediate product
if len(args) == 0:
raise ValueError("You must give at least one input array")
# Dimensionality of the result
max_dim = 0
for k in range(len(args)):
max_dim = max(max_dim, np.ndim(args[k]))
if sumaxis:
if axis is None:
# Sum all axes
axes = []
else:
if np.isscalar(axis):
axis = [axis]
axes = [i
for i in range(max_dim)
if i not in axis and (-max_dim+i) not in axis]
else:
if axis is None:
# Keep all axes
axes = range(max_dim)
else:
# Find axes that are kept
if np.isscalar(axis):
axes = [axis]
axes = [i if i >= 0
else i+max_dim
for i in axis]
axes = sorted(axes)
if len(axes) > 0 and (min(axes) < 0 or max(axes) >= max_dim):
raise ValueError("Axis index out of bounds")
# Form a list of pairs: the array in the product and its axes
pairs = list()
for i in range(len(args)):
a = args[i]
a_dim = np.ndim(a)
pairs.append(a)
pairs.append(range(max_dim-a_dim, max_dim))
# Output axes are those which are not summed
pairs.append(axes)
# Compute the sum-product
try:
y = np.einsum(*pairs)
except ValueError as err:
if str(err) == ("If 'op_axes' or 'itershape' is not NULL in "
"theiterator constructor, 'oa_ndim' must be greater "
"than zero"):
# TODO/FIXME: Handle a bug in NumPy. If all arguments to einsum are
# scalars, it raises an error. For scalars we can just use multiply
# and forget about summing. Hopefully, in the future, einsum handles
# scalars properly and this try-except becomes unnecessary.
y = functools.reduce(np.multiply, args)
else:
raise err
# Restore summed axes as singleton axes
if keepdims:
d = 0
s = ()
for k in range(max_dim):
if k in axes:
# Axis not summed
s = s + (np.shape(y)[d],)
d += 1
else:
# Axis was summed
s = s + (1,)
y = np.reshape(y, s)
return y
def sum_product(*args, axes_to_keep=None, axes_to_sum=None, keepdims=False):
if axes_to_keep is not None:
return sum_multiply(*args,
axis=axes_to_keep,
sumaxis=False,
keepdims=keepdims)
else:
return sum_multiply(*args,
axis=axes_to_sum,
sumaxis=True,
keepdims=keepdims)
def moveaxis(A, axis_from, axis_to):
"""
Move the axis `axis_from` to position `axis_to`.
"""
if ((axis_from < 0 and abs(axis_from) > np.ndim(A)) or
(axis_from >= 0 and axis_from >= np.ndim(A)) or
(axis_to < 0 and abs(axis_to) > np.ndim(A)) or
(axis_to >= 0 and axis_to >= np.ndim(A))):
raise ValueError("Can't move axis %d to position %d. Axis index out of "
"bounds for array with shape %s"
% (axis_from,
axis_to,
np.shape(A)))
axes = np.arange(np.ndim(A))
axes[axis_from:axis_to] += 1
axes[axis_from:axis_to:-1] -= 1
axes[axis_to] = axis_from
return np.transpose(A, axes=axes)
def safe_indices(inds, shape):
"""
Makes sure that indices are valid for given shape.
The shorter shape determines the length.
For instance,
.. testsetup::
from bayespy.utils.misc import safe_indices
>>> safe_indices( (3, 4, 5), (1, 6) )
(0, 5)
"""
m = min(len(inds), len(shape))
if m == 0:
return ()
inds = inds[-m:]
maxinds = np.array(shape[-m:]) - 1
return tuple(np.fmin(inds, maxinds))
def broadcasted_shape(*shapes):
"""
Computes the resulting broadcasted shape for a given set of shapes.
Uses the broadcasting rules of NumPy. Raises an exception if the shapes do
not broadcast.
"""
dim = 0
for a in shapes:
dim = max(dim, len(a))
S = ()
for i in range(-dim,0):
s = 1
for a in shapes:
if -i <= len(a):
if s == 1:
s = a[i]
elif a[i] != 1 and a[i] != s:
raise ValueError("Shapes %s do not broadcast" % (shapes,))
S = S + (s,)
return S
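# For example:
#   broadcasted_shape((5, 1, 3), (4, 3))  ->  (5, 4, 3)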
def broadcasted_shape_from_arrays(*arrays):
"""
Computes the resulting broadcasted shape for a given set of arrays.
Raises an exception if the shapes do not broadcast.
"""
shapes = [np.shape(array) for array in arrays]
return broadcasted_shape(*shapes)
def is_shape_subset(sub_shape, full_shape):
"""
"""
if len(sub_shape) > len(full_shape):
return False
for i in range(len(sub_shape)):
ind = -1 - i
if sub_shape[ind] != 1 and sub_shape[ind] != full_shape[ind]:
return False
return True
def add_axes(X, num=1, axis=0):
for i in range(num):
X = np.expand_dims(X, axis=axis)
return X
def add_leading_axes(x, n):
return add_axes(x, axis=0, num=n)
def add_trailing_axes(x, n):
return add_axes(x, axis=-1, num=n)
def nested_iterator(max_inds):
s = [range(i) for i in max_inds]
return itertools.product(*s)
def first(L):
"""
"""
for (n,l) in enumerate(L):
if l:
return n
return None
def squeeze(X):
"""
Remove leading axes that have unit length.
For instance, a shape (1,1,4,1,3) will be reshaped to (4,1,3).
"""
shape = np.array(np.shape(X))
inds = np.nonzero(shape != 1)[0]
if len(inds) == 0:
shape = ()
else:
shape = shape[inds[0]:]
return np.reshape(X, shape)
def squeeze_to_dim(X, dim):
s = tuple(range(np.ndim(X)-dim))
return np.squeeze(X, axis=s)
def axes_to_collapse(shape_x, shape_to):
# Solves which axes of shape shape_x need to be collapsed in order
# to get the shape shape_to
s = ()
for j in range(-len(shape_x), 0):
if shape_x[j] != 1:
if -j > len(shape_to) or shape_to[j] == 1:
s += (j,)
elif shape_to[j] != shape_x[j]:
print('Shape from: ' + str(shape_x))
print('Shape to: ' + str(shape_to))
raise Exception('Incompatible shape to squeeze')
return tuple(s)
def sum_to_shape(X, s):
"""
Sum axes of the array such that the resulting shape is as given.
Thus, the shape of the result will be s or an error is raised.
"""
# First, sum and remove axes that are not in s
if np.ndim(X) > len(s):
axes = tuple(range(-np.ndim(X), -len(s)))
else:
axes = ()
Y = np.sum(X, axis=axes)
# Second, sum axes that are 1 in s but keep the axes
axes = ()
for i in range(-np.ndim(Y), 0):
if s[i] == 1:
if np.shape(Y)[i] > 1:
axes = axes + (i,)
else:
if np.shape(Y)[i] != s[i]:
raise ValueError("Shape %s can't be summed to shape %s" %
(np.shape(X), s))
Y = np.sum(Y, axis=axes, keepdims=True)
return Y
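# For example, summing a (2, 3, 4) array of ones to shape (3, 1):
#   sum_to_shape(np.ones((2, 3, 4)), (3, 1))  ->  np.full((3, 1), 8.0)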
def repeat_to_shape(A, s):
# Current shape
t = np.shape(A)
if len(t) > len(s):
raise Exception("Can't repeat to a smaller shape")
# Add extra axis
t = tuple([1]*(len(s)-len(t))) + t
A = np.reshape(A,t)
# Repeat
for i in reversed(range(len(s))):
if s[i] != t[i]:
if t[i] != 1:
raise Exception("Can't repeat non-singular dimensions")
else:
A = np.repeat(A, s[i], axis=i)
return A
def multidigamma(a, d):
"""
Returns the derivative of the log of multivariate gamma.
"""
return np.sum(special.digamma(a[...,None] - 0.5*np.arange(d)),
axis=-1)
m_digamma = multidigamma
def diagonal(A):
return np.diagonal(A, axis1=-2, axis2=-1)
def make_diag(X, ndim=1, ndim_from=0):
"""
Create a diagonal array given the diagonal elements.
The diagonal array can be multi-dimensional. By default, the last axis is
transformed to two axes (diagonal matrix) but this can be changed using ndim
keyword. For instance, an array with shape (K,L,M,N) can be transformed to a
set of diagonal 4-D tensors with shape (K,L,M,N,M,N) by giving ndim=2. If
ndim=3, the result has shape (K,L,M,N,L,M,N), and so on.
Diagonality means that for the resulting array Y holds:
Y[...,i_1,i_2,..,i_ndim,j_1,j_2,..,j_ndim] is zero if i_n!=j_n for any n.
"""
if ndim < 0:
raise ValueError("Parameter ndim must be non-negative integer")
if ndim_from < 0:
raise ValueError("Parameter ndim_to must be non-negative integer")
if ndim_from > ndim:
raise ValueError("Parameter ndim_to must not be greater than ndim")
if ndim == 0:
return X
if np.ndim(X) < 2 * ndim_from:
raise ValueError("The array does not have enough axes")
if ndim_from > 0:
if np.shape(X)[-ndim_from:] != np.shape(X)[-2*ndim_from:-ndim_from]:
raise ValueError("The array X is not square")
if ndim == ndim_from:
return X
X = atleast_nd(X, ndim+ndim_from)
if ndim > 0:
if ndim_from > 0:
I = identity(*(np.shape(X)[-(ndim_from+ndim):-ndim_from]))
else:
I = identity(*(np.shape(X)[-ndim:]))
X = add_axes(X, axis=np.ndim(X)-ndim_from, num=ndim-ndim_from)
X = I * X
return X
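# For example, a (2, 3) array becomes a stack of two 3x3 diagonal matrices:
#   make_diag(np.ones((2, 3))).shape  ->  (2, 3, 3)   # each [k] equals np.eye(3)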
def get_diag(X, ndim=1, ndim_to=0):
"""
Get the diagonal of an array.
If ndim>1, take the diagonal of the last 2*ndim axes.
"""
if ndim < 0:
raise ValueError("Parameter ndim must be non-negative integer")
if ndim_to < 0:
raise ValueError("Parameter ndim_to must be non-negative integer")
if ndim_to > ndim:
raise ValueError("Parameter ndim_to must not be greater than ndim")
if ndim == 0:
return X
if np.ndim(X) < 2*ndim:
raise ValueError("The array does not have enough axes")
if np.shape(X)[-ndim:] != np.shape(X)[-2*ndim:-ndim]:
raise ValueError("The array X is not square")
if ndim == ndim_to:
return X
n_plate_axes = np.ndim(X) - 2 * ndim
n_diag_axes = ndim - ndim_to
axes = tuple(range(0, np.ndim(X) - ndim + ndim_to))
lengths = [0, n_plate_axes, n_diag_axes, ndim_to, ndim_to]
cutpoints = list(np.cumsum(lengths))
axes_plates = axes[cutpoints[0]:cutpoints[1]]
axes_diag= axes[cutpoints[1]:cutpoints[2]]
axes_dims1 = axes[cutpoints[2]:cutpoints[3]]
axes_dims2 = axes[cutpoints[3]:cutpoints[4]]
axes_input = axes_plates + axes_diag + axes_dims1 + axes_diag + axes_dims2
axes_output = axes_plates + axes_diag + axes_dims1 + axes_dims2
return np.einsum(X, axes_input, axes_output)
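# Note that get_diag inverts make_diag (with matching ndim) for X with ndim >= 1:
#   get_diag(make_diag(X, ndim=1), ndim=1)  ->  X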
def diag(X, ndim=1):
"""
Create a diagonal array given the diagonal elements.
The diagonal array can be multi-dimensional. By default, the last axis is
transformed to two axes (diagonal matrix) but this can be changed using ndim
keyword. For instance, an array with shape (K,L,M,N) can be transformed to a
set of diagonal 4-D tensors with shape (K,L,M,N,M,N) by giving ndim=2. If
ndim=3, the result has shape (K,L,M,N,L,M,N), and so on.
Diagonality means that for the resulting array Y holds:
Y[...,i_1,i_2,..,i_ndim,j_1,j_2,..,j_ndim] is zero if i_n!=j_n for any n.
"""
X = atleast_nd(X, ndim)
if ndim > 0:
I = identity(*(np.shape(X)[-ndim:]))
X = add_axes(X, axis=np.ndim(X), num=ndim)
X = I * X
return X
def m_dot(A,b):
# Compute matrix-vector product over the last two axes of A and
# the last axes of b. Other axes are broadcasted. If A has shape
# (..., M, N) and b has shape (..., N), then the result has shape
# (..., M)
#b = reshape(b, shape(b)[:-1] + (1,) + shape(b)[-1:])
#return np.dot(A, b)
return np.einsum('...ik,...k->...i', A, b)
# TODO: Use einsum!!
#return np.sum(A*b[...,np.newaxis,:], axis=(-1,))
def block_banded(D, B):
"""
Construct a symmetric block-banded matrix.
`D` contains square diagonal blocks.
`B` contains super-diagonal blocks.
The resulting matrix is:
D[0], B[0], 0, 0, ..., 0, 0, 0
B[0].T, D[1], B[1], 0, ..., 0, 0, 0
0, B[1].T, D[2], B[2], ..., ..., ..., ...
... ... ... ... ..., B[N-2].T, D[N-1], B[N-1]
0, 0, 0, 0, ..., 0, B[N-1].T, D[N]
"""
D = [np.atleast_2d(d) for d in D]
B = [np.atleast_2d(b) for b in B]
# Number of diagonal blocks
N = len(D)
if len(B) != N-1:
raise ValueError("The number of super-diagonal blocks must contain "
"exactly one block less than the number of diagonal "
"blocks")
# Compute the size of the full matrix
M = 0
for i in range(N):
if np.ndim(D[i]) != 2:
raise ValueError("Blocks must be 2 dimensional arrays")
d = np.shape(D[i])
if d[0] != d[1]:
raise ValueError("Diagonal blocks must be square")
M += d[0]
for i in range(N-1):
if np.ndim(B[i]) != 2:
raise ValueError("Blocks must be 2 dimensional arrays")
b = np.shape(B[i])
if b[0] != np.shape(D[i])[1] or b[1] != np.shape(D[i+1])[0]:
raise ValueError("Shapes of the super-diagonal blocks do not match "
"the shapes of the diagonal blocks")
A = np.zeros((M,M))
k = 0
for i in range(N-1):
(d0, d1) = np.shape(B[i])
# Diagonal block
A[k:k+d0, k:k+d0] = D[i]
# Super-diagonal block
A[k:k+d0, k+d0:k+d0+d1] = B[i]
# Sub-diagonal block
A[k+d0:k+d0+d1, k:k+d0] = B[i].T
k += d0
A[k:,k:] = D[-1]
return A
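# For example, two 1x1 diagonal blocks and one 1x1 super-diagonal block:
#   block_banded([[[1]], [[2]]], [[[3]]])  ->  [[1., 3.], [3., 2.]]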
def dist_haversine(c1, c2, radius=6372795):
# Convert coordinates to radians
lat1 = np.atleast_1d(c1[0])[...,:,None] * np.pi / 180
lon1 = np.atleast_1d(c1[1])[...,:,None] * np.pi / 180
lat2 = np.atleast_1d(c2[0])[...,None,:] * np.pi / 180
lon2 = np.atleast_1d(c2[1])[...,None,:] * np.pi / 180
dlat = lat2 - lat1
dlon = lon2 - lon1
A = np.sin(dlat/2)**2 + np.cos(lat1)*np.cos(lat2)*(np.sin(dlon/2)**2)
C = 2 * np.arctan2(np.sqrt(A), np.sqrt(1-A))
return radius * C
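# For example, two points 90 degrees apart on the equator are a quarter of a
# great circle apart:
#   dist_haversine((0, 0), (0, 90))  ->  ~1.001e7 metres (radius * pi / 2)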
def logsumexp(X, axis=None, keepdims=False):
"""
    Compute log(sum(exp(X))) in a numerically stable way
"""
X = np.asanyarray(X)
maxX = np.amax(X, axis=axis, keepdims=True)
if np.ndim(maxX) > 0:
maxX[~np.isfinite(maxX)] = 0
elif not np.isfinite(maxX):
maxX = 0
X = X - maxX
if not keepdims:
maxX = np.squeeze(maxX, axis=axis)
return np.log(np.sum(np.exp(X), axis=axis, keepdims=keepdims)) + maxX
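# For example, the naive computation overflows where this does not:
#   logsumexp([1000., 1000.])                ->  1000.6931...  (1000 + log(2))
#   np.log(np.sum(np.exp([1000., 1000.])))   ->  inf (overflow)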
def normalized_exp(phi):
"""Compute exp(phi) so that exp(phi) sums to one.
This is useful for computing probabilities from log evidence.
"""
logsum_p = logsumexp(phi, axis=-1, keepdims=True)
logp = phi - logsum_p
p = np.exp(logp)
# Because of small numerical inaccuracy, normalize the probabilities
# again for more accurate results
return (
p / np.sum(p, axis=-1, keepdims=True),
logsum_p
)
def invpsi(x):
r"""
Inverse digamma (psi) function.
The digamma function is the derivative of the log gamma function.
This calculates the value Y > 0 for a value X such that digamma(Y) = X.
For the new version, see Appendix C:
http://research.microsoft.com/en-us/um/people/minka/papers/dirichlet/minka-dirichlet.pdf
For the previous implementation, see:
http://www4.ncsu.edu/~pfackler/
Are there speed/accuracy differences between the methods?
"""
x = np.asanyarray(x)
y = np.where(
x >= -2.22,
np.exp(x) + 0.5,
-1/(x - special.psi(1))
)
for i in range(5):
y = y - (special.psi(y) - x) / special.polygamma(1, y)
return y
# # Previous implementation. Is it worse? Is there difference?
# L = 1.0
# y = np.exp(x)
# while (L > 1e-10):
# y += L*np.sign(x-special.psi(y))
# L /= 2
# # Ad hoc by Jaakko
# y = np.where(x < -100, -1 / x, y)
# return y
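# Sanity check for the Newton refinement above (psi is the digamma function):
#   np.allclose(special.psi(invpsi(np.array([-1.5, 0.5]))), [-1.5, 0.5])  ->  True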
def invgamma(x):
r"""
Inverse gamma function.
See: http://mathoverflow.net/a/28977
"""
k = 1.461632
c = 0.036534
L = np.log((x+c)/np.sqrt(2*np.pi))
W = special.lambertw(L/np.exp(1))
return L/W + 0.5
def mean(X, axis=None, keepdims=False):
"""
Compute the mean, ignoring NaNs.
"""
if np.ndim(X) == 0:
if axis is not None:
raise ValueError("Axis out of bounds")
return X
X = np.asanyarray(X)
nans = np.isnan(X)
X = X.copy()
X[nans] = 0
m = (np.sum(X, axis=axis, keepdims=keepdims) /
np.sum(~nans, axis=axis, keepdims=keepdims))
return m
def gradient(f, x, epsilon=1e-6):
return optimize.approx_fprime(x, f, epsilon)
def broadcast(*arrays, ignore_axis=None):
"""
Explicitly broadcast arrays to same shapes.
It is possible ignore some axes so that the arrays are not broadcasted
along those axes.
"""
shapes = [np.shape(array) for array in arrays]
if ignore_axis is None:
full_shape = broadcasted_shape(*shapes)
else:
try:
ignore_axis = tuple(ignore_axis)
except TypeError:
ignore_axis = (ignore_axis,)
if len(ignore_axis) != len(set(ignore_axis)):
raise ValueError("Indices must be unique")
if any(i >= 0 for i in ignore_axis):
raise ValueError("Indices must be negative")
# Put lengths of ignored axes to 1
cut_shapes = [
tuple(
1
if i in ignore_axis else
shape[i]
for i in range(-len(shape), 0)
)
for shape in shapes
]
full_shape = broadcasted_shape(*cut_shapes)
return [np.ones(full_shape) * array for array in arrays]
def block_diag(*arrays):
"""
Form a block diagonal array from the given arrays.
    Compared to SciPy's block_diag, this utilizes broadcasting and accepts
    arrays with more than two dimensions.
"""
arrays = broadcast(*arrays, ignore_axis=(-1, -2))
plates = np.shape(arrays[0])[:-2]
M = sum(np.shape(array)[-2] for array in arrays)
N = sum(np.shape(array)[-1] for array in arrays)
Y = np.zeros(plates + (M, N))
i_start = 0
j_start = 0
for array in arrays:
i_end = i_start + np.shape(array)[-2]
j_end = j_start + np.shape(array)[-1]
Y[...,i_start:i_end,j_start:j_end] = array
i_start = i_end
j_start = j_end
return Y
def concatenate(*arrays, axis=-1):
"""
Concatenate arrays along a given axis.
Compared to NumPy's concatenate, this utilizes broadcasting.
"""
# numpy.concatenate doesn't do broadcasting, so we need to do it explicitly
return np.concatenate(
broadcast(*arrays, ignore_axis=axis),
axis=axis
)
|
dungvtdev/upsbayescpm
|
bayespy/utils/misc.py
|
Python
|
mit
| 42,257
|
[
"Gaussian"
] |
160c9637b3b0244c2e2d35dbced07ab4db2bc9ff3d18c8b14bcb65543e21241e
|
from tools.load import LoadMatrix
from sg import sg
lm=LoadMatrix()
traindat=lm.load_numbers('../data/fm_train_real.dat')
testdat=lm.load_numbers('../data/fm_test_real.dat')
trainlabel=lm.load_labels('../data/label_train_regression.dat')
parameter_list=[[traindat,testdat,trainlabel,10,2.1,1.2,1e-5,1e-2],
[traindat,testdat,trainlabel,11,2.3,1.3,1e-6,1e-3]]
def regression_libsvr (fm_train=traindat,fm_test=testdat,
label_train=trainlabel,size_cache=10,width=2.1,
C=1.2,epsilon=1e-5,tube_epsilon=1e-2):
sg('set_features', 'TRAIN', fm_train)
sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, width)
sg('set_labels', 'TRAIN', label_train)
sg('new_regression', 'LIBSVR')
sg('svr_tube_epsilon', tube_epsilon)
sg('c', C)
sg('train_regression')
sg('set_features', 'TEST', fm_test)
result=sg('classify')
return result
if __name__=='__main__':
print('LibSVR')
regression_libsvr(*parameter_list[0])
|
AzamYahya/shogun
|
examples/undocumented/python_static/regression_libsvr.py
|
Python
|
gpl-3.0
| 913
|
[
"Gaussian"
] |
a33676b5f51e8cc05216b2d582bd5ac631589054f456988816e8235204f90c49
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""This script runs the regression framework in the cclib-data repostiory."""
from __future__ import print_function
import os
import sys
if __name__ == "__main__":
# Assume the cclib-data repository is cloned in this directory.
regression_dir = os.path.join("..", "data", "regression")
sys.path.append(regression_dir)
import regression
opt_traceback = "--traceback" in sys.argv
opt_status = "--status" in sys.argv
# This can be used to limit the programs we want to run regressions for.
    which = [arg for arg in sys.argv[1:] if arg not in ["--status", "--traceback"]]
regression.main(which, opt_traceback, opt_status, regression_dir)
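# Example invocation (arguments are illustrative; assumes cclib-data is
# cloned at ../data/regression as noted above):
#   python run_regressions.py Gaussian --traceback
# Positional arguments restrict which programs' regressions are run.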
|
gaursagar/cclib
|
test/run_regressions.py
|
Python
|
bsd-3-clause
| 904
|
[
"cclib"
] |
d4dff3e34cbb65ed55b44b433477a3c4055b21a043c8f03229ad12ef144c0117
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Optimization utilities.
"""
|
davidwaroquiers/pymatgen
|
pymatgen/optimization/__init__.py
|
Python
|
mit
| 142
|
[
"pymatgen"
] |
c2805a317f126b1c6643fce0873bc394778c6dfebe31ef3a51622fa2cbb9a5ca
|
# This program implements a distributed version of BWA, using Makeflow and WorkQueue
# Author: Olivia Choudhury
# Date: 09/03/2013
import optparse, os, sys, tempfile, shutil, stat
class PassThroughParser(optparse.OptionParser):
def _process_args(self, largs, rargs, values):
while rargs:
try:
optparse.OptionParser._process_args(self,largs,rargs,values)
except (optparse.BadOptionError,optparse.AmbiguousOptionError), e:
largs.append(e.opt_str)
#Parse Command Line
parser = PassThroughParser()
parser.add_option('', '--ref', dest="ref", type="string")
parser.add_option('', '--fastq', dest="fastq", type="string")
parser.add_option('', '--rfastq', dest="rfastq", type="string")
parser.add_option('', '--output_SAM', dest="outsam", type="string")
parser.add_option('', '--output_log', dest="outlog", type="string")
parser.add_option('', '--wq_log', dest="wqlog", type="string")
parser.add_option('', '--output_dblog', dest="dblog", type="string")
parser.add_option('', '--output_err', dest="outerr", type="string")
parser.add_option('', '--pwfile', dest="pwfile", type="string")
parser.add_option('', '--user_id', dest="uid", type="string")
parser.add_option('', '--user_job', dest="ujob", type="string")
parser.add_option('', '--cctools', dest="cctools", type="string")  # cctools install prefix; read as options.cctools below
(options, args) = parser.parse_args()
# SETUP ENVIRONMENT VARIABLES
cur_dir = os.getcwd()
job_num = os.path.basename(cur_dir)
cctools_dir = options.cctools
makeflow='Makeflow'
wq_project_name="galaxy_bwa_"+options.uid+"_"+job_num
wq_password=options.pwfile
output_sam = "output_SAM"
makeflow_log = "makeflow_log"
wq_log = "wq_log"
debug_log = "debug_log"
output_err = "output_err"
# CREATE TMP AND MOVE FILES IN
if options.ref:
os.symlink(options.ref, "./reference.fa")
else:
print "No reference provided"
sys.exit(1)
inputs = "--ref reference.fa "
os.symlink(options.fastq, "./fastq.fq")
inputs += "--fastq fastq.fq "
if options.rfastq:
os.symlink(options.rfastq, "./rfastq.fq")
inputs += "--rfastq rfastq.fq "
os.system("makeflow_bwa --algoalign {0} {1} --makeflow {2} --output_SAM {3} {4}".format(
"bwa_backtrack", inputs, makeflow, output_sam, ' '.join(args)))
os.system("makeflow {0} -T wq -N {1} -J 50 -p 0 -l {2} -L {3} -dall -o {4} --password {5} >&1 2>&1".format(
makeflow, wq_project_name, makeflow_log, wq_log, debug_log, options.pwfile))
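# The two os.system() calls above first compose the workflow with
# makeflow_bwa and then execute it through Work Queue (-T wq). For
# reference, the first command expands to roughly:
#   makeflow_bwa --algoalign bwa_backtrack --ref reference.fa \
#       --fastq fastq.fq --makeflow Makeflow --output_SAM output_SAM
# plus any pass-through arguments collected by PassThroughParser.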
if options.dblog:
shutil.copyfile(debug_log, options.dblog)
if options.outlog:
shutil.copyfile(makeflow_log, options.outlog)
if options.wqlog:
shutil.copyfile(wq_log, options.wqlog)
shutil.copyfile(output_sam, options.outsam)
os.system(cctools_dir+'/bin/makeflow -c')
os.remove("./reference.fa")
os.remove("./fastq.fq")
os.remove("./makeflow_bwa")
os.remove("./bwa")
if options.rfastq:
os.remove("./rfastq.fq")
|
isanwong/cctools
|
galaxy/makeflow_bwa_wrapper.py
|
Python
|
gpl-2.0
| 2,780
|
[
"BWA"
] |
58c28275b9beb5e2e6e26770d20cf69efbe4ef5246ee61203848ebe8d281180d
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *  # Spack package preamble: provides PythonPackage, version, depends_on
class PyGpytorch(PythonPackage):
"""GPyTorch is a Gaussian process library implemented using PyTorch.
GPyTorch is designed for creating scalable, flexible, and modular Gaussian
process models with ease."""
homepage = "https://gpytorch.ai/"
url = "https://pypi.io/packages/source/g/gpytorch/gpytorch-1.2.1.tar.gz"
maintainers = ['adamjstewart']
version('1.2.1', sha256='ddd746529863d5419872610af23b1a1b0e8a29742131c9d9d2b4f9cae3c90781')
version('1.2.0', sha256='fcb216e0c1f128a41c91065766508e91e487d6ffadf212a51677d8014aefca84')
version('1.1.1', sha256='76bd455db2f17af5425f73acfaa6d61b8adb1f07ad4881c0fa22673f84fb571a')
depends_on('python@3.6:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-torch@1.6:', when='@1.2:', type=('build', 'run'))
depends_on('py-torch@1.5:', type=('build', 'run'))
depends_on('py-scikit-learn', when='@1.2:', type=('build', 'run'))
depends_on('py-scipy', when='@1.2:', type=('build', 'run'))
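# A minimal way to exercise this recipe (assuming a Spack checkout with
# this package present):
#   spack install py-gpytorch@1.2.1
# Spack then resolves the py-torch, py-scikit-learn and py-scipy
# dependencies according to the constraints declared above.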
|
iulian787/spack
|
var/spack/repos/builtin/packages/py-gpytorch/package.py
|
Python
|
lgpl-2.1
| 1,222
|
[
"Gaussian"
] |
6ce0fe7fbc7cb9d7af18943f9b5fac897ece4883350b9de850a9d6f32d6167ad
|
from __future__ import annotations
import random
from unittest import mock
from cctbx import sgtbx
from dxtbx.model import Crystal, Experiment, ExperimentList
from scitbx import matrix
from dials.algorithms.clustering import observers
from dials.algorithms.clustering.unit_cell import UnitCellCluster
def test_UnitCellAnalysisObserver():
# generate some random unit cells
sgi = sgtbx.space_group_info("P1")
unit_cells = [
sgi.any_compatible_unit_cell(volume=random.uniform(990, 1010))
for i in range(10)
]
# generate experiment list
experiments = ExperimentList()
U = matrix.identity(3)
for uc in unit_cells:
B = matrix.sqr(uc.fractionalization_matrix()).transpose()
direct_matrix = (U * B).inverse()
experiments.append(
Experiment(
crystal=Crystal(
direct_matrix[:3],
direct_matrix[3:6],
direct_matrix[6:9],
space_group=sgi.group(),
)
)
)
# generate dendrogram
crystal_symmetries = [expt.crystal.get_crystal_symmetry() for expt in experiments]
lattice_ids = experiments.identifiers()
ucs = UnitCellCluster.from_crystal_symmetries(
crystal_symmetries, lattice_ids=lattice_ids
)
_, dendrogram, _ = ucs.ab_cluster(write_file_lists=False, doplot=False)
# setup script
script = mock.Mock()
script._experiments = experiments
script.unit_cell_dendrogram = dendrogram
# test the observer
observer = observers.UnitCellAnalysisObserver()
observer.update(script)
assert set(observer.data) == {"experiments", "dendrogram"}
d = observer.make_plots()
assert "unit_cell_graphs" in d
|
dials/dials
|
tests/algorithms/clustering/test_observers.py
|
Python
|
bsd-3-clause
| 1,766
|
[
"CRYSTAL"
] |
2fa2895a047302dda16220f8b3df9f495cc36d31816346c6bba4dc52817ba8d9
|
# -*- coding: utf-8 -*-
"""
Unit tests for instructor.api methods.
"""
# pylint: disable=E1111
import unittest
import json
import requests
import datetime
from urllib import quote
from django.test import TestCase
from nose.tools import raises
from mock import Mock, patch
from django.conf import settings
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.http import HttpRequest, HttpResponse
from django_comment_common.models import FORUM_ROLE_COMMUNITY_TA, Role
from django_comment_common.utils import seed_permissions_roles
from django.core import mail
from django.utils.timezone import utc
from django.test import RequestFactory
from django.contrib.auth.models import User
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.helpers import LoginEnrollmentTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from student.tests.factories import UserFactory
from courseware.tests.factories import StaffFactory, InstructorFactory, BetaTesterFactory
from student.roles import CourseBetaTesterRole
from microsite_configuration import microsite
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from courseware.models import StudentModule
# modules which are mocked in test cases.
import instructor_task.api
from instructor.access import allow_access
import instructor.views.api
from instructor.views.api import _split_input_list, common_exceptions_400
from instructor_task.api_helper import AlreadyRunningError
from xmodule.modulestore.locations import SlashSeparatedCourseKey
from .test_tools import msk_from_problem_urlname, get_extended_due
@common_exceptions_400
def view_success(request): # pylint: disable=W0613
"A dummy view for testing that returns a simple HTTP response"
return HttpResponse('success')
@common_exceptions_400
def view_user_doesnotexist(request): # pylint: disable=W0613
"A dummy view that raises a User.DoesNotExist exception"
raise User.DoesNotExist()
@common_exceptions_400
def view_alreadyrunningerror(request): # pylint: disable=W0613
"A dummy view that raises an AlreadyRunningError exception"
raise AlreadyRunningError()
class TestCommonExceptions400(unittest.TestCase):
"""
Testing the common_exceptions_400 decorator.
"""
def setUp(self):
self.request = Mock(spec=HttpRequest)
self.request.META = {}
def test_happy_path(self):
resp = view_success(self.request)
self.assertEqual(resp.status_code, 200)
def test_user_doesnotexist(self):
self.request.is_ajax.return_value = False
resp = view_user_doesnotexist(self.request)
self.assertEqual(resp.status_code, 400)
self.assertIn("User does not exist", resp.content)
def test_user_doesnotexist_ajax(self):
self.request.is_ajax.return_value = True
resp = view_user_doesnotexist(self.request)
self.assertEqual(resp.status_code, 400)
result = json.loads(resp.content)
self.assertIn("User does not exist", result["error"])
def test_alreadyrunningerror(self):
self.request.is_ajax.return_value = False
resp = view_alreadyrunningerror(self.request)
self.assertEqual(resp.status_code, 400)
self.assertIn("Task is already running", resp.content)
def test_alreadyrunningerror_ajax(self):
self.request.is_ajax.return_value = True
resp = view_alreadyrunningerror(self.request)
self.assertEqual(resp.status_code, 400)
result = json.loads(resp.content)
self.assertIn("Task is already running", result["error"])
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorAPIDenyLevels(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Ensure that users cannot access endpoints they shouldn't be able to.
"""
def setUp(self):
self.course = CourseFactory.create()
self.user = UserFactory.create()
CourseEnrollment.enroll(self.user, self.course.id)
self.problem_location = msk_from_problem_urlname(
self.course.id,
'robot-some-problem-urlname'
)
self.problem_urlname = str(self.problem_location)
_module = StudentModule.objects.create(
student=self.user,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
# Endpoints that only Staff or Instructors can access
self.staff_level_endpoints = [
('students_update_enrollment', {'identifiers': 'foo@example.org', 'action': 'enroll'}),
('get_grading_config', {}),
('get_students_features', {}),
('get_distribution', {}),
('get_student_progress_url', {'unique_student_identifier': self.user.username}),
('reset_student_attempts', {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
('update_forum_role_membership', {'unique_student_identifier': self.user.email, 'rolename': 'Moderator', 'action': 'allow'}),
('list_forum_members', {'rolename': FORUM_ROLE_COMMUNITY_TA}),
('proxy_legacy_analytics', {'aname': 'ProblemGradeDistribution'}),
('send_email', {'send_to': 'staff', 'subject': 'test', 'message': 'asdf'}),
('list_instructor_tasks', {}),
('list_background_email_tasks', {}),
('list_report_downloads', {}),
('calculate_grades_csv', {}),
]
# Endpoints that only Instructors can access
self.instructor_level_endpoints = [
('bulk_beta_modify_access', {'identifiers': 'foo@example.org', 'action': 'add'}),
('modify_access', {'unique_student_identifier': self.user.email, 'rolename': 'beta', 'action': 'allow'}),
('list_course_role_members', {'rolename': 'beta'}),
('rescore_problem', {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
]
def _access_endpoint(self, endpoint, args, status_code, msg):
"""
Asserts that accessing the given `endpoint` gets a response of `status_code`.
endpoint: string, endpoint for instructor dash API
args: dict, kwargs for `reverse` call
status_code: expected HTTP status code response
msg: message to display if assertion fails.
"""
url = reverse(endpoint, kwargs={'course_id': self.course.id.to_deprecated_string()})
if endpoint in ['send_email']:
response = self.client.post(url, args)
else:
response = self.client.get(url, args)
self.assertEqual(
response.status_code,
status_code,
msg=msg
)
def test_student_level(self):
"""
Ensure that an enrolled student can't access staff or instructor endpoints.
"""
self.client.login(username=self.user.username, password='test')
for endpoint, args in self.staff_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
def test_staff_level(self):
"""
Ensure that a staff member can't access instructor endpoints.
"""
staff_member = StaffFactory(course=self.course.id)
CourseEnrollment.enroll(staff_member, self.course.id)
self.client.login(username=staff_member.username, password='test')
# Try to promote to forums admin - not working
# update_forum_role(self.course.id, staff_member, FORUM_ROLE_ADMINISTRATOR, 'allow')
for endpoint, args in self.staff_level_endpoints:
# TODO: make these work
if endpoint in ['update_forum_role_membership', 'proxy_legacy_analytics', 'list_forum_members']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Staff member should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Staff member should not be allowed to access endpoint " + endpoint
)
def test_instructor_level(self):
"""
Ensure that an instructor member can access all endpoints.
"""
inst = InstructorFactory(course=self.course.id)
CourseEnrollment.enroll(inst, self.course.id)
self.client.login(username=inst.username, password='test')
for endpoint, args in self.staff_level_endpoints:
# TODO: make these work
if endpoint in ['update_forum_role_membership', 'proxy_legacy_analytics']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Instructor should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
# TODO: make this work
if endpoint in ['rescore_problem']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Instructor should be allowed to access endpoint " + endpoint
)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test enrollment modification endpoint.
    This test does NOT exhaustively test state changes; that is the
job of test_enrollment. This tests the response and action switch.
"""
def setUp(self):
self.request = RequestFactory().request()
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.enrolled_student = UserFactory(username='EnrolledStudent', first_name='Enrolled', last_name='Student')
CourseEnrollment.enroll(
self.enrolled_student,
self.course.id
)
self.notenrolled_student = UserFactory(username='NotEnrolledStudent', first_name='NotEnrolled', last_name='Student')
# Create invited, but not registered, user
cea = CourseEnrollmentAllowed(email='robot-allowed@robot.org', course_id=self.course.id)
cea.save()
self.allowed_email = 'robot-allowed@robot.org'
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
# Email URL values
self.site_name = microsite.get_value(
'SITE_NAME',
settings.SITE_NAME
)
self.registration_url = 'https://{}/register'.format(self.site_name)
self.about_url = 'https://{}/courses/MITx/999/Robot_Super_Course/about'.format(self.site_name)
self.course_url = 'https://{}/courses/MITx/999/Robot_Super_Course/'.format(self.site_name)
        # uncomment to enable printing of large diffs
# from failed assertions in the event of a test failure.
# (comment because pylint C0103)
# self.maxDiff = None
def tearDown(self):
"""
Undo all patches.
"""
patch.stopall()
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': action})
self.assertEqual(response.status_code, 400)
def test_invalid_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': 'percivaloctavius@', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius@',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_invalid_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': 'percivaloctavius', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_enroll_with_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.username, 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": self.notenrolled_student.username,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_enroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': False})
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_enroll_with_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': True})
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been enrolled in Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear NotEnrolled Student\n\nYou have been enrolled in Robot Super Course "
"at edx.org by a member of the course staff. "
"The course should now appear on your edx.org dashboard.\n\n"
"To start accessing course materials, please visit "
"{course_url}\n\n----\n"
"This email was automatically sent from edx.org to NotEnrolled Student".format(
course_url=self.course_url
)
)
def test_enroll_with_email_not_registered(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {registration_url} and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"Once you have registered and activated your account, "
"visit {about_url} to join the course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
registration_url=self.registration_url, about_url=self.about_url
)
)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_mktgsite(self):
# Try with marketing site enabled
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit https://edx.org/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"You can then enroll in Robot Super Course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org"
)
def test_enroll_with_email_not_registered_autoenroll(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True, 'auto_enroll': True})
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {registration_url} and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"Once you have registered and activated your account, you will see Robot Super Course listed on your dashboard.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
registration_url=self.registration_url
)
)
def test_unenroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll', 'email_students': False})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_unenroll_with_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll', 'email_students': True})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear Enrolled Student\n\nYou have been un-enrolled in Robot Super Course "
"at edx.org by a member of the course staff. "
"The course will no longer appear on your edx.org dashboard.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to Enrolled Student"
)
def test_unenroll_with_email_allowed_student(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.allowed_email, 'action': 'unenroll', 'email_students': True})
print "type(self.allowed_email): {}".format(type(self.allowed_email))
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.allowed_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": True,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear Student,\n\nYou have been un-enrolled from course Robot Super Course by a member of the course staff. "
"Please disregard the invitation previously sent.\n\n----\n"
"This email was automatically sent from edx.org to robot-allowed@robot.org"
)
@patch('instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib(self, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To access the course visit {about_url} and register for the course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
about_url=self.about_url
)
)
@patch('instructor.enrollment.uses_shib')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_shib_mktgsite(self, mock_uses_shib):
# Try with marketing site enabled and shib on
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
# Try with marketing site enabled
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org"
)
@patch('instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib_autoenroll(self, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True, 'auto_enroll': True})
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To access the course visit {course_url} and login.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
course_url=self.course_url
)
)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIBulkBetaEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test bulk beta modify access endpoint.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.beta_tester = BetaTesterFactory(course=self.course.id)
CourseEnrollment.enroll(
self.beta_tester,
self.course.id
)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
self.notenrolled_student = UserFactory(username='NotEnrolledStudent')
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
self.request = RequestFactory().request()
# Email URL values
self.site_name = microsite.get_value(
'SITE_NAME',
settings.SITE_NAME
)
self.about_url = 'https://{}/courses/MITx/999/Robot_Super_Course/about'.format(self.site_name)
        # uncomment to enable printing of large diffs
# from failed assertions in the event of a test failure.
# (comment because pylint C0103)
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})
self.assertEqual(response.status_code, 400)
def add_notenrolled(self, response, identifier):
"""
Test Helper Method (not a test, called by other tests)
Takes a client response from a call to bulk_beta_modify_access with 'email_students': False,
and the student identifier (email or username) given as 'identifiers' in the request.
        Asserts the response returns cleanly, that the student was added as a beta tester, and the
response properly contains their identifier, 'error': False, and 'userDoesNotExist': False.
Additionally asserts no email was sent.
"""
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": identifier,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_add_notenrolled_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_email_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_with_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {0}\n\nYou have been invited to be a beta tester "
"for Robot Super Course at edx.org by a member of the course staff.\n\n"
"Visit {1} to join "
"the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {2}".format(
self.notenrolled_student.profile.name,
self.about_url,
self.notenrolled_student.email
)
)
def test_add_notenrolled_with_email_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(
url,
{'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True, 'auto_enroll': True}
)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {0}\n\nYou have been invited to be a beta tester "
"for Robot Super Course at edx.org by a member of the course staff.\n\n"
"To start accessing course materials, please visit "
"https://edx.org/courses/MITx/999/Robot_Super_Course/\n\n----\n"
"This email was automatically sent from edx.org to {1}".format(
self.notenrolled_student.profile.name,
self.notenrolled_student.email
)
)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_add_notenrolled_email_mktgsite(self):
# Try with marketing site enabled
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
u"Dear {0}\n\nYou have been invited to be a beta tester "
"for Robot Super Course at edx.org by a member of the course staff.\n\n"
"Visit edx.org to enroll in the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {1}".format(
self.notenrolled_student.profile.name,
self.notenrolled_student.email
)
)
def test_enroll_with_email_not_registered(self):
# User doesn't exist
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'add', 'email_students': True})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notregistered_email,
"error": True,
"userDoesNotExist": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_without_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': False})
self.assertEqual(response.status_code, 200)
# Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
# the instance fetched from the email above which had its cache cleared
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_with_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': True})
self.assertEqual(response.status_code, 200)
# Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
# the instance fetched from the email above which had its cache cleared
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been removed from a beta test for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear {full_name}\n\nYou have been removed as a beta tester for "
"Robot Super Course at edx.org by a member of the course staff. "
"The course will remain on your dashboard, but you will no longer "
"be part of the beta testing group.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to {email_address}".format(
full_name=self.beta_tester.profile.name,
email_address=self.beta_tester.email
)
)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPILevelsAccess(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change permissions
of other users.
This test does NOT test whether the actions had an effect on the
    database; that is the job of test_access.
This tests the response and action switch.
Actually, modify_access does not have a very meaningful
response yet, so only the status code is tested.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.other_instructor = InstructorFactory(course=self.course.id)
self.other_staff = StaffFactory(course=self.course.id)
self.other_user = UserFactory()
def test_modify_access_noparams(self):
""" Test missing all query parameters. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_action(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'robot-not-an-action',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_role(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'robot-not-a-roll',
'action': 'revoke',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_allow(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_user.email,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_allow_with_uname(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_instructor.username,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_with_username(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.username,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_with_fake_user(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': 'GandalfTheGrey',
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': 'GandalfTheGrey',
'userDoesNotExist': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_with_inactive_user(self):
self.other_user.is_active = False
self.other_user.save() # pylint: disable=no-member
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_user.username,
'rolename': 'beta',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': self.other_user.username,
'inactiveUser': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_revoke_not_allowed(self):
""" Test revoking access that a user does not have. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_self(self):
"""
        Test that an instructor cannot remove instructor privileges from themselves.
"""
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.instructor.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'unique_student_identifier': self.instructor.username,
'rolename': 'instructor',
'action': 'revoke',
'removingSelfAsInstructor': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_noparams(self):
""" Test missing all query parameters. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_bad_rolename(self):
""" Test with an invalid rolename parameter. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'robot-not-a-rolename',
})
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_staff(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'staff',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id.to_deprecated_string(),
'staff': [
{
'username': self.other_staff.username,
'email': self.other_staff.email,
'first_name': self.other_staff.first_name,
'last_name': self.other_staff.last_name,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_beta(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'beta',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id.to_deprecated_string(),
'beta': []
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_update_forum_role_membership(self):
"""
Test update forum role membership with user's email and username.
"""
# Seed forum roles for course.
seed_permissions_roles(self.course.id)
# Test add discussion admin with email.
self.assert_update_forum_role_membership(self.other_user.email, "Administrator", "allow")
# Test revoke discussion admin with email.
self.assert_update_forum_role_membership(self.other_user.email, "Administrator", "revoke")
# Test add discussion moderator with username.
self.assert_update_forum_role_membership(self.other_user.username, "Moderator", "allow")
# Test revoke discussion moderator with username.
self.assert_update_forum_role_membership(self.other_user.username, "Moderator", "revoke")
# Test add discussion community TA with email.
self.assert_update_forum_role_membership(self.other_user.email, "Community TA", "allow")
# Test revoke discussion community TA with username.
self.assert_update_forum_role_membership(self.other_user.username, "Community TA", "revoke")
def assert_update_forum_role_membership(self, unique_student_identifier, rolename, action):
"""
Test update forum role membership.
Get unique_student_identifier, rolename and action and update forum role.
"""
url = reverse('update_forum_role_membership', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(
url,
{
'unique_student_identifier': unique_student_identifier,
'rolename': rolename,
'action': action,
}
)
# Status code should be 200.
self.assertEqual(response.status_code, 200)
user_roles = self.other_user.roles.filter(course_id=self.course.id).values_list("name", flat=True)
if action == 'allow':
self.assertIn(rolename, user_roles)
elif action == 'revoke':
self.assertNotIn(rolename, user_roles)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPILevelsDataDump(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints that show data without side effects.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.students = [UserFactory() for _ in xrange(6)]
for student in self.students:
CourseEnrollment.enroll(student, self.course.id)
def test_get_students_features(self):
"""
Test that some minimum of information is formatted
correctly in the response to get_students_features.
"""
url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('students', res_json)
for student in self.students:
student_json = [
x for x in res_json['students']
if x['username'] == student.username
][0]
self.assertEqual(student_json['username'], student.username)
self.assertEqual(student_json['email'], student.email)
@patch.object(instructor.views.api, 'anonymous_id_for_user', Mock(return_value='42'))
@patch.object(instructor.views.api, 'unique_id_for_user', Mock(return_value='41'))
def test_get_anon_ids(self):
"""
Test the CSV output for the anonymized user ids.
"""
url = reverse('get_anon_ids', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(
'"User ID","Anonymized user ID","Course Specific Anonymized user ID"'
'\n"2","41","42"\n'
))
self.assertTrue(body.endswith('"7","41","42"\n'))
def test_list_report_downloads(self):
url = reverse('list_report_downloads', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor_task.models.LocalFSReportStore.links_for') as mock_links_for:
mock_links_for.return_value = [
('mock_file_name_1', 'https://1.mock.url'),
('mock_file_name_2', 'https://2.mock.url'),
]
response = self.client.get(url, {})
expected_response = {
"downloads": [
{
"url": "https://1.mock.url",
"link": "<a href=\"https://1.mock.url\">mock_file_name_1</a>",
"name": "mock_file_name_1"
},
{
"url": "https://2.mock.url",
"link": "<a href=\"https://2.mock.url\">mock_file_name_2</a>",
"name": "mock_file_name_2"
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected_response)
def test_calculate_grades_csv_success(self):
url = reverse('calculate_grades_csv', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor_task.api.submit_calculate_grades_csv') as mock_cal_grades:
mock_cal_grades.return_value = True
response = self.client.get(url, {})
success_status = "Your grade report is being generated! You can view the status of the generation task in the 'Pending Instructor Tasks' section."
self.assertIn(success_status, response.content)
def test_calculate_grades_csv_already_running(self):
url = reverse('calculate_grades_csv', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor_task.api.submit_calculate_grades_csv') as mock_cal_grades:
mock_cal_grades.side_effect = AlreadyRunningError()
response = self.client.get(url, {})
already_running_status = "A grade report generation task is already in progress. Check the 'Pending Instructor Tasks' table for the status of the task. When completed, the report will be available for download in the table below."
self.assertIn(already_running_status, response.content)
def test_get_students_features_csv(self):
"""
Test that some minimum of information is formatted
correctly in the response to get_students_features.
"""
url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url + '/csv', {})
self.assertEqual(response['Content-Type'], 'text/csv')
def test_get_distribution_no_feature(self):
"""
Test that get_distribution lists available features
when supplied no feature parameter.
"""
url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertEqual(type(res_json['available_features']), list)
url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url + u'?feature=')
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertEqual(type(res_json['available_features']), list)
def test_get_distribution_unavailable_feature(self):
"""
Test that get_distribution fails gracefully with
an unavailable feature.
"""
url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'feature': 'robot-not-a-real-feature'})
self.assertEqual(response.status_code, 400)
def test_get_distribution_gender(self):
"""
        Test that get_distribution returns the distribution of
        values for the gender feature.
"""
url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'feature': 'gender'})
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertEqual(res_json['feature_results']['data']['m'], 6)
self.assertEqual(res_json['feature_results']['choices_display_names']['m'], 'Male')
self.assertEqual(res_json['feature_results']['data']['no_data'], 0)
self.assertEqual(res_json['feature_results']['choices_display_names']['no_data'], 'No Data')
def test_get_student_progress_url(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
url += "?unique_student_identifier={}".format(
quote(self.students[0].email.encode("utf-8"))
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_from_uname(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
url += "?unique_student_identifier={}".format(
quote(self.students[0].username.encode("utf-8"))
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_noparams(self):
""" Test that the endpoint 404's without the required query params. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_get_student_progress_url_nostudent(self):
""" Test that the endpoint 400's when requesting an unknown email. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIRegradeTask(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change student grades.
This includes resetting attempts and starting rescore tasks.
This test does NOT test whether the actions had an effect on the
database; that is the job of task tests and test_enrollment.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.problem_location = msk_from_problem_urlname(
self.course.id,
'robot-some-problem-urlname'
)
self.problem_urlname = str(self.problem_location)
self.module_to_reset = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
def test_reset_student_attempts_delete_all(self):
""" Make sure no one can delete all students' state on a problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
'delete_module': True,
})
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_single(self):
""" Test reset single student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# make sure problem attempts have been reset.
changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
self.assertEqual(
json.loads(changed_module.state)['attempts'],
0
)
# mock out the function which should be called to execute the action.
@patch.object(instructor_task.api, 'submit_reset_problem_attempts_for_all_students')
def test_reset_student_attempts_all(self, act):
""" Test reset all student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_reset_student_attempts_missingmodule(self):
""" Test reset for non-existant problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': 'robot-not-a-real-module',
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_delete(self):
""" Test delete single student state. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 200)
# make sure the module has been deleted
self.assertEqual(
StudentModule.objects.filter(
student=self.module_to_reset.student,
course_id=self.module_to_reset.course_id,
# module_id=self.module_to_reset.module_id,
).count(),
0
)
def test_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single_from_uname(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.username,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_all_students')
def test_rescore_problem_all(self, act):
""" Test rescoring for all students. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorSendEmail(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Checks that only instructors have access to email endpoints, and that
these endpoints are only accessible for courses that actually exist, and
only with valid email messages.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.id)
self.client.login(username=self.instructor.username, password='test')
test_subject = u'\u1234 test subject'
test_message = u'\u6824 test message'
self.full_test_message = {
'send_to': 'staff',
'subject': test_subject,
'message': test_message,
}
def test_send_email_as_logged_in_instructor(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 200)
def test_send_email_but_not_logged_in(self):
self.client.logout()
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_not_staff(self):
self.client.logout()
student = UserFactory()
self.client.login(username=student.username, password='test')
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_course_not_exist(self):
url = reverse('send_email', kwargs={'course_id': 'GarbageCourse/DNE/NoTerm'})
response = self.client.post(url, self.full_test_message)
self.assertNotEqual(response.status_code, 200)
def test_send_email_no_sendto(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'subject': 'test subject',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_subject(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'send_to': 'staff',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_message(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'send_to': 'staff',
'subject': 'test subject',
})
self.assertEqual(response.status_code, 400)
class MockCompletionInfo(object):
"""Mock for get_task_completion_info"""
times_called = 0
def mock_get_task_completion_info(self, *args): # pylint: disable=unused-argument
"""Mock for get_task_completion_info"""
self.times_called += 1
if self.times_called % 2 == 0:
return True, 'Task Completed'
return False, 'Task Errored In Some Way'
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPITaskLists(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test instructor task list endpoint.
"""
class FakeTask(object):
""" Fake task object """
FEATURES = [
'task_type',
'task_input',
'task_id',
'requester',
'task_state',
'created',
'status',
'task_message',
'duration_sec'
]
def __init__(self, completion):
for feature in self.FEATURES:
setattr(self, feature, 'expected')
# created needs to be a datetime
self.created = datetime.datetime(2013, 10, 25, 11, 42, 35)
# set 'status' and 'task_message' attrs
success, task_message = completion()
if success:
self.status = "Complete"
else:
self.status = "Incomplete"
self.task_message = task_message
# Set 'task_output' attr, which will be parsed to the 'duration_sec' attr.
self.task_output = '{"duration_ms": 1035000}'
self.duration_sec = 1035000 / 1000.0
def make_invalid_output(self):
"""Munge task_output to be invalid json"""
self.task_output = 'HI MY NAME IS INVALID JSON'
# This should be given the value of 'unknown' if the task output
# can't be properly parsed
self.duration_sec = 'unknown'
def to_dict(self):
""" Convert fake task to dictionary representation. """
attr_dict = {key: getattr(self, key) for key in self.FEATURES}
attr_dict['created'] = attr_dict['created'].isoformat()
return attr_dict
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.problem_location = msk_from_problem_urlname(
self.course.id,
'robot-some-problem-urlname'
)
self.problem_urlname = str(self.problem_location)
self.module = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
mock_factory = MockCompletionInfo()
self.tasks = [self.FakeTask(mock_factory.mock_get_task_completion_info) for _ in xrange(7)]
self.tasks[-1].make_invalid_output()
def tearDown(self):
"""
Undo all patches.
"""
patch.stopall()
@patch.object(instructor_task.api, 'get_running_instructor_tasks')
def test_list_instructor_tasks_running(self, act):
""" Test list of all running tasks. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.api.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_background_email_tasks(self, act):
"""Test list of background email tasks."""
act.return_value = self.tasks
url = reverse('list_background_email_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.api.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem(self, act):
""" Test list task history for problem. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.api.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {
'problem_location_str': self.problem_urlname,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem_student(self, act):
""" Test list task history for problem AND student. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.api.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {
'problem_location_str': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@override_settings(ANALYTICS_SERVER_URL="http://robotanalyticsserver.netbot:900/")
@override_settings(ANALYTICS_API_KEY="robot_api_key")
class TestInstructorAPIAnalyticsProxy(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test instructor analytics proxy endpoint.
"""
class FakeProxyResponse(object):
""" Fake successful requests response object. """
def __init__(self):
self.status_code = requests.status_codes.codes.OK
self.content = '{"test_content": "robot test content"}'
class FakeBadProxyResponse(object):
""" Fake strange-failed requests response object. """
def __init__(self):
self.status_code = 'notok.'
self.content = '{"test_content": "robot test content"}'
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.id)
self.client.login(username=self.instructor.username, password='test')
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy_url(self, act):
""" Test legacy analytics proxy url generation. """
act.return_value = self.FakeProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'aname': 'ProblemGradeDistribution'
})
self.assertEqual(response.status_code, 200)
# check request url
expected_url = "{url}get?aname={aname}&course_id={course_id!s}&apikey={api_key}".format(
url="http://robotanalyticsserver.netbot:900/",
aname="ProblemGradeDistribution",
course_id=self.course.id.to_deprecated_string(),
api_key="robot_api_key",
)
act.assert_called_once_with(expected_url)
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy(self, act):
"""
Test legacy analytics content proxying.
"""
act.return_value = self.FakeProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'aname': 'ProblemGradeDistribution'
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_res = {'test_content': "robot test content"}
self.assertEqual(json.loads(response.content), expected_res)
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy_reqfailed(self, act):
""" Test proxy when server reponds with failure. """
act.return_value = self.FakeBadProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'aname': 'ProblemGradeDistribution'
})
self.assertEqual(response.status_code, 500)
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy_missing_param(self, act):
""" Test proxy when missing the aname query parameter. """
act.return_value = self.FakeProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertEqual(response.status_code, 400)
self.assertFalse(act.called)
class TestInstructorAPIHelpers(TestCase):
""" Test helpers for instructor.api """
def test_split_input_list(self):
strings = []
lists = []
strings.append("Lorem@ipsum.dolor, sit@amet.consectetur\nadipiscing@elit.Aenean\r convallis@at.lacus\r, ut@lacinia.Sed")
lists.append(['Lorem@ipsum.dolor', 'sit@amet.consectetur', 'adipiscing@elit.Aenean', 'convallis@at.lacus', 'ut@lacinia.Sed'])
for (stng, lst) in zip(strings, lists):
self.assertEqual(_split_input_list(stng), lst)
def test_split_input_list_unicode(self):
self.assertEqual(_split_input_list('robot@robot.edu, robot2@robot.edu'), ['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'), ['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'), [u'robot@robot.edu', 'robot2@robot.edu'])
scary_unistuff = unichr(40960) + u'abcd' + unichr(1972)
self.assertEqual(_split_input_list(scary_unistuff), [scary_unistuff])
def test_msk_from_problem_urlname(self):
course_id = SlashSeparatedCourseKey('MITx', '6.002x', '2013_Spring')
name = 'L2Node1'
output = 'i4x://MITx/6.002x/problem/L2Node1'
self.assertEqual(msk_from_problem_urlname(course_id, name).to_deprecated_string(), output)
@raises(ValueError)
def test_msk_from_problem_urlname_error(self):
args = ('notagoodcourse', 'L2Node1')
msk_from_problem_urlname(*args)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestDueDateExtensions(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test due date extensions.
"""
def setUp(self):
"""
Fixtures.
"""
due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=utc)
course = CourseFactory.create()
week1 = ItemFactory.create(due=due)
week2 = ItemFactory.create(due=due)
week3 = ItemFactory.create(due=due)
course.children = [week1.location.to_deprecated_string(), week2.location.to_deprecated_string(),
week3.location.to_deprecated_string()]
homework = ItemFactory.create(
parent_location=week1.location,
due=due
)
week1.children = [homework.location.to_deprecated_string()]
user1 = UserFactory.create()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=week1.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=week2.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=week3.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=homework.location).save()
user2 = UserFactory.create()
StudentModule(
state='{}',
student_id=user2.id,
course_id=course.id,
module_state_key=week1.location).save()
StudentModule(
state='{}',
student_id=user2.id,
course_id=course.id,
module_state_key=homework.location).save()
user3 = UserFactory.create()
StudentModule(
state='{}',
student_id=user3.id,
course_id=course.id,
module_state_key=week1.location).save()
StudentModule(
state='{}',
student_id=user3.id,
course_id=course.id,
module_state_key=homework.location).save()
self.course = course
self.week1 = week1
self.homework = homework
self.week2 = week2
self.user1 = user1
self.user2 = user2
self.instructor = InstructorFactory(course=course.id)
self.client.login(username=self.instructor.username, password='test')
def test_change_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc),
get_extended_due(self.course, self.week1, self.user1))
def test_reset_date(self):
self.test_change_due_date()
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(None,
get_extended_due(self.course, self.week1, self.user1))
def test_show_unit_extensions(self):
self.test_change_due_date()
url = reverse('show_unit_extensions',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'url': self.week1.location.to_deprecated_string()})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Full Name': self.user1.profile.name,
u'Username': self.user1.username}],
u'header': [u'Username', u'Full Name', u'Extended Due Date'],
u'title': u'Users with due date extensions for %s' %
self.week1.display_name})
def test_show_student_extensions(self):
self.test_change_due_date()
url = reverse('show_student_extensions',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'student': self.user1.username})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Unit': self.week1.display_name}],
u'header': [u'Unit', u'Extended Due Date'],
u'title': u'Due date extensions for %s (%s)' % (
self.user1.profile.name, self.user1.username)})
| morenopc/edx-platform | lms/djangoapps/instructor/tests/test_api.py | Python | agpl-3.0 | 91,167 | ["VisIt"] | 252c60bd5036e0e728701c835c3a74f56389e8061f397e8587c470de487ccd36 |
import datetime
import requests
from requests_oauthlib import OAuth1
from oauthlib.oauth1 import SIGNATURE_RSA, SIGNATURE_TYPE_AUTH_HEADER
from urlparse import parse_qs
from urllib import urlencode
from .constants import (REQUEST_TOKEN_URL, AUTHORIZE_URL, ACCESS_TOKEN_URL, XERO_API_URL,
PARTNER_REQUEST_TOKEN_URL, PARTNER_AUTHORIZE_URL, PARTNER_ACCESS_TOKEN_URL, PARTNER_XERO_API_URL, )
from .exceptions import *
class PrivateCredentials(object):
"""An object wrapping the 2-step OAuth process for Private Xero API access.
Usage:
1) Construct a PrivateCredentials() instance:
>>> from xero.auth import PrivateCredentials
>>> credentials = PrivateCredentials(<consumer_key>, <rsa_key>)
rsa_key should be a multi-line string, starting with:
-----BEGIN RSA PRIVATE KEY-----\n
2) Use the credentials:
>>> from xero import Xero
>>> xero = Xero(credentials)
>>> xero.contacts.all()
...
"""
def __init__(self, consumer_key, rsa_key):
self.consumer_key = consumer_key
self.rsa_key = rsa_key
# Private API uses consumer key as the OAuth token.
self.oauth_token = consumer_key
self.oauth = OAuth1(
self.consumer_key,
resource_owner_key=self.oauth_token,
rsa_key=self.rsa_key,
signature_method=SIGNATURE_RSA,
signature_type=SIGNATURE_TYPE_AUTH_HEADER,
)
self.oauth.api_url = XERO_API_URL
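# Illustrative sketch (the '/Contacts' endpoint is an assumed example, not
# something this module defines): OAuth1 is requests-compatible, so the
# credentials built above can also be used with requests directly:
#
#   import requests
#   creds = PrivateCredentials(consumer_key, rsa_key)
#   response = requests.get(XERO_API_URL + '/Contacts', auth=creds.oauth)
#
# The Xero wrapper shown in the docstring presumably does the equivalent
# internally.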
class PublicCredentials(object):
"""An object wrapping the 3-step OAuth process for Public Xero API access.
Usage:
1) Construct a PublicCredentials() instance:
>>> from xero import PublicCredentials
>>> credentials = PublicCredentials(<consumer_key>, <consumer_secret>)
2) Visit the authentication URL:
>>> credentials.url
If a callback URI was provided (e.g., https://example.com/oauth),
the user will be redirected to a URL of the form:
https://example.com/oauth?oauth_token=<token>&oauth_verifier=<verifier>&org=<organization ID>
from which the verifier can be extracted. If no callback URI is
provided, the verifier will be shown on the screen, and must be
manually entered by the user.
3) Verify the instance:
>>> credentials.verify(<verifier string>)
4) Use the credentials.
>>> from xero import Xero
>>> xero = Xero(credentials)
>>> xero.contacts.all()
...
"""
def __init__(self, consumer_key, consumer_secret,
callback_uri=None, verified=False,
oauth_token=None, oauth_token_secret=None,
scope=None):
"""Construct the auth instance.
Must provide the consumer key and secret.
A callback URL may be provided as an option. If provided, the
Xero verification process will redirect to that URL when
verification is complete.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.callback_uri = callback_uri
self.verified = verified
self.scope = scope
self._oauth = None
if oauth_token and oauth_token_secret:
if self.verified:
# If provided, this is a fully verified set of
# credentials. Store the oauth_token and secret
# and initialize OAuth around those
self._init_oauth(oauth_token, oauth_token_secret)
else:
# If provided, we are reconstructing an initialized
# (but non-verified) set of public credentials.
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
else:
oauth = OAuth1(
consumer_key,
client_secret=self.consumer_secret,
callback_uri=self.callback_uri
)
response = requests.post(url=REQUEST_TOKEN_URL, auth=oauth)
if response.status_code == 200:
credentials = parse_qs(response.text)
self.oauth_token = credentials.get('oauth_token')[0]
self.oauth_token_secret = credentials.get('oauth_token_secret')[0]
elif response.status_code == 400:
raise XeroBadRequest(response)
elif response.status_code == 401:
raise XeroUnauthorized(response)
elif response.status_code == 403:
raise XeroForbidden(response)
elif response.status_code == 404:
raise XeroNotFound(response)
elif response.status_code == 500:
raise XeroInternalError(response)
elif response.status_code == 501:
raise XeroNotImplemented(response)
elif response.status_code == 503:
# Two 503 responses are possible. Rate limit errors
# return encoded content; offline errors don't.
# If you parse the response text and there's nothing
# encoded, it must be a not-available error.
payload = parse_qs(response.text)
if payload:
raise XeroRateLimitExceeded(response, payload)
else:
raise XeroNotAvailable(response)
else:
raise XeroExceptionUnknown(response)
def _init_oauth(self, oauth_token, oauth_token_secret):
"Store and initialize the OAuth credentials"
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
self.verified = True
self._oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.oauth_token,
resource_owner_secret=self.oauth_token_secret
)
self._oauth.api_url = XERO_API_URL
@property
def state(self):
"""Obtain the useful state of this credentials object so that
we can reconstruct it independently.
"""
return dict(
(attr, getattr(self, attr))
for attr in (
'consumer_key', 'consumer_secret', 'callback_uri',
'verified', 'oauth_token', 'oauth_token_secret', 'scope'
)
if getattr(self, attr) is not None
)
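# Illustrative sketch: state contains only plain values, so it can be
# serialized (e.g. stashed in a web session between OAuth steps) and passed
# back to the constructor to rebuild an equivalent credentials object:
#
#   saved = credentials.state
#   # ... later, possibly in another process ...
#   credentials = PublicCredentials(**saved)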
def verify(self, verifier):
"Verify an OAuth token"
# Construct the credentials for the verification request
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.oauth_token,
resource_owner_secret=self.oauth_token_secret,
verifier=verifier
)
# Make the verification request, getting back an access token
response = requests.post(url=ACCESS_TOKEN_URL, auth=oauth)
if response.status_code == 200:
credentials = parse_qs(response.text)
# Initialize the oauth credentials
self._init_oauth(
credentials.get('oauth_token')[0],
credentials.get('oauth_token_secret')[0]
)
elif response.status_code == 400:
raise XeroBadRequest(response)
elif response.status_code == 401:
raise XeroUnauthorized(response)
elif response.status_code == 403:
raise XeroForbidden(response)
elif response.status_code == 404:
raise XeroNotFound(response)
elif response.status_code == 500:
raise XeroInternalError(response)
elif response.status_code == 501:
raise XeroNotImplemented(response)
elif response.status_code == 503:
# Two 503 responses are possible. Rate limit errors
# return encoded content; offline errors don't.
# If you parse the response text and there's nothing
# encoded, it must be a not-available error.
payload = parse_qs(response.text)
if payload:
raise XeroRateLimitExceeded(response, payload)
else:
raise XeroNotAvailable(response)
else:
raise XeroExceptionUnknown(response)
@property
def url(self):
"Returns the URL that can be visited to obtain a verifier code"
query_string = {'oauth_token': self.oauth_token}
if self.scope:
query_string['scope'] = self.scope
return AUTHORIZE_URL + '?' + urlencode(query_string)
@property
def oauth(self):
"Returns the requests-compatible OAuth object"
if self._oauth is None:
raise XeroNotVerified("Public credentials haven't been verified")
return self._oauth
class PartnerCredentials(object):
"""An object wrapping the 3-step OAuth process for Partner Xero API access.
Usage is similar to PublicCredentials, but requests are RSA-signed and
expired access tokens can be refreshed explicitly via refresh().
Usage:
1) Construct a PartnerCredentials() instance:
>>> from xero.auth import PartnerCredentials
>>> credentials = PartnerCredentials(<consumer_key>, <consumer_secret>, <rsa_key>, <client_cert>)
2) Visit the authentication URL:
>>> credentials.url
If a callback URI was provided (e.g., https://example.com/oauth),
the user will be redirected to a URL of the form:
https://example.com/oauth?oauth_token=<token>&oauth_verifier=<verifier>&org=<organization ID>
from which the verifier can be extracted. If no callback URI is
provided, the verifier will be shown on the screen, and must be
manually entered by the user.
3) Verify the instance:
>>> credentials.verify(<verifier string>)
4) Use the credentials.
>>> from xero import Xero
>>> xero = Xero(credentials)
>>> xero.contacts.all()
...
"""
def __init__(self, consumer_key, consumer_secret, rsa_key, client_cert,
callback_uri=None, verified=False,
oauth_token=None, oauth_token_secret=None, oauth_session_handle=None,
oauth_expires_at=None, oauth_authorization_expires_at=None,
scope=None):
"""Construct the auth instance.
Must provide the consumer key, secret, and RSA key.
A callback URL may be provided as an option. If provided, the
Xero verification process will redirect to that URL when
verification is complete.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.rsa_key = rsa_key
self.client_cert = client_cert
self.callback_uri = callback_uri
self.verified = verified
self.oauth_session_handle = oauth_session_handle
self.oauth_expires_at = oauth_expires_at
self.oauth_authorization_expires_at = oauth_authorization_expires_at
self.scope = scope
self._oauth = None
if oauth_token and oauth_token_secret:
if self.verified:
# If provided, this is a fully verified set of
# credentials. Store the oauth_token and secret
# and initialize OAuth around those
self._init_oauth(oauth_token, oauth_token_secret)
else:
# If provided, we are reconstructing an initialized
# (but non-verified) set of public credentials.
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
else:
oauth = OAuth1(
consumer_key,
client_secret=self.consumer_secret,
callback_uri=self.callback_uri,
rsa_key=self.rsa_key,
signature_method=SIGNATURE_RSA,
)
response = requests.post(url=PARTNER_REQUEST_TOKEN_URL, auth=oauth, cert=client_cert)
if response.status_code == 200:
credentials = parse_qs(response.text)
self.oauth_token = credentials.get('oauth_token')[0]
self.oauth_token_secret = credentials.get('oauth_token_secret')[0]
elif response.status_code == 400:
raise XeroBadRequest(response)
elif response.status_code == 401:
raise XeroUnauthorized(response)
elif response.status_code == 403:
raise XeroForbidden(response)
elif response.status_code == 404:
raise XeroNotFound(response)
elif response.status_code == 500:
raise XeroInternalError(response)
elif response.status_code == 501:
raise XeroNotImplemented(response)
elif response.status_code == 503:
# Two 503 responses are possible. Rate limit errors
# return encoded content; offline errors don't.
# If you parse the response text and there's nothing
# encoded, it must be a not-available error.
payload = parse_qs(response.text)
if payload:
raise XeroRateLimitExceeded(response, payload)
else:
raise XeroNotAvailable(response)
else:
raise XeroExceptionUnknown(response)
def _init_oauth(self, oauth_token, oauth_token_secret):
"Store and initialize the OAuth credentials"
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
self.verified = True
self._oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.oauth_token,
resource_owner_secret=self.oauth_token_secret,
rsa_key=self.rsa_key,
signature_method=SIGNATURE_RSA,
)
self._oauth.client_cert = self.client_cert
self._oauth.api_url = PARTNER_XERO_API_URL
@property
def state(self):
"""Obtain the useful state of this credentials object so that
we can reconstruct it independently.
"""
return dict(
(attr, getattr(self, attr))
for attr in (
'consumer_key', 'consumer_secret', 'callback_uri',
'verified', 'oauth_token', 'oauth_token_secret',
'oauth_session_handle', 'oauth_expires_at',
'oauth_authorization_expires_at', 'scope'
)
if getattr(self, attr) is not None
)
def verify(self, verifier):
"Verify an OAuth token"
# Construct the credentials for the verification request
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.oauth_token,
resource_owner_secret=self.oauth_token_secret,
verifier=verifier,
rsa_key=self.rsa_key,
signature_method=SIGNATURE_RSA,
)
# Make the verification request, getting back an access token
response = requests.post(url=PARTNER_ACCESS_TOKEN_URL, auth=oauth, cert=self.client_cert)
self._process_access_token_response(response)
def refresh(self):
"Refresh an expired token"
# Construct the credentials for the verification request
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.oauth_token,
resource_owner_secret=self.oauth_token_secret,
rsa_key=self.rsa_key,
signature_method=SIGNATURE_RSA,
)
# Make the verification request, getting back an access token
params = {'oauth_session_handle': self.oauth_session_handle}
response = requests.post(url=PARTNER_ACCESS_TOKEN_URL, params=params, auth=oauth, cert=self.client_cert)
self._process_access_token_response(response)
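# Illustrative sketch: nothing in this class calls refresh() automatically;
# a caller is expected to compare oauth_expires_at against the current time
# and refresh a lapsed access token itself:
#
#   if credentials.oauth_expires_at <= datetime.datetime.now():
#       credentials.refresh()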
def _process_access_token_response(self, response):
if response.status_code == 200:
credentials = parse_qs(response.text)
# Initialize the oauth credentials
self._init_oauth(
credentials.get('oauth_token')[0],
credentials.get('oauth_token_secret')[0]
)
self.oauth_expires_in = credentials.get('oauth_expires_in')[0]
self.oauth_session_handle = credentials.get('oauth_session_handle')[0]
self.oauth_authorization_expires_in = credentials.get('oauth_authorization_expires_in')[0]
# Calculate token/auth expiry
self.oauth_expires_at = datetime.datetime.now() + \
datetime.timedelta(seconds=int(self.oauth_expires_in))
self.oauth_authorization_expires_at = \
datetime.datetime.now() + \
datetime.timedelta(seconds=int(self.oauth_authorization_expires_in))
elif response.status_code == 400:
raise XeroBadRequest(response)
elif response.status_code == 401:
raise XeroUnauthorized(response)
elif response.status_code == 403:
raise XeroForbidden(response)
elif response.status_code == 404:
raise XeroNotFound(response)
elif response.status_code == 500:
raise XeroInternalError(response)
elif response.status_code == 501:
raise XeroNotImplemented(response)
elif response.status_code == 503:
# Two 503 responses are possible. Rate limit errors
# return encoded content; offline errors don't.
# If you parse the response text and there's nothing
# encoded, it must be a not-available error.
payload = parse_qs(response.text)
if payload:
raise XeroRateLimitExceeded(response, payload)
else:
raise XeroNotAvailable(response)
else:
raise XeroExceptionUnknown(response)
@property
def url(self):
"Returns the URL that can be visited to obtain a verifier code"
query_string = {'oauth_token': self.oauth_token}
if self.scope:
query_string['scope'] = self.scope
return PARTNER_AUTHORIZE_URL + '?' + urlencode(query_string)
@property
def oauth(self):
"Returns the requests-compatible OAuth object"
if self._oauth is None:
raise XeroNotVerified("Public credentials haven't been verified")
return self._oauth
| skillflip/pyxero | xero/auth.py | Python | bsd-3-clause | 18,704 | ["VisIt"] | 63d29966c7d315dcaf84aab9ee819e7a819adeac171c4fab79ef53c0c58c3614 |
#encoding:utf-8
""" unit tests.
Data required for these tests is the example project my_project mentioned in the tutorial.
"""
# tests to do:
# mTAUX[X,0] should yield ValueError: Slice axis argument X not in grid (time, yu)
# when mTAUX.grid is (time, yu)
import inspect
import copy
import unittest
import os
import numpy as np
import spacegrids as sg
class TestValuedClass(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_slice_method(self):
K = sg.Valued('K',np.array([1.,2.,3.,4.]))
R=K.sliced(slice(1,None,None))
self.assertEqual( np.array_equal( R.value, np.array([ 2., 3., 4.]) ), True )
# test the info function
class TestInfo(unittest.TestCase):
def setUp(self):
print 'Setting up %s'%type(self).__name__
self.fixture = sg.info_dict()
def tearDown(self):
print 'Tearing down %s'%type(self).__name__
del self.fixture
def test_type(self):
self.assertEqual(type(self.fixture),dict)
def test_type2(self):
D = self.fixture
if len(D) > 0:
self.assertEqual(type(D.keys()[0]),str)
def test_paths_in_D_exist(self):
D = self.fixture
for path in D.values():
self.assertEqual(os.path.exists(path), True)
class Test_project_helpers(unittest.TestCase):
def setUp(self):
print 'Setting up %s'%type(self).__name__
D = sg.info_dict()
self.fixture = D
def tearDown(self):
print 'Tearing down %s'%type(self).__name__
del self.fixture
def test_isexpdir_on_project_dir(self):
D = self.fixture
self.assertEqual(set(sg.isexpdir(os.path.join(D['my_project']))), set(['DPO', 'DPC','Lev.cdf'] ) )
def test_isexpdir_on_exper_dir(self):
D = self.fixture
self.assertEqual(sg.isexpdir(os.path.join(D['my_project'], 'DPO')), ['time_mean.nc'] )
class TestMathOnCoords(unittest.TestCase):
def setUp(self):
print 'Setting up %s'%type(self).__name__
# Coords ---
coord1 = sg.fieldcls.Coord(name = 'test1',direction ='X',axis='X',value =np.linspace(-10.,10.,100) , metadata = {'hi':5} )
self.fixture = coord1
def tearDown(self):
print 'Tearing down %s'%type(self).__name__
del self.fixture
def test_coord_gaussian_method(self):
"""Test gaussian method of Coord.
"""
coord1 = self.fixture
W=coord1.gaussian(30,1)
self.assertEqual(round(np.max(W.value),5), 1.)
self.assertEqual(round(np.min(W.value),5), 0.)
def test_sgmax_sgmin_function(self):
"""Test sgmax and sgmin function.
"""
coord1 = self.fixture
W=coord1.gaussian(30,1)
self.assertEqual(round(sg.sgmax(W),5), 1.)
self.assertEqual(round(sg.sgmin(W),5), 0.)
def test_sgnanmax_sgnanmin_function(self):
"""Test sgnanmax and sgnanmin function.
"""
coord1 = self.fixture
W=coord1.gaussian(30,1)
W[50] = np.nan
self.assertEqual(round(sg.sgnanmax(W),5), 1.)
self.assertEqual(round(sg.sgnanmin(W),5), 0.)
def test_nanargmax_nanargmin_function(self):
"""Test nanargmax and nanargmin function.
"""
coord1 = self.fixture
W=coord1.gaussian(30,1)
W[50] = np.nan
self.assertEqual(sg.nanargmax(W.value), (30,))
self.assertEqual(sg.nanargmin(W.value), (99,))
class TestCoordsOnTheirOwn(unittest.TestCase):
def setUp(self):
print 'Setting up %s'%type(self).__name__
def provide_axis(cstack):
for i, c in enumerate(cstack):
cstack[i].axis = cstack[i].direction
return cstack
# Note that some coord values are deliberately unordered.
# Coords ---
coord1 = sg.fieldcls.Coord(name = 'test1',direction ='X',value =np.array([1.,2.,3.]), strings = ['one','two','three'] , metadata = {'hi':5} )
coord2 = sg.fieldcls.Coord(name = 'test2',direction ='Y',value =np.array([1.,2.,3.,4.]), metadata = {'hi':7})
coord3 = sg.fieldcls.Coord(name = 'test',direction ='X',value =np.array([5.,1.,2.,3.,4.]), metadata = {'hi':3})
# identical in main attributes to previous set (in order):
coord4 = sg.fieldcls.Coord(name = 'test1',direction ='X',value =np.array([1.,2.,3.]), metadata = {'hi':8})
coord5 = sg.fieldcls.Coord(name = 'test2',direction ='Y',value =np.array([1,2,3, 4]), metadata = {'hi':10})
coord6 = sg.fieldcls.Coord(name = 'test',direction ='X',value =np.array([5,1,2,3, 4]), metadata = {'hi':12})
# providing coord1 and coord2 with duals. coord3 is self-dual
coord1_edges = sg.fieldcls.Coord(name = 'test1_edges',direction ='X',value =np.array([0.5,1.5,2.5,3.5]), strings = ['a','b','c','d'] , dual = coord1 , metadata = {'hi':25} )
coord2_edges = sg.fieldcls.Coord(name = 'test2_edges',direction ='Y',value =np.array([0.5,1.5,2.5,3.5,4.5]), dual = coord2, metadata = {'hi':77})
# identical in main attributes to previous set (in order):
coord4_edges = sg.fieldcls.Coord(name = 'test1_edges',direction ='X',value =np.array([0.5,1.5,2.5,3.5]), dual = coord4 , metadata = {'hi':25} )
coord5_edges = sg.fieldcls.Coord(name = 'test2_edges',direction ='Y',value =np.array([0.5,1.5,2.5,3.5,4.5]), dual = coord5, metadata = {'hi':77})
# YCoords ---
ycoord1 = sg.fieldcls.YCoord(name = 'test1',direction ='X',value =np.array([1.,2.,3.]) , metadata = {'hi':5} )
ycoord2 = sg.fieldcls.YCoord(name = 'test2',direction ='Y',value =np.array([1.,2.,3.,4.]), metadata = {'hi':7})
ycoord3 = sg.fieldcls.YCoord(name = 'test',direction ='X',value =np.array([5.,1.,2.,3.,4.]), metadata = {'hi':3})
# identical in main attributes to previous set (in order):
ycoord4 = sg.fieldcls.YCoord(name = 'test1',direction ='X',value =np.array([1.,2.,3.]), metadata = {'hi':8})
ycoord5 = sg.fieldcls.YCoord(name = 'test2',direction ='Y',value =np.array([1.,2.,3., 4.]), metadata = {'hi':10})
ycoord6 = sg.fieldcls.YCoord(name = 'test',direction ='X',value =np.array([5.,1.,2.,3., 4.]), metadata = {'hi':12})
ycoord1_edges = sg.fieldcls.YCoord(name = 'test1_edges',direction ='X',value =np.array([0.5,1.5,2.5,3.5]), dual = ycoord1 , metadata = {'hi':25} )
ycoord2_edges = sg.fieldcls.YCoord(name = 'test2_edges',direction ='Y',value =np.array([0.5,1.5,2.5,3.5,4.5]), dual = ycoord2, metadata = {'hi':77})
# identical in main attributes to previous set (in order):
ycoord4_edges = sg.fieldcls.YCoord(name = 'test1_edges',direction ='X',value =np.array([0.5,1.5,2.5,3.5]), dual = ycoord4 , metadata = {'hi':25} )
ycoord5_edges = sg.fieldcls.YCoord(name = 'test2_edges',direction ='Y',value =np.array([0.5,1.5,2.5,3.5,4.5]), dual = ycoord5, metadata = {'hi':77})
# XCoords ---
xcoord1 = sg.fieldcls.XCoord(name = 'test1',direction ='X',value =np.array([1.,2.,3.]) , metadata = {'hi':5} )
xcoord2 = sg.fieldcls.XCoord(name = 'test2',direction ='Y',value =np.array([1.,2.,3.,4.]), metadata = {'hi':7})
xcoord3 = sg.fieldcls.XCoord(name = 'test',direction ='X',value =np.array([5.,1.,2.,3.,4.]), metadata = {'hi':3})
# identical in main attributes to previous set (in order):
xcoord4 = sg.fieldcls.XCoord(name = 'test1',direction ='X',value =np.array([1.,2.,3.]), metadata = {'hi':8})
xcoord5 = sg.fieldcls.XCoord(name = 'test2',direction ='Y',value =np.array([1.,2.,3., 4.]), metadata = {'hi':10})
xcoord6 = sg.fieldcls.XCoord(name = 'test',direction ='X',value =np.array([5.,1.,2.,3., 4.]), metadata = {'hi':12})
xcoord1_edges = sg.fieldcls.XCoord(name = 'test1_edges',direction ='X',value =np.array([0.5,1.5,2.5,3.5]), dual = xcoord1 , metadata = {'hi':25} )
xcoord2_edges = sg.fieldcls.XCoord(name = 'test2_edges',direction ='Y',value =np.array([0.5,1.5,2.5,3.5,4.5]), dual = xcoord2, metadata = {'hi':77})
# identical in main attributes to previous set (in order):
xcoord4_edges = sg.fieldcls.XCoord(name = 'test1_edges',direction ='X',value =np.array([0.5,1.5,2.5,3.5]), dual = xcoord4 , metadata = {'hi':25} )
xcoord5_edges = sg.fieldcls.XCoord(name = 'test2_edges',direction ='Y',value =np.array([0.5,1.5,2.5,3.5,4.5]), dual = xcoord5, metadata = {'hi':77})
# we are testing for Coord, YCoord and XCoord
cstack1 = provide_axis([coord1,coord2,coord3,coord1_edges,coord2_edges])
cstack2 = provide_axis([coord4,coord5,coord6,coord4_edges,coord5_edges])
ycstack1 = provide_axis([ycoord1,ycoord2,ycoord3,ycoord1_edges,ycoord2_edges])
ycstack2 = provide_axis([ycoord4,ycoord5,ycoord6,ycoord4_edges,ycoord5_edges])
xcstack1 = provide_axis([xcoord1,xcoord2,xcoord3,xcoord1_edges,xcoord2_edges])
xcstack2 = provide_axis([xcoord4,xcoord5,xcoord6,xcoord4_edges,xcoord5_edges])
self.fixture = [cstack1, cstack2, ycstack1, ycstack2,xcstack1, xcstack2,]
def tearDown(self):
print 'Tearing down %s'%type(self).__name__
del self.fixture
def test_init_method(self):
"""Test the __init__ method of Coord
"""
self.assertRaises(ValueError, sg.fieldcls.Coord, **{'name' : 'test1','direction' :'X','value': np.array([1.,2.,3.]) , 'metadata': {'hi':5}, 'strings': ['foo','bar'] })
def test_get_item_method(self):
"""Test the __getitem__ method of Coord class for success, failure and raised error.
"""
coord1 = self.fixture[0][0]
self.assertEqual(coord1[1], 2.)
def test_coord_array_equal_method(self):
"""Test the array_equal method of Coord class for success, failure and raised error.
"""
coord1 = self.fixture[0][0]
coord2 = self.fixture[0][1]
coord4 = self.fixture[1][0]
self.assertEqual(coord1.array_equal(coord2), False)
self.assertEqual(coord1.array_equal(coord4), True)
self.assertRaises(TypeError, coord1.array_equal, 5)
def test_coord_init_attributes_assigned(self):
"""
Test whether all passed arguments are assigned to attributes as intended. This is easy to forget when adding new arguments.
"""
pass
def test_field_getitem_method(self):
"""Tests whether indexing the resulting Field works"""
coord1 = self.fixture[0][0] # this one has string property set
coord2 = self.fixture[0][1] # this one doesn't
coord4 = self.fixture[1][0]
K=coord1(coord1*coord2)
R = coord1.coord_shift(K,1)
self.assertEqual( (R[1,1:3]).shape, (2,) )
self.assertEqual( isinstance(R[1,1:3], sg.Field), True )
self.assertEqual( isinstance(R[1,1:2], sg.Field), False ) # float
self.assertEqual( isinstance(R[1,2], sg.Field), False ) # float
def test_coord_sliced_method(self):
"""Tests whether slicing works"""
coord1 = self.fixture[0][0] # this one has string property set
coord2 = self.fixture[0][1] # this one doesn't
coord4 = self.fixture[1][0]
self.assertEqual(coord1.sliced(slice(None,None,None) ) is coord1, True )
slice_obj = slice(1,None,None)
coord1_sliced = coord1.sliced( slice_obj )
coord2_sliced = coord2.sliced( slice_obj )
self.assertEqual(np.array_equal(coord1_sliced.value, coord1.value[slice_obj] ) , True)
self.assertEqual(np.array_equal(coord1_sliced.strings, coord1.strings[slice_obj] ) , True)
# the dual Coord should also be sliced and be properly assigned:
self.assertEqual(len(coord1_sliced.dual.value) , len(coord1.dual.value) -1 )
# the dual should remain one longer
self.assertEqual(len(coord1_sliced.dual.value) , len(coord1_sliced.value) + 1 )
self.assertEqual(coord1_sliced.dual.dual, coord1_sliced)
self.assertEqual(coord2_sliced.strings, None)
# test integer slice:
self.assertEqual(len(coord1.sliced(2) ) , 1)
# Now make coord1 self dual to test for self-dual Coord object:
coord1.give_dual()
# as an aside, test whether give_dual worked:
self.assertEqual(coord1.dual is coord1, True)
# ok, slice again:
coord1_sliced = coord1.sliced( slice_obj )
# the sliced coord should remain self-dual:
self.assertEqual(coord1_sliced.dual is coord1_sliced, True)
# ---------- test block for Coord class ------
def test_coord_mult_with_AxGr(self):
"""
Test copy method of Coord.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
X = sg.fieldcls.Ax('X')
Y = sg.fieldcls.Ax('Y')
Z = sg.fieldcls.Ax('Z')
coord1.give_axis(X)
coord2.give_axis(Y)
coord3.give_axis(Z)
coord3.direction = 'Z'
self.assertEqual( (X*Y)*(coord1*coord2*coord3) , coord1*coord2 )
self.assertEqual( (Y*X)*(coord1*coord2*coord3) , coord2*coord1 )
def test_copy_method_yields_not_same_for_case_name(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that matches the original (as a different object in memory) except for that specific attribute.
"""
cstack1 = self.fixture[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
coord3_copy = coord3.copy(name = 'joep')
self.assertEqual(coord3_copy.name, 'joep' )
def test_copy_method_yields_not_same_for_case_dual(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that matches the original (as a different object in memory) except for that specific attribute.
"""
cstack1 = self.fixture[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
Z = sg.Ax('Z')
coord3_copy = coord3.copy(dual = coord2)
test_args = {'name':'joep', 'value':np.array([1.,2.,3.]),'dual':coord2,'axis':Z,'direction':'Z','units':'cm','long_name':'this is a coordinate in the x direction','metadata':{'hi':0},'strings':['five','one','two','three','four']}
for ta in test_args:
value = test_args[ta]
coord3_copy = coord3.copy(**{ta:value})
coord_att = getattr(coord3_copy,ta)
if isinstance(coord_att,np.ndarray):
self.assertEqual(np.array_equal(coord_att, value), True )
else:
self.assertEqual(coord_att, value )
def test_coord_copy_self_dual(self):
"""
Test copy method of Coord.
"""
cstack1 = self.fixture[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
# coord1 and coord2 have non-self duals, coord3 is self-dual.
copy_coord3 = coord3.copy()
# test whether coord3 remains self-dual under operation:
self.assertEqual(copy_coord3.dual is copy_coord3.dual , True )
def test_coord_copy_other_dual(self):
"""
Test copy method of Coord.
"""
cstack1 = self.fixture[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
# coord1 and coord2 have non-self duals, coord3 is self-dual.
copy_coord2 = coord2.copy()
self.assertEqual(copy_coord2.dual is coord2.dual , True )
def test_coord_neg_self_dual(self):
"""
Test __neg__ method of Coord.
"""
cstack1 = self.fixture[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
# coord1 and coord2 have non-self duals, coord3 is self-dual.
minus_coord3 = -coord3
# test whether coord3 remains self-dual under operation:
self.assertEqual(minus_coord3.dual, minus_coord3 )
def test_coord_neg_other_dual(self):
"""
Test __neg__ method of Coord on value for dual.
"""
cstack1 = self.fixture[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
# coord1 and coord2 have non-self duals:
minus_coord2 = -coord2
self.assertEqual( np.array_equal(minus_coord2.dual.value, -coord2.dual.value), True )
def test_coord_neg_value_is_neg(self):
"""
Test __neg__ method of Coord for Coord object itself on value.
"""
cstack1 = self.fixture[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
# coord1 and coord2 have non-self duals:
minus_coord2 = -coord2
self.assertEqual( np.array_equal(minus_coord2.value, -coord2.value), True )
def test_same_method_yields_same(self):
"""
Test whether making a copy with no arguments passed to .copy method yields a Coord object that is the same (with respect to .same method) as the original (although a different object in memory). Also tested for other Coord objects from fixture and for hybrid axis attributes (one str, one Ax).
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord4 = self.fixture[1][0]
coord3 = cstack1[2]
coord3_copy = coord3.copy()
self.assertEqual(coord3.same(coord3_copy),True )
self.assertEqual(coord1.same(coord4),True )
coord4.axis = sg.fieldcls.Ax(coord4.axis)
self.assertEqual(coord1.same(coord4),True )
self.assertEqual(coord4.same(coord1),True )
self.assertEqual(coord1.same(coord3),False )
def test_same_method_yields_not_same_for_case_array(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that is NOT the same (with respect to .same method) as the original (and a different object in memory).
Note that in general, the .same method tests for:
self.array_equal(other)
self.name == other.name
self.axis == other.axis
self.direction == other.direction
"""
cstack1 = self.fixture[0]
coord3 = cstack1[2]
coord3_copy = coord3.copy(value = np.array([5,6,7]))
self.assertEqual(coord3.same(coord3_copy), False )
def test_same_method_yields_not_same_for_case_name(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that is NOT the same (with respect to .same method) as the original (and a different object in memory).
Note that in general, the .same method tests for:
self.array_equal(other)
self.name == other.name
self.axis == other.axis
self.direction == other.direction
"""
cstack1 = self.fixture[0]
coord3 = cstack1[2]
coord3_copy = coord3.copy(name = 'joep')
self.assertEqual(coord3.same(coord3_copy), False )
def test_same_method_yields_not_same_for_case_axis(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that is NOT the same (with respect to .same method) as the original (and a different object in memory).
Note that in general, the .same method tests for:
self.array_equal(other)
self.name == other.name
self.axis == other.axis
self.direction == other.direction
"""
cstack1 = self.fixture[0]
coord3 = cstack1[2]
coord3_copy = coord3.copy(axis = 'Z')
self.assertEqual(coord3.same(coord3_copy), False )
def test_same_method_yields_not_same_for_case_direction(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that is NOT the same (with respect to .same method) as the original (and a different object in memory).
Note that in general, the .same method tests for:
self.array_equal(other)
self.name == other.name
self.axis == other.axis
self.direction == other.direction
"""
cstack1 = self.fixture[0]
coord3 = cstack1[2]
coord3_copy = coord3.copy(direction = 'Z')
self.assertEqual(coord3.same(coord3_copy), False )
def test_cast_method_2D_grid(self):
"""
Test Coord cast method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
F = coord1.cast(coord1*coord2)
self.assertEqual(F.shape, (3,4) )
self.assertEqual( np.array_equal( F.value[1,:], np.array([2,2,2,2]) ), True )
self.assertEqual( np.array_equal( F.value[:,1], np.array([1.,2.,3.]) ), True )
def test_copy_equiv_method(self):
"""
Test whether Coord.copy yields a new self-equivalent Coord object.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
K = coord1.copy(name='ho')
self.assertEqual(K.is_equiv(K), True )
def test_make_equiv_method(self):
"""
Test Coord make_equiv method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
coord1.make_equiv(coord2)
self.assertEqual(coord2.associative, coord1.associative )
def test_is_equiv_method_false(self):
"""
Test Coord is_equiv method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
self.assertEqual(coord1.is_equiv(coord2), False )
def test_is_equiv_method_true(self):
"""
Test Coord is_equiv method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
coord1.make_equiv(coord2)
self.assertEqual(coord1.is_equiv(coord2), True )
self.assertEqual(coord2.is_equiv(coord1), True )
def test_eq_in_method_false(self):
"""
Test Coord eq_in method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
self.assertEqual(coord1.eq_in(coord2*coord3), None )
def test_eq_in_method_true(self):
"""
Test Coord eq_in method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
coord1.make_equiv(coord2)
self.assertEqual(coord1.eq_in(coord2*coord3), coord2 )
def test_coord_from_scratch_equiv_to_axis(self):
"""
Test whether newly created Coord objects are equivalent to their axis
"""
s = (123, 16, 16) # shape
X = sg.Ax('X')
Y = sg.Ax('Y')
T = sg.Ax('T')
c0 = sg.Coord(name='time',value=np.arange(s[0]),axis=T,direction='T')
c1 = sg.Coord(name='y',value=np.arange(s[1]),axis=Y,direction='Y')
c2 = sg.Coord(name='x',value=np.arange(s[2]),axis=X,direction='X')
F = sg.Field(name='data',value=np.ones(s),grid = c0*c1*c2)
G = F/X # this fails if c0 is not equivalent to T etc., i.e. it tests that newly created Coords are automatically equivalent to their Ax
self.assertEqual(G.shape,(123, 16))
def test_pow_method(self):
"""
Test Coord __pow__ method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
self.assertEqual(coord1**2, sg.Gr((coord1,)) )
def test_mul_method_non_equiv(self):
"""
Test Coord __mul__ method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
self.assertEqual((coord1*coord2).shape(), (3,4) )
def test_mul_method_equiv(self):
"""
Test Coord __mul__ method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
coord1.make_equiv(coord2)
self.assertEqual((coord1*coord2).shape(), (3,) )
def test_roll_function_non_masked(self):
"""Test the sg roll function
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
K = coord1(coord1*coord2)
R= sg.roll(K,coord=coord1,mask = False)
# test whether the Field.roll method is compatible with sg.roll:
self.assertEqual( np.array_equal(R.value, K.roll(shift=1 , crd=coord1).value),True)
self.assertEqual( np.array_equal( R.value[0,:], np.array([3.,3.,3.,3.]) ), True )
self.assertEqual( np.array_equal( R.value[1,:], np.array([1.,1.,1.,1.]) ), True )
# first coord in R.grid is replaced:
self.assertEqual( R.grid[0] is coord1, False )
# second coord in R.grid is not replaced:
self.assertEqual( R.grid[1] is coord2, True )
# test whether coord in R.grid is properly rolled:
self.assertEqual( np.array_equal(R.grid[0].value , np.array( [3., 1., 2.] ) ) , True )
def test_roll_function_non_masked_keepgrid(self):
"""Test the sg roll function
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
K = coord1(coord1*coord2)
R= sg.roll(K,coord=coord1,mask = False, keepgrid = True)
# first coord in R.grid is not replaced:
self.assertEqual( R.grid[0] is coord1, True )
# second coord in R.grid is not replaced:
self.assertEqual( R.grid[1] is coord2, True )
def test_roll_function_masked(self):
"""Test the sg roll function
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
K = coord1(coord1*coord2)
R= sg.roll(K,coord=coord1,mask = True)
self.assertEqual( np.isnan( R.value[0,:] ).all() , True )
self.assertEqual( np.array_equal( R.value[1,:], np.array([1.,1.,1.,1.]) ), True )
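# Illustrative sketch (not part of the original test suite): a masked roll is
# conceptually np.roll followed by nan-filling the strip that wrapped around.
# Minimal numpy analogue for shift=1 along axis 0; sg.roll additionally rolls
# the Coord in the grid, which this sketch omits.
import numpy as np
_K = np.broadcast_to(np.array([1., 2., 3.])[:, None], (3, 4)).copy()
_R = np.roll(_K, 1, axis=0)
_R[0, :] = np.nan  # mask the wrapped-around strip
assert np.isnan(_R[0, :]).all()
assert np.array_equal(_R[1, :], np.array([1., 1., 1., 1.]))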
def test_coord_shift_method(self):
"""
Test Coord coord_shift method.
Checks the NaNs that fill the newly exposed strip.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
# going to create 2D field K by regridding coord1, and then apply method to K to obtain R
# cast coord1 to Field defined on test grid coord1*coord2
K = coord1.cast(coord1*coord2)
R = coord1.coord_shift(K,1) # and apply method to it to obtain Field R
# test whether newly exposed area (1D strip) is filled with the default fill value, nan
self.assertEqual( np.isnan( R.value[0,:] ).all() , True )
# test whether R is constant in coord2 direction
self.assertEqual( np.array_equal( R.value[1,:], np.array([1.,1.,1.,1.]) ), True )
# The default keepgrid=False replaces the first Coord in the grid with a rolled copy named 'test1_rolled':
self.assertEqual(R.grid[0].name == 'test1_rolled' , True )
self.assertEqual( np.array_equal( R.grid[0].value, np.array([3.,1.,2.]) ), True )
#
def test_coord_shift_method_keepgrid_arg(self):
"""
Test Coord coord_shift method with keepgrid arg True
Checks the NaNs that fill the newly exposed strip.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
# going to create 2D field K by regridding coord1, and then apply method to K to obtain R
# cast coord1 to Field defined on test grid coord1*coord2
K = coord1.cast(coord1*coord2)
R = coord1.coord_shift(K,1,keepgrid=True) # and apply method to it to obtain Field R
# test whether newly exposed area (1D strip) is filled with the default fill value, nan
self.assertEqual( np.isnan( R.value[0,:] ).all() , True )
# test whether R is constant in coord2 direction
self.assertEqual( np.array_equal( R.value[1,:], np.array([1.,1.,1.,1.]) ), True )
# With keepgrid=True the original grid is kept, so the first Coord retains the name 'test1':
self.assertEqual(R.grid[0].name == 'test1' , True )
self.assertEqual( np.array_equal( R.grid[0].value, np.array([1.,2.,3.]) ), True )
def test_coord_shift_method_nan_val_arg(self):
"""
Test Coord coord_shift method with nan_val arg set to 10
Checks that the newly exposed strip is filled with the given value instead of NaN.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
# going to create 2D field K by regridding coord1, and then apply method to K to obtain R
# cast coord1 to Field defined on test grid coord1*coord2
K = coord1.cast(coord1*coord2)
R = coord1.coord_shift(K,1,nan_val = 10.) # and apply method to it to obtain Field R
# test whether newly exposed area (1D strip) is filled with the specified fill value, 10.
self.assertEqual( ( R.value[0,:] ==10. ).all() , True )
# test whether R is constant in coord2 direction
self.assertEqual( np.array_equal( R.value[1,:], np.array([1.,1.,1.,1.]) ), True )
# The default keepgrid=False replaces the first Coord in the grid with a rolled copy named 'test1_rolled':
self.assertEqual(R.grid[0].name == 'test1_rolled' , True )
self.assertEqual( np.array_equal( R.grid[0].value, np.array([3.,1.,2.]) ), True )
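# Illustrative sketch (not part of the original test suite): coord_shift is a
# non-periodic shift, so the exposed strip is filled with nan_val instead of
# wrapping around. Minimal numpy analogue for shift=1 along axis 0.
import numpy as np
_K = np.broadcast_to(np.array([1., 2., 3.])[:, None], (3, 4)).copy()
_R = np.empty_like(_K)
_R[0, :] = 10.          # the newly exposed strip gets the fill value
_R[1:, :] = _K[:-1, :]  # the remaining rows are shifted down by one
assert (_R[0, :] == 10.).all()
assert np.array_equal(_R[1, :], np.array([1., 1., 1., 1.]))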
def test_directed_field_addition_X_X(self):
"""
Test adding two fields of various directions
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
# going to create 2D field K by regridding coord1.
K = coord1.cast(coord1*coord2)
# R = coord1.coord_shift(K,1,keepgrid=True) # and apply method to it to obtain Field R
M = K.copy(direction='X')
L = K.copy(direction='X')
self.assertEqual( np.sum( (M + L).value ), 48.0 )
self.assertEqual( np.sum( (M - L).value ), 0.0 )
def test_directed_field_addition_scalar_X(self):
"""
Test adding two fields of various directions
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
# going to create 2D field K by regridding coord1.
K = coord1.cast(coord1*coord2)
# R = coord1.coord_shift(K,1,keepgrid=True) # and apply method to it to obtain Field R
M = K.copy(direction='scalar')
L = K.copy(direction='X')
self.assertEqual( np.sum( (M + L).value ), 48.0 )
self.assertEqual( np.sum( (M - L).value ), 0.0 )
self.assertEqual( np.sum( (L + M).value ), 48.0 )
self.assertEqual( np.sum( (-(L - M)).value ), 0.0 )
def test_trans_method(self):
"""
Test Coord trans method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
# cast coord1 to Field defined on test grid coord1*coord2
K = coord1.cast(coord1*coord2)
R = coord1.trans(K) # and apply trans to it
self.assertEqual( np.array_equal( R.value[1,:], np.array([1.,1.,1.,1.]) ), True )
self.assertEqual( np.array_equal( R.value[2,:], np.array([1.,1.,1.,1.]) ), True )
self.assertEqual( R.grid[0] is coord1 , True )
def test_sum_method(self):
"""
Test Coord sum method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
K = coord1(coord1*coord2)
R = coord1.sum(K)
self.assertEqual( np.array_equal(R.value, 6*np.array([1.,1.,1.,1.]) ), True )
self.assertEqual( coord1.sum(sg.ones(coord1**2)) , 3.0 )
# should error if coord1 not in gr of argument field:
self.assertRaises( ValueError, coord1.sum, sg.ones(coord2*coord3) )
def test_roll_method(self):
"""
Test Coord roll method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
K = coord1.roll(1)
# the original Coord must remain unchanged:
self.assertEqual( np.array_equal(coord1.value, np.array([1.,2.,3.]) ), True )
self.assertEqual( np.array_equal(K.value, np.array([3.,1.,2.]) ), True )
def test_flip_method(self):
"""
Test Coord flip method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
K = coord1(coord1*coord2)
R = coord1.flip(K)
self.assertEqual( np.array_equal(R.value[:,1], np.array([3.,2.,1.]) ), True )
def test_flip_method_transpose_of_previous(self):
"""
Test Coord flip method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
# order of coord product reversed with respect to previous test:
K = coord1(coord2*coord1)
R = coord1.flip(K)
self.assertEqual( np.array_equal(R.value[1,:], np.array([3.,2.,1.]) ), True )
def test_cumsum_method(self):
"""
Test Coord cumsum method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
# order of coord product reversed with respect to previous test:
ONES = sg.ones(coord1*coord2)
R = coord1.cumsum(ONES )
self.assertEqual(R.grid,ONES.grid)
self.assertEqual( np.array_equal(R.value[0,:], np.array([3.,3.,3.,3.]) ), True )
R = coord1.cumsum(ONES , upward = True )
self.assertEqual( np.array_equal(R.value[-1,:], np.array([3.,3.,3.,3.]) ), True )
# should error if coord1 not in gr of argument field:
self.assertRaises( ValueError, coord1.cumsum, sg.ones(coord2*coord3) )
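# Illustrative sketch (not part of the original test suite): judging from the
# assertions above, the default accumulates so that the total lands in row 0,
# while upward=True leaves the total in the last row. Numpy analogue; the
# mapping of the flag to a direction is inferred here, not read from sg.
import numpy as np
_ONES = np.ones((3, 4))
_default = np.cumsum(_ONES[::-1, :], axis=0)[::-1, :]
_upward = np.cumsum(_ONES, axis=0)
assert np.array_equal(_default[0, :], np.array([3., 3., 3., 3.]))
assert np.array_equal(_upward[-1, :], np.array([3., 3., 3., 3.]))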
def test_der_method(self):
"""
Test Coord der method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
# make up Ax to use for coord1:
W = sg.fieldcls.Ax('W', direction='W')
coord1.give_axis(W)
W2 = sg.fieldcls.Ax('W2', direction='W2')
coord1.give_axis(W2)
K = coord1(coord1*coord2)
R=coord1.der(K)
R_with_Ax_method = W.der(K)
R_with_Field_method = K.der(W)
value_R = copy.deepcopy(R.value)
value_R_wam = copy.deepcopy(R_with_Ax_method.value)
value_R_wfm = copy.deepcopy(R_with_Field_method.value)
value_R[np.isnan(value_R)] = 0. # to be able to do np.array_equal
value_R_wam[np.isnan(value_R_wam)] = 0.
value_R_wfm[np.isnan(value_R_wfm)] = 0.
# test whether Ax.der calls coord1.der properly:
self.assertEqual(np.array_equal(value_R, value_R_wam), True)
# test whether Field.der calls Ax.der properly:
self.assertEqual(np.array_equal(value_R, value_R_wfm), True)
self.assertEqual( np.array_equal( R.value[1,:], np.array([1.,1., 1., 1.]) ), True )
self.assertEqual( (np.isnan( R.value )[0,:]).all(), True )
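# Illustrative sketch (not part of the original test suite): a derivative
# along a coord behaves like a one-sided difference divided by the coord
# spacing, with nan on the exposed edge. Whether sg uses backward or forward
# differences is an assumption here; the field equals the coord values, so
# the interior derivative is 1 either way.
import numpy as np
_x = np.array([1., 2., 3.])
_K = np.broadcast_to(_x[:, None], (3, 4)).copy()
_R = np.empty_like(_K)
_R[0, :] = np.nan
_R[1:, :] = np.diff(_K, axis=0) / np.diff(_x)[:, None]
assert np.array_equal(_R[1, :], np.ones(4))
assert np.isnan(_R[0, :]).all()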
def test_delta_dist_method(self):
"""
Test Coord delta_dist method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
self.assertEqual( np.array_equal( coord1.delta_dist().value[1:], np.array([ 1., 1.]) ), True )
self.assertEqual( np.isnan( coord1.delta_dist()[0] ), True )
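# Illustrative sketch (not part of the original test suite): on a plain Coord,
# delta_dist behaves like consecutive differences with nan in the first slot.
import numpy as np
_v = np.array([1., 2., 3.])
_d = np.concatenate(([np.nan], np.diff(_v)))
assert np.array_equal(_d[1:], np.array([1., 1.]))
assert np.isnan(_d[0])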
def test_d_method(self):
"""
Test Coord d method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
self.assertEqual( np.array_equal( coord1.d().value, np.array([ 1., 1., 1.]) ), True )
def test_vol_method(self):
"""
Test Coord vol method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
self.assertEqual( np.array_equal( coord1.vol(coord1*coord2).value, np.array([ 1., 1., 1.]) ), True )
self.assertRaises(ValueError, coord1.vol , coord2*coord3 )
# -------- test block for YCoord class ---------------
def testY_copy_method_yields_not_same_for_case_name(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that matches the original except in that specific attribute (and is a different object in memory).
"""
cstack1 = self.fixture[2]
coord2 = cstack1[1]
coord3 = cstack1[2]
coord3_copy = coord3.copy(name = 'joep')
self.assertEqual(coord3_copy.name, 'joep' )
def testY_copy_method_yields_not_same_for_case_dual(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that matches the original except in that specific attribute (and is a different object in memory).
"""
cstack1 = self.fixture[2]
coord2 = cstack1[1]
coord3 = cstack1[2]
Z = sg.Ax('Z')
coord3_copy = coord3.copy(dual = coord2)
test_args = {'name': 'joep', 'value': np.array([1., 2., 3.]), 'dual': coord2,
'axis': Z, 'direction': 'Z', 'units': 'cm',
'long_name': 'this is a coordinate in the x direction',
'metadata': {'hi': 0}, 'strings': ['five', 'one', 'two', 'three', 'four']}
for ta in test_args:
value = test_args[ta]
coord3_copy = coord3.copy(**{ta:value})
coord_att = getattr(coord3_copy,ta)
if isinstance(coord_att,np.ndarray):
self.assertEqual(np.array_equal(coord_att, value), True )
else:
self.assertEqual(coord_att, value )
def testY_same_method_yields_same(self):
"""
Test whether making a copy with no arguments passed to .copy method yields a Coord object that is the same (with respect to .same method) as the original (although a different object in memory).
"""
cstack1 = self.fixture[2]
coord3 = cstack1[2]
coord3_copy = coord3.copy()
self.assertEqual(coord3.same(coord3_copy),True )
def testY_same_method_yields_not_same_for_case_array(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that is NOT the same (with respect to .same method) as the original (and a different object in memory).
Note that in general, the .same method tests for:
self.array_equal(other)
self.name == other.name
self.axis == other.axis
self.direction == other.direction
"""
cstack1 = self.fixture[2]
coord3 = cstack1[2]
coord3_copy = coord3.copy(value = np.array([5,6,7]))
self.assertEqual(coord3.same(coord3_copy), False )
def testY_same_method_yields_not_same_for_case_name(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that is NOT the same (with respect to .same method) as the original (and a different object in memory).
Note that in general, the .same method tests for:
self.array_equal(other)
self.name == other.name
self.axis == other.axis
self.direction == other.direction
"""
cstack1 = self.fixture[2]
coord3 = cstack1[2]
coord3_copy = coord3.copy(name = 'joep')
self.assertEqual(coord3.same(coord3_copy), False )
def testY_same_method_yields_not_same_for_case_axis(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that is NOT the same (with respect to .same method) as the original (and a different object in memory).
Note that in general, the .same method tests for:
self.array_equal(other)
self.name == other.name
self.axis == other.axis
self.direction == other.direction
"""
cstack1 = self.fixture[2]
coord3 = cstack1[2]
coord3_copy = coord3.copy(axis = 'Z')
self.assertEqual(coord3.same(coord3_copy), False )
def testY_same_method_yields_not_same_for_case_direction(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that is NOT the same (with respect to .same method) as the original (and a different object in memory).
Note that in general, the .same method tests for:
self.array_equal(other)
self.name == other.name
self.axis == other.axis
self.direction == other.direction
"""
cstack1 = self.fixture[2]
coord3 = cstack1[2]
coord3_copy = coord3.copy(direction = 'Z')
self.assertEqual(coord3.same(coord3_copy), False )
# -------- test block for XCoord class ---------------
def testX_copy_method_yields_not_same_for_case_name(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that matches the original except in that specific attribute (and is a different object in memory).
"""
cstack1 = self.fixture[4]
coord2 = cstack1[1]
coord3 = cstack1[2]
coord3_copy = coord3.copy(name = 'joep')
self.assertEqual(coord3_copy.name, 'joep' )
def testX_copy_method_yields_not_same_for_case_dual(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that matches the original except in that specific attribute (and is a different object in memory).
"""
cstack1 = self.fixture[4]
coord2 = cstack1[1]
coord3 = cstack1[2]
Z = sg.Ax('Z')
coord3_copy = coord3.copy(dual = coord2)
test_args = {'name': 'joep', 'value': np.array([1., 2., 3.]), 'dual': coord2,
'axis': Z, 'direction': 'Z', 'units': 'cm',
'long_name': 'this is a coordinate in the x direction',
'metadata': {'hi': 0}, 'strings': ['five', 'one', 'two', 'three', 'four']}
for ta in test_args:
value = test_args[ta]
coord3_copy = coord3.copy(**{ta:value})
coord_att = getattr(coord3_copy,ta)
if isinstance(coord_att,np.ndarray):
self.assertEqual(np.array_equal(coord_att, value), True )
else:
self.assertEqual(coord_att, value )
def testX_roll_method(self):
"""
Test XCoord roll method.
"""
cstack1 = self.fixture[4]
coord1 = cstack1[0]
# Check the shift is re-entrant:
self.assertEqual( np.array_equal(coord1.roll(1).value, np.array([-357., 1., 2.])) , True )
self.assertEqual( np.array_equal(coord1.roll(-1).value, np.array([-358., -357., 1.])) , True )
def testX_coord_shift_method(self):
"""
Test XCoord coord_shift method.
Need to check no nan's show up in the exposed area.
"""
cstack1 = self.fixture[4]
coord1 = cstack1[0]
coord2 = cstack1[1]
K = coord1(coord1*coord2)
R = coord1.coord_shift(K,1)
# Check the shift is re-entrant:
self.assertEqual( np.array_equal( R.value[0,:], np.array([3.,3.,3.,3.]) ), True )
self.assertEqual( np.array_equal( R.value[1,:], np.array([1.,1.,1.,1.]) ), True )
# check sum is preserved:
self.assertEqual( np.sum(K.value), np.sum(R.value) )
def testX_delta_dist_method(self):
""" Test the XCoord delta_dist method
"""
y_step = 30
xcoord1 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
ycoord1 = sg.fieldcls.YCoord(name = 'testy',direction ='Y',value =np.arange(-90.,90.+y_step,y_step) )
K = xcoord1.delta_dist(ycoord1)
# This must be a 2D Field:
self.assertEqual(K.shape, (7,4))
# test some values
self.assertAlmostEqual(K[1,0], 5002986.3008417469, places =3 )
self.assertAlmostEqual(K[3,0], 10005972.601683492, places =3 )
# kind of "checksum"
self.assertAlmostEqual(np.sum(K.value), 149371192.51449975, places =3 )
# distances between all consecutive points around circle must be same
# test whether constant in this direction, and no nan:
K2 = K - K[1,0]
idx = K2.value == 0
self.assertEqual( idx[1,:].all(), True )
# I want more tests in this area. Also more tests to indicate that actual calculations (not just code logic) are correct.
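# Illustrative sketch (not part of the original test suite): the expected
# values above are consistent with zonal great-circle spacing,
# dx = R*cos(lat)*dlon, with an Earth radius of about 6370 km. The radius is
# an assumption inferred from the numbers, not read from the sg source.
import numpy as np
_R_earth = 6370.0e3
_dlon = np.radians(90.)
_lat = np.radians(-60.)  # second latitude row: -90 + 30
assert abs(_R_earth * np.cos(_lat) * _dlon - 5002986.3008417469) < 1.0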
def testX_der_method(self):
""" Test the XCoord der method
"""
y_step = 30
xcoord1 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
xcoord2 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
ycoord1 = sg.fieldcls.YCoord(name = 'testy',direction ='Y',value =np.arange(-90.,90.+y_step,y_step) )
K = xcoord1.delta_dist(ycoord1)
R=xcoord1.der(K,ycoord1)
# This must be a 2D Field:
self.assertEqual(R.shape, (7,4))
Idx = R.value == 0.
# result must be all zero
self.assertEqual(Idx.all(),True)
# note that xcoord2 is identical to xcoord1, but not the same object in memory, hence error:
self.assertRaises(ValueError, xcoord2.der, **{'F':K,'y_coord':ycoord1})
def testX_dist_method(self):
""" Test the XCoord dist method
"""
y_step = 30
xcoord1 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
ycoord1 = sg.fieldcls.YCoord(name = 'testy',direction ='Y',value =np.arange(-90.,90.+y_step,y_step) )
K = xcoord1.dist(ycoord1)
# This must be a 2D Field:
self.assertEqual(K.shape, (7,4))
# test some value:
self.assertAlmostEqual(K[2,2], 17330852.925257955 , places =3 )
# kind of "checksum"
self.assertAlmostEqual(np.sum(K.value), 224056788.77174962 , places =3 )
R = xcoord1.der(K,ycoord1)
dR = R.value[:,1:] - 1.
# result must all be ~zero
self.assertEqual(np.max(dR) < 1e-15,True)
dR = R.value[:,:1] -3.
# result must all be ~zero
self.assertEqual(np.max(dR) < 1e-15,True)
# note that xcoord2 is identical to xcoord1, but not the same object in memory, hence error:
xcoord2 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
self.assertRaises(ValueError, xcoord2.der, **{'F':K,'y_coord':ycoord1})
def testX_d_method(self):
""" Test the XCoord d method
"""
y_step = 30
xcoord1 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
ycoord1 = sg.fieldcls.YCoord(name = 'testy',direction ='Y',value =np.arange(-90.,90.+y_step,y_step) )
xcoord1_edges = sg.fieldcls.XCoord(name = 'testx_edges',direction ='X',value = np.arange(0.,360+45.,90.) -45. , dual = xcoord1 )
# ycoord1_edges = sg.fieldcls.YCoord(name = 'testy_edges',direction ='Y',value = np.arange(-90.+y_step/2,90.,y_step) , dual = ycoord1 )
K = xcoord1.d(ycoord1)
# This must be a 2D Field:
self.assertEqual(K.shape, (7,4))
self.assertAlmostEqual(np.sum(K.value), 149371192.51449975 , places =3 )
def testX_vol_method(self):
""" Test the XCoord vol method.
Might want to extend this with the introduction of new derived Coord classes.
"""
y_step = 30
xcoord1 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
ycoord1 = sg.fieldcls.YCoord(name = 'testy',direction ='Y',value =np.arange(-90.,90.+y_step,y_step) )
xcoord1_edges = sg.fieldcls.XCoord(name = 'testx_edges',direction ='X',value = np.arange(0.,360+45.,90.) -45. , dual = xcoord1 )
# ycoord1_edges = sg.fieldcls.YCoord(name = 'testy_edges',direction ='Y',value = np.arange(-90.+y_step/2,90.,y_step) , dual = ycoord1 )
# a YCoord must be in the grid:
self.assertRaises(RuntimeError, xcoord1.vol, xcoord1**2 )
# Now a YCoord is in the grid:
K = xcoord1.vol(ycoord1*xcoord1)
# This must be a 2D Field:
self.assertEqual(K.shape, (7,4))
self.assertEqual( np.array_equal( K.value, xcoord1.d(ycoord1).value ) , True)
xcoord2 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
# the identically valued xcoord2 is a different object from those in the grid, so vol returns None:
self.assertEqual(xcoord2.vol(ycoord1*xcoord1) , None )
def testX_same_method_yields_same(self):
"""
Test whether making a copy with no arguments passed to .copy method yields a Coord object that is the same (with respect to .same method) as the original (although a different object in memory).
"""
cstack1 = self.fixture[4]
coord3 = cstack1[2]
coord3_copy = coord3.copy()
self.assertEqual(coord3.same(coord3_copy),True )
def testX_same_method_yields_not_same_for_case_array(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that is NOT the same (with respect to .same method) as the original (and a different object in memory).
Note that in general, the .same method tests for:
self.array_equal(other)
self.name == other.name
self.axis == other.axis
self.direction == other.direction
"""
cstack1 = self.fixture[4]
coord3 = cstack1[2]
coord3_copy = coord3.copy(value = np.array([5,6,7]))
self.assertEqual(coord3.same(coord3_copy), False )
def testX_same_method_yields_not_same_for_case_name(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that is NOT the same (with respect to .same method) as the original (and a different object in memory).
Note that in general, the .same method tests for:
self.array_equal(other)
self.name == other.name
self.axis == other.axis
self.direction == other.direction
"""
cstack1 = self.fixture[4]
coord3 = cstack1[2]
coord3_copy = coord3.copy(name = 'joep')
self.assertEqual(coord3.same(coord3_copy), False )
def testX_same_method_yields_not_same_for_case_axis(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that is NOT the same (with respect to .same method) as the original (and a different object in memory).
Note that in general, the .same method tests for:
self.array_equal(other)
self.name == other.name
self.axis == other.axis
self.direction == other.direction
"""
cstack1 = self.fixture[4]
coord3 = cstack1[2]
coord3_copy = coord3.copy(axis = 'Z')
self.assertEqual(coord3.same(coord3_copy), False )
def testX_same_method_yields_not_same_for_case_direction(self):
"""
Test whether making a copy with 1 argument passed to .copy method yields a Coord object that is NOT the same (with respect to .same method) as the original (and a different object in memory).
Note that in general, the .same method tests for:
self.array_equal(other)
self.name == other.name
self.axis == other.axis
self.direction == other.direction
"""
cstack1 = self.fixture[4]
coord3 = cstack1[2]
coord3_copy = coord3.copy(direction = 'Z')
self.assertEqual(coord3.same(coord3_copy), False )
# -----------------------
# ------- further general Coord tests --------
def test_sort(self):
cstack1 = self.fixture[0]
coord3 = cstack1[2]
coord3.sort()
value = copy.deepcopy(coord3.value)
value.sort()
self.assertEqual( np.array_equal(coord3.value , value ) ,True )
def test_equality_relation_weaksame(self):
""""
Does the &-relationship yield equality?
"""
cstack1 = self.fixture[0]
cstack2 = self.fixture[1]
# First two coord objects should have same content
self.assertEqual(cstack1[0].weaksame(cstack2[0]), True)
def test_inequality_relation_weaksame(self):
""""
Does the &-relationship yield inequality?
"""
cstack1 = self.fixture[0]
cstack2 = self.fixture[1]
# These two coord objects are not the same
self.assertEqual(cstack1[0].weaksame(cstack2[1]), False)
def test_equality_relation_weaksame_grid(self):
""""
Does the weaksame-relationship yield equality for multiple-member object?
"""
cstack1 = self.fixture[0]
cstack2 = self.fixture[1]
# First two coord objects should have same content
self.assertEqual( (cstack1[0]*cstack1[1]).weaksame(cstack2[0]*cstack2[1]), True)
def test_inequality_relation_weaksame_grid(self):
""""
Does the weaksame-relationship yield inequality for multiple-member object?
"""
cstack1 = self.fixture[0]
cstack2 = self.fixture[1]
# Reversed member order or different members should not be weaksame
self.assertEqual( (cstack1[0]*cstack1[1]).weaksame(cstack2[1]*cstack2[0]), False)
self.assertEqual( (cstack1[0]*cstack1[1]).weaksame(cstack2[1]*cstack2[2]), False)
# ----- some make_axes related tests:
def test_equality_relation_find_equal_axes(self):
""""
Does the function find_equal_axes recognise equivalent coord objects in the two cstacks and replace the elements of the 2nd stack accordingly?
"""
cstack1 = self.fixture[0]
cstack2 = self.fixture[1]
# this should remove all redundant coord objects with respect to &-equality
sg.find_equal_axes(cstack1,cstack2)
self.assertEqual(cstack1,cstack2)
def test_make_axes_function_type_output(self):
"""
The output should be a list of Ax objects ([X,Y] expected, see below)
"""
cstack1 = self.fixture[0]
cstack2 = self.fixture[1]
self.assertEqual(isinstance(sg.make_axes(cstack1 + cstack2)[0],sg.Ax ) , True )
def test_make_axes_function_output_expected(self):
"""
The test coords contain only X and Y direction Ax objects
"""
cstack1 = self.fixture[0]
cstack2 = self.fixture[1]
self.assertEqual(str( sg.make_axes(cstack1 + cstack2) ) , '[X, Y]' )
def test_make_axes_function_no_output_expected(self):
"""
Calling make_axes twice should not yield further output
"""
cstack1 = self.fixture[0]
cstack2 = self.fixture[1]
sg.make_axes(cstack1 + cstack2)
self.assertEqual(str( sg.make_axes(cstack1 + cstack2) ) , '[]' )
class TestAxAndAxGr(unittest.TestCase):
# ----- for Ax and AxGr objects (not using fixture)
def test_equality_relation_weaksame_grid(self):
""""
Does the weaksame-relationship yield equality for multiple-member object?
"""
X = sg.fieldcls.Ax('X')
Y = sg.fieldcls.Ax('Y')
X2 = sg.fieldcls.Ax('X')
Y2 = sg.fieldcls.Ax('Y')
# Order matters: the reversed product is not weaksame, but identically named axes in the same order are
self.assertEqual( (X*Y).weaksame(Y*X), False)
self.assertEqual( (Y*X).weaksame(Y2*X2), True)
# ----- for Ax and AxGr objects (not using fixture)
def test_copy_AxGr(self):
""""
Test copy method of AxGr
"""
X = sg.fieldcls.Ax('X')
Y = sg.fieldcls.Ax('Y')
ag_copy = (X*Y).copy()
self.assertEqual( ag_copy.__repr__(), '(X,Y,)')
def test_eq_in_AxGr(self):
""""
Test eq_in method of AxGr
"""
X = sg.fieldcls.Ax('X')
Y = sg.fieldcls.Ax('Y')
Z = sg.fieldcls.Ax('Z')
self.assertEqual( (X*Y).eq_in(X), True )
self.assertEqual( (X*Y).eq_in(Z), False )
X2 = sg.fieldcls.Ax('X2')
self.assertEqual( (X*Y).eq_in(X2), False )
X2.make_equiv(X)
self.assertEqual( (X*Y).eq_in(X2), True )
def test_eq_index_AxGr(self):
""""
Test eq_index method of AxGr
"""
X = sg.fieldcls.Ax('X')
Y = sg.fieldcls.Ax('Y')
Z = sg.fieldcls.Ax('Z')
self.assertEqual( (X*Y).eq_index(Y), 1 )
self.assertEqual( (X*Y).eq_index(Z), -1 )
X2 = sg.fieldcls.Ax('X2')
self.assertEqual( (X*Y).eq_index(X2), -1 )
X2.make_equiv(X)
self.assertEqual( (X*Y).eq_index(X2), 0 )
def test_eq_perm_AxGr(self):
""""
Test eq_perm method of AxGr
"""
X = sg.fieldcls.Ax('X')
Y = sg.fieldcls.Ax('Y')
Z = sg.fieldcls.Ax('Z')
self.assertEqual( (X*Y).eq_perm(Y*X), (1,0) )
self.assertEqual( (X*Y).eq_perm(Y*Z), None )
X2 = sg.fieldcls.Ax('X2')
self.assertEqual( (X*Y).eq_perm(Y*X2), None )
X2.make_equiv(X)
self.assertEqual( (X*Y).eq_perm(Y*X2), (1,0) )
def test_ax_div_mult(self):
# set up some independent axes to test on:
a1 = sg.fieldcls.Ax(name='a1')
a2 = sg.fieldcls.Ax(name='a2')
a3 = sg.fieldcls.Ax(name='a3')
a4 = sg.fieldcls.Ax(name='a4')
self.assertEqual(len(a1*a2*a3),3)
self.assertEqual((a1*a2*a3)/a2 , a1*a3 )
self.assertEqual((a1*a3)/a2 , a1*a3 )
class TestGr(unittest.TestCase):
def setUp(self):
print 'Setting up %s'%type(self).__name__
def provide_axis(cstack):
for i, c in enumerate(cstack):
cstack[i].axis = cstack[i].direction
return cstack
# Note that some coord values are deliberately unordered.
# Coords ---
coord1 = sg.fieldcls.Coord(name = 'test1',direction ='X',value =np.array([1.,2.,3.]) , metadata = {'hi':5} )
coord2 = sg.fieldcls.Coord(name = 'test2',direction ='Y',value =np.array([1.,2.,3.,4.]), metadata = {'hi':7})
coord3 = sg.fieldcls.Coord(name = 'test',direction ='X',value =np.array([5.,1.,2.,3.,4.]), metadata = {'hi':3})
# identical in main attributes to previous set (in order):
coord4 = sg.fieldcls.Coord(name = 'test1',direction ='X',value =np.array([1.,2.,3.]), metadata = {'hi':8})
coord5 = sg.fieldcls.Coord(name = 'test2',direction ='Y',value =np.array([1,2,3, 4]), metadata = {'hi':10})
coord6 = sg.fieldcls.Coord(name = 'test',direction ='X',value =np.array([5,1,2,3, 4]), metadata = {'hi':12})
# providing coord1 and coord2 with duals. coord3 is self-dual
coord1_edges = sg.fieldcls.Coord(name = 'test1_edges',direction ='X',value =np.array([0.5,1.5,2.5,3.5]), dual = coord1 , metadata = {'hi':25} )
coord2_edges = sg.fieldcls.Coord(name = 'test2_edges',direction ='Y',value =np.array([0.5,1.5,2.5,3.5,4.5]), dual = coord2, metadata = {'hi':77})
# identical in main attributes to previous set (in order):
coord4_edges = sg.fieldcls.Coord(name = 'test1_edges',direction ='X',value =np.array([0.5,1.5,2.5,3.5]), dual = coord4 , metadata = {'hi':25} )
coord5_edges = sg.fieldcls.Coord(name = 'test2_edges',direction ='Y',value =np.array([0.5,1.5,2.5,3.5,4.5]), dual = coord5, metadata = {'hi':77})
# YCoords ---
ycoord1 = sg.fieldcls.YCoord(name = 'test1',direction ='X',value =np.array([1.,2.,3.]) , metadata = {'hi':5} )
ycoord2 = sg.fieldcls.YCoord(name = 'test2',direction ='Y',value =np.array([1.,2.,3.,4.]), metadata = {'hi':7})
ycoord3 = sg.fieldcls.YCoord(name = 'test',direction ='X',value =np.array([5.,1.,2.,3.,4.]), metadata = {'hi':3})
# identical in main attributes to previous set (in order):
ycoord4 = sg.fieldcls.YCoord(name = 'test1',direction ='X',value =np.array([1.,2.,3.]), metadata = {'hi':8})
ycoord5 = sg.fieldcls.YCoord(name = 'test2',direction ='Y',value =np.array([1.,2.,3., 4.]), metadata = {'hi':10})
ycoord6 = sg.fieldcls.YCoord(name = 'test',direction ='X',value =np.array([5.,1.,2.,3., 4.]), metadata = {'hi':12})
ycoord1_edges = sg.fieldcls.YCoord(name = 'test1_edges',direction ='X',value =np.array([0.5,1.5,2.5,3.5]), dual = ycoord1 , metadata = {'hi':25} )
ycoord2_edges = sg.fieldcls.YCoord(name = 'test2_edges',direction ='Y',value =np.array([0.5,1.5,2.5,3.5,4.5]), dual = ycoord2, metadata = {'hi':77})
# identical in main attributes to previous set (in order):
ycoord4_edges = sg.fieldcls.YCoord(name = 'test1_edges',direction ='X',value =np.array([0.5,1.5,2.5,3.5]), dual = ycoord4 , metadata = {'hi':25} )
ycoord5_edges = sg.fieldcls.YCoord(name = 'test2_edges',direction ='Y',value =np.array([0.5,1.5,2.5,3.5,4.5]), dual = ycoord5, metadata = {'hi':77})
# XCoords ---
xcoord1 = sg.fieldcls.XCoord(name = 'test1',direction ='X',value =np.array([1.,2.,3.]) , metadata = {'hi':5} )
xcoord2 = sg.fieldcls.XCoord(name = 'test2',direction ='Y',value =np.array([1.,2.,3.,4.]), metadata = {'hi':7})
xcoord3 = sg.fieldcls.XCoord(name = 'test',direction ='X',value =np.array([5.,1.,2.,3.,4.]), metadata = {'hi':3})
# identical in main attributes to previous set (in order):
xcoord4 = sg.fieldcls.XCoord(name = 'test1',direction ='X',value =np.array([1.,2.,3.]), metadata = {'hi':8})
xcoord5 = sg.fieldcls.XCoord(name = 'test2',direction ='Y',value =np.array([1.,2.,3., 4.]), metadata = {'hi':10})
xcoord6 = sg.fieldcls.XCoord(name = 'test',direction ='X',value =np.array([5.,1.,2.,3., 4.]), metadata = {'hi':12})
xcoord1_edges = sg.fieldcls.XCoord(name = 'test1_edges',direction ='X',value =np.array([0.5,1.5,2.5,3.5]), dual = xcoord1 , metadata = {'hi':25} )
xcoord2_edges = sg.fieldcls.XCoord(name = 'test2_edges',direction ='Y',value =np.array([0.5,1.5,2.5,3.5,4.5]), dual = xcoord2, metadata = {'hi':77})
# identical in main attributes to previous set (in order):
xcoord4_edges = sg.fieldcls.XCoord(name = 'test1_edges',direction ='X',value =np.array([0.5,1.5,2.5,3.5]), dual = xcoord4 , metadata = {'hi':25} )
xcoord5_edges = sg.fieldcls.XCoord(name = 'test2_edges',direction ='Y',value =np.array([0.5,1.5,2.5,3.5,4.5]), dual = xcoord5, metadata = {'hi':77})
# we are testing for Coord, YCoord and XCoord
cstack1 = provide_axis([coord1,coord2,coord3,coord1_edges,coord2_edges])
cstack2 = provide_axis([coord4,coord5,coord6,coord4_edges,coord5_edges])
ycstack1 = provide_axis([ycoord1,ycoord2,ycoord3,ycoord1_edges,ycoord2_edges])
ycstack2 = provide_axis([ycoord4,ycoord5,ycoord6,ycoord4_edges,ycoord5_edges])
xcstack1 = provide_axis([xcoord1,xcoord2,xcoord3,xcoord1_edges,xcoord2_edges])
xcstack2 = provide_axis([xcoord4,xcoord5,xcoord6,xcoord4_edges,xcoord5_edges])
X = sg.fieldcls.Ax('X')
Y = sg.fieldcls.Ax('Y')
coord1.axis = X
coord2.axis = Y
coord4.axis = X
coord5.axis = Y
self.fixture = [cstack1, cstack2, ycstack1, ycstack2,xcstack1, xcstack2,]
def tearDown(self):
print 'Tearing down %s'%type(self).__name__
del self.fixture
def test_copy_Gr(self):
""""
Test copy method of Gr
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
gr_copy = (coord1*coord2).copy()
self.assertEqual( gr_copy.__repr__(), '(test1, test2)')
def test_Gr_array_equal_method(self):
""""
Test array_equal method of Gr
"""
cstack1 = self.fixture[0]
cstack2 = self.fixture[1]
coord1 = cstack1[0]
coord2 = cstack1[1]
coord4 = cstack2[0]
coord5 = cstack2[1]
self.assertEqual( (coord1*coord2).array_equal(coord4*coord5), [True ,True] )
def test_Gr_axis_method_repr(self):
"""
Test the repr of the AxGr returned by the axis method of Gr.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
self.assertEqual( (coord1*coord2).axis().__repr__(), '(X,Y,)')
def test_Gr_axis_method(self):
"""
Test axis method of Gr after assigning axes via give_axis.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
X = sg.fieldcls.Ax('X')
Y = sg.fieldcls.Ax('Y')
coord1.give_axis(X)
coord2.give_axis(Y)
self.assertEqual( (coord1*coord2).axis() , X*Y )
def test_Gr_reverse_method(self):
"""
Test reverse method of Gr.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
self.assertEqual( (coord1*coord2).reverse() , coord2*coord1 )
def test_Gr_is_equiv_method(self):
"""
Test is_equiv method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
cstack2 = self.fixture[1]
coord4 = cstack2[0]
coord5 = cstack2[1]
self.assertEqual( (coord1*coord2).is_equiv(coord5*coord4) , False )
coord1.make_equiv(coord4)
coord2.make_equiv(coord5)
self.assertEqual( (coord1*coord2).is_equiv(coord5*coord4) , True )
def test_Gr_eq_in_method(self):
"""
Test Gr.eq_in method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
cstack2 = self.fixture[1]
coord4 = cstack2[0]
coord5 = cstack2[1]
self.assertEqual( (coord1*coord2).eq_in(coord4) , False )
coord1.make_equiv(coord4)
# coord2.make_equiv(coord5)
self.assertEqual( (coord1*coord2).eq_in(coord4) , True )
def test_Gr_rearrange_method(self):
"""
Test Gr.rearrange method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
self.assertEqual( (coord1*coord2).rearrange([1,0]) , coord2*coord1 )
def test_Gr_perm_method(self):
"""
Test perm method of Gr.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
cstack2 = self.fixture[1]
coord4 = cstack2[0]
coord5 = cstack2[1]
self.assertEqual( (coord1*coord2).perm(coord2*coord1) , (1,0) )
self.assertEqual( (coord1*coord2).perm(coord5*coord4) is None , True )
coord1.make_equiv(coord4)
coord2.make_equiv(coord5)
self.assertEqual( (coord1*coord2).perm(coord5*coord4) , None )
def test_Gr_eq_perm_method(self):
"""
Test eq_perm method of Gr.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
cstack2 = self.fixture[1]
coord4 = cstack2[0]
coord5 = cstack2[1]
self.assertEqual( (coord1*coord2).eq_perm(coord2*coord1) , (1,0) )
self.assertEqual( (coord1*coord2).eq_perm(coord5*coord4) is None , True )
coord1.make_equiv(coord4)
coord2.make_equiv(coord5)
self.assertEqual( (coord1*coord2).eq_perm(coord5*coord4) , (1,0) )
def test_Gr_shape_method(self):
"""
Test Gr.shape method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
self.assertEqual( (coord1*coord2).shape() , (3,4) )
def test_Gr_ones_method(self):
"""
Test Gr.ones method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
cstack2 = self.fixture[1]
coord4 = cstack2[0]
coord5 = cstack2[1]
K = (coord1*coord2).ones()
self.assertEqual( K.value.shape , (3,4) )
def test_Gr_der_method(self):
"""
Test Gr.der method.
"""
y_step = 30
xcoord1 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
xcoord2 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
ycoord1 = sg.fieldcls.YCoord(name = 'testy',direction ='Y',value =np.arange(-90.,90.+y_step,y_step) )
# construct simple test Field:
K = xcoord1.delta_dist(ycoord1)
# take derivative:
R= (ycoord1*xcoord1).der(xcoord1, K)
# This must be a 2D Field:
self.assertEqual(R.shape, (7,4))
Idx = R.value == 0.
# Few tests to distinguish between possible problem causes:
self.assertEqual(Idx.all(),True)
self.assertEqual( np.array_equal( R.value, xcoord1.der(K,ycoord1).value ) , True )
def test_vol_method(self):
"""
Test Coord vol method.
"""
cstack1 = self.fixture[0]
coord1 = cstack1[0]
coord2 = cstack1[1]
coord3 = cstack1[2]
self.assertEqual( np.array_equal( coord1.vol(coord1*coord2).value, np.array([ 1., 1., 1.]) ), True )
self.assertRaises(ValueError, coord1.vol , coord2*coord3 )
def test__find_args_coord_method(self):
"""
Test Gr _find_args_coord method.
"""
y_step = 30
xcoord1 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
xcoord2 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
ycoord1 = sg.fieldcls.YCoord(name = 'testy',direction ='Y',value =np.arange(-90.,90.+y_step,y_step) )
# construct simple test Field:
K = xcoord1.delta_dist(ycoord1)
A = (ycoord1*xcoord1)._find_args_coord({'x_coord':sg.fieldcls.XCoord,'y_coord':sg.fieldcls.YCoord,'z_coord':sg.fieldcls.Coord})
self.assertEqual(A,[[], [ycoord1]] )
A = (xcoord1*ycoord1)._find_args_coord({'x_coord':sg.fieldcls.XCoord,'y_coord':sg.fieldcls.YCoord,'z_coord':sg.fieldcls.Coord})
self.assertEqual(A,[ [ycoord1] , [] ] )
def test__values_Gr_method(self):
"""
Test values method of Gr class.
"""
y_step = 30
xcoord1 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
ycoord1 = sg.fieldcls.YCoord(name = 'testy',direction ='Y',value =np.arange(-90.,90.+y_step,y_step) )
grid = ycoord1*xcoord1
V=grid.values()
self.assertEqual(np.array_equal(V[0],grid[0].value),True)
def test__meshgrid_Gr_method(self):
"""
Test meshgrid method of Gr class.
"""
y_step = 30
xcoord1 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
ycoord1 = sg.fieldcls.YCoord(name = 'testy',direction ='Y',value =np.arange(-90.,90.+y_step,y_step) )
grid = ycoord1*xcoord1
Y,X=grid.meshgrid()
self.assertEqual(Y.shape,(4,7))
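# Illustrative sketch (not part of the original test suite): the (4,7) shape
# above matches numpy's default 'xy' meshgrid indexing, where the first input
# varies along the second output axis. That Gr.meshgrid wraps np.meshgrid is
# an assumption inferred from the shape assertion.
import numpy as np
_Y, _X = np.meshgrid(np.arange(-90., 120., 30.), np.arange(0., 360., 90.))
assert _Y.shape == (4, 7)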
def test__call_on_members_Gr_method(self):
"""
Test call_on_members method of Gr class.
"""
y_step = 30
xcoord1 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
xcoord2 = sg.fieldcls.XCoord(name = 'testx',direction ='X',value =np.arange(0.,360.,90.) )
ycoord1 = sg.fieldcls.YCoord(name = 'testy',direction ='Y',value =np.arange(-90.,90.+y_step,y_step) )
grid = ycoord1*xcoord1
R= grid.call_on_members('__neg__') # make values negative as via -1 multiplication
# This must be a 2D Field:
self.assertEqual(R.shape(), (7,4))
self.assertEqual( np.array_equal( R[0].value, -grid[0].value ), True )
self.assertEqual( np.array_equal( R[1].value, -grid[1].value ), True )
# ------------- Test utilsg.py module --------------------
class TestUtilsg(unittest.TestCase):
def test_id_index_id_in_rem_equivs_functions(self):
"""Test id_index, id_in and rem_equivs from sg.utilsg.
"""
# set up some axes to test on:
a1 = sg.fieldcls.Ax(name='a1')
a2 = sg.fieldcls.Ax(name='a2')
a3 = sg.fieldcls.Ax(name='a3')
a4 = sg.fieldcls.Ax(name='a4')
# b2 is equivalent to a2, b3 to none.
b2 = sg.fieldcls.Ax(name='a2', direction ='Q', long_name='Q')
b3 = sg.fieldcls.Ax(name='b3', direction ='Q', long_name='Q')
# the tests
self.assertEqual(sg.utilsg.id_in([a1,a2,a3,a4],b2 ) , True)
self.assertEqual(sg.utilsg.id_in([a1,a2,a3,a4],b3 ) , False)
self.assertEqual(sg.utilsg.id_index([a1,a2,a3,a4],b2 ) , 1)
self.assertEqual(sg.utilsg.id_index([a1,a2,a3,a4],b3 ) , None)
self.assertEqual(sg.utilsg.rem_equivs([a1,a2,a3]+[b2,] ), [a1, a2, a3] )
def test_get_att_function(self):
"""Tests sg.utilsg.get_att
"""
# define some test class with some attributes
class Tmp(object):
test =0
test2=20
test3=30
W = Tmp()
self.assertEqual(sg.utilsg.get_att(W,['test','test2'] ), 0 )
self.assertEqual(sg.utilsg.get_att(W,['test2','test'] ), 20 )
self.assertEqual(sg.utilsg.get_att(W,['test100'] ), None )
def test_merge_function(self):
"""
Tests whether 2 test arrays are properly merged.
"""
# two test arrays to merge
A = np.array([1.,2.,3.,4.])
B = np.array([-10.,1.5,2.5,3.5,4.5,11.])
self.assertEqual( np.array_equal(sg.utilsg.merge(A,B), np.array([-10. , 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. , 4.5, 11. ]) ), True )
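# Illustrative sketch (not part of the original test suite): on these inputs
# merge behaves like a sorted union of unique values, i.e. np.union1d; whether
# sg.utilsg.merge is implemented that way is an assumption.
import numpy as np
_A = np.array([1., 2., 3., 4.])
_B = np.array([-10., 1.5, 2.5, 3.5, 4.5, 11.])
assert np.array_equal(np.union1d(_A, _B), np.array([-10., 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 11.]))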
# 3 tests for very simple function sublist in utilsg.py
# --------------
def test_sublist(self):
self.assertEqual(sg.utilsg.sublist(['test','hi'] ,'hi' ) , ['hi'])
def test_sublist_all(self):
self.assertEqual(sg.utilsg.sublist(['test','hi'] ,'*' ) , ['test','hi'])
def test_sublist_none(self):
self.assertEqual(sg.utilsg.sublist(['test','hi'] ,'ho' ) , [])
# -------------
def test_add_alias(self):
"""
Create some test coords to test the add_alias function in utilsg.py.
An alias attribute is assigned, which is the same as the name attribute unless the name appears more than once.
Two names are the same in this example, and in the created alias, the second of those two names must receive a suffix "2".
"""
coord1 = sg.fieldcls.Coord(name = 'test',direction ='X',value =np.array([1.,2.,3.]) , metadata = {'hi':5} )
coord2 = sg.fieldcls.Coord(name = 'test',direction ='Y',value =np.array([1.,2.,3.,4.]), metadata = {'hi':7})
coord3 = sg.fieldcls.Coord(name = 'test3',direction ='X',value =np.array([5.,1.,2.,3.,4.]), metadata = {'hi':3})
coord4 = sg.fieldcls.Coord(name = 'test4',direction ='X',value =np.array([5.,1.,2.,3.,4.]), metadata = {'hi':5})
L = sg.utilsg.add_alias([coord1, coord2, coord3, coord4])
# test that alias is correct (same as names, but if the same name occurs >1 times, it is numbered)
self.assertEqual([it.alias for it in L] , ['test', 'test2', 'test3', 'test4'] )
# test that names remain the same
self.assertEqual([it.name for it in L] , ['test', 'test', 'test3', 'test4'] )
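# Illustrative sketch (not part of the original test suite): the alias rule
# asserted above can be reproduced by numbering repeat occurrences of a name
# from 2 upward. The helper below is hypothetical, not the sg implementation.
def _make_aliases(names):
    seen = {}
    out = []
    for n in names:
        seen[n] = seen.get(n, 0) + 1
        out.append(n if seen[n] == 1 else n + str(seen[n]))
    return out
assert _make_aliases(['test', 'test', 'test3', 'test4']) == ['test', 'test2', 'test3', 'test4']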
def test_find_perm_function_equal_length_permutables(self):
"""
Test whether the permutation between two permutable lists yields the right result.
"""
left = ['a','b','c']
right = ['c','a','b']
perm = sg.utilsg.find_perm(left,right)
self.assertEqual([left[i] for i in perm] , right)
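# Illustrative sketch (not part of the original test suite): for hashable,
# non-repeating items a permutation satisfying [left[i] for i in perm] == right
# can be built by index lookup. The helper below is hypothetical; sg's
# find_perm also handles Coord equivalence, which this sketch ignores.
def _find_perm(left, right):
    if sorted(left) != sorted(right):
        return None
    return tuple(left.index(x) for x in right)
assert _find_perm(['a', 'b', 'c'], ['c', 'a', 'b']) == (2, 0, 1)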
def test_find_perm_function_non_equal_length(self):
"""
Test whether the permutation between two non-permutable lists yields the right result.
"""
left = ['a','b','c']
right = ['c','a']
perm = sg.utilsg.find_perm(left,right)
self.assertEqual(perm, None)
def test_find_perm_function_equal_length_non_permutables(self):
"""
Test that no permutation is found between two equal-length lists that are not permutations of each other.
"""
a=sg.fieldcls.Coord('a')
b=sg.fieldcls.Coord('b')
c=sg.fieldcls.Coord('c')
left = [a,b]
right = [b,c]
perm = sg.utilsg.find_perm(left,right)
self.assertEqual(perm, None)
def test_simple_glob_function_left_wildcard(self):
self.assertEqual(sg.utilsg.simple_glob(['foo','bar'],'*oo' ), ['foo'] )
def test_simple_glob_function_right_wildcard(self):
self.assertEqual(sg.utilsg.simple_glob(['foo','bar'],'oo*' ), ['foo'] )
def test_simple_glob_function_double_wildcard(self):
self.assertEqual(sg.utilsg.simple_glob(['foo','bar','vroom'],'*oo*' ), ['foo','vroom'] )
def test_simple_glob_function_no_wildcard(self):
self.assertEqual(sg.utilsg.simple_glob(['foo','bar','vroom'],'oo' ), [] )
def test_end_of_filepath_function(self):
self.assertEqual(sg.utilsg.end_of_filepath('/test/foo/bar'), 'bar')
self.assertEqual(sg.utilsg.end_of_filepath('/foo/bar/'), 'bar')
self.assertEqual(sg.utilsg.end_of_filepath('foo/bar/'), 'bar')
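# Illustrative sketch (not part of the original test suite): all three
# assertions above are consistent with taking the last non-empty path
# component. The helper below is hypothetical, not the sg implementation.
def _end_of_filepath(p):
    return p.rstrip('/').split('/')[-1]
assert _end_of_filepath('/test/foo/bar') == 'bar'
assert _end_of_filepath('/foo/bar/') == 'bar'
assert _end_of_filepath('foo/bar/') == 'bar'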
class TestExper(unittest.TestCase):
def setUp(self):
print 'Setting up %s'%type(self).__name__
D = sg.info_dict()
P = sg.Project(D['my_project'])
#P.load('O_temp')
self.fixture = P
def tearDown(self):
print 'Tearing down %s'%type(self).__name__
del self.fixture
def test_load_method_non_existent_var(self):
P = self.fixture
E = P['DPO']
varname = 'this_doesnt_exist'
# attempt to load non-existent field
P.load(varname)
self.assertEqual(len(E.vars),0)
def test_load_method_existent_var(self):
P = self.fixture
E = P['DPO']
varname = 'O_temp'
# load an existing field
P.load(varname)
self.assertEqual(len(E.vars),1)
def test_load_method_multiple_existent_var(self):
P = self.fixture
E = P['DPO']
varnames = ['A_sat', 'A_slat' ]
# load multiple existing fields
P.load(varnames)
self.assertEqual(len(E.vars),2)
def test_get_function_of_Exper_not_loaded(self):
# try to get a Field that has not been loaded yet from the Exper object => None returned.
E = self.fixture['DPO']
self.assertEqual(E.get('O_temp'),None)
def test_get_of_Exper(self):
# try to get a Field that has been loaded from the Exper object => Field object.
E = self.fixture['DPO']
self.fixture.load('O_temp')
self.assertEqual(str(E.get('O_temp')), 'O_temp')
def test_delvar_method_of_Exper(self):
# try to delete a Field that has been loaded from the Exper object
E = self.fixture['DPO']
self.fixture.load(['O_temp','O_sal','A_sat','A_shum'])
E.delvar('O_temp')
self.assertEqual(E.get('O_temp') is None, True)
E.delvar(['O_sal','A_sat'])
self.assertEqual(E.get('O_sal') is None, True)
self.assertEqual(E.get('A_sat') is None, True)
del E['A_shum']
self.assertEqual(E.get('A_shum') is None, True)
# tests around coord and grid aspects of fields
class TestCoordField(unittest.TestCase):
def setUp(self):
print 'Setting up %s'%type(self).__name__
D = sg.info_dict()
P = sg.Project(D['my_project'])
P.load('O_temp')
self.fixture = P
def tearDown(self):
print 'Tearing down %s'%type(self).__name__
del self.fixture
def test_field_grid_len(self):
self.assertEqual(len(self.fixture['DPO']['O_temp'].grid),3)
def test_field_shape(self):
self.assertEqual(self.fixture['DPO']['O_temp'].shape,self.fixture['DPO']['O_temp'].grid.shape())
def test_coord(self):
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
self.assertEqual( latitude*(longitude*latitude) , longitude*latitude )
def test_coord_mult2(self):
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
self.assertEqual( latitude_edges*(longitude*latitude) , longitude*latitude_edges )
def test_coord_div(self):
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
self.assertEqual( (longitude*latitude)/longitude , latitude**2 )
def test_coord_dual(self):
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
self.assertEqual( longitude.dual, longitude_edges )
def test_coord_mul_field(self):
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
self.assertEqual( (longitude*self.fixture['DPO']['O_temp']).shape, (19,100) )
def test_coord_div_field(self):
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
self.assertEqual( (self.fixture['DPO']['O_temp'] / longitude).shape, (19,100) )
def test_coord_2D_div_field(self):
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
self.assertEqual( (self.fixture['DPO']['O_temp'] / (longitude*latitude ) ).shape, (19,) )
def test_ax_mul_field(self):
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
self.assertEqual( (X*self.fixture['DPO']['O_temp'] ).shape, (19, 100) )
def test_can_I_divide_field_by_ax_shape(self):
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
self.assertEqual( (self.fixture['DPO']['O_temp'] / X ).shape, (19, 100) )
def test_can_I_divide_field_by_ax2D_shape(self):
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
self.assertEqual( (self.fixture['DPO']['O_temp'] / (X*Y ) ).shape, (19,) )
def test_avg_temp_value(self):
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
self.assertAlmostEqual( self.fixture['DPO']['O_temp']/ (X*Y*Z) , 3.9464440090035104 , places =2)
def test_field_derivative_units(self):
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
TEMP = self.fixture['DPO']['O_temp']
self.assertEqual(latitude.der(TEMP).units,u'C/m')
def test_avg_temp_masked_value(self):
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
# Choose a node inside the Atlantic. This calls the floodfill functions on the Atlantic,
# creating a mask that maskout uses to block out the rest of the ocean, and so allows
# computation of the average Atlantic temperature.
self.assertAlmostEqual( self.fixture['DPO']['O_temp'][Y,33:].maskout( node = (X,85,Y,30) )/ (X*Y*Z) , 4.788920703061341 , places =2)
def test_field_and_grid_mean_method(self):
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
TEMP = self.fixture['DPO']['O_temp']
gr = latitude*longitude
A = TEMP.mean(gr)
B = gr.mean(TEMP)
self.assertAlmostEqual( (A.value-B.value).sum() , 0.,7 )
def test_field_getitem_with_grid_raises(self):
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
TEMP = self.fixture['DPO']['O_temp']
gr = latitude*longitude
with self.assertRaises( ValueError):
TEMP[gr]
def test_field_regrid_method(self):
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
TEMP = self.fixture['DPO']['O_temp']
# this takes slices orthogonal to the argument grid (depth**2) and returns them as a list
L=TEMP.regrid(depth**2)
self.assertEqual(len(L ), 19 )
self.assertEqual(isinstance(L, list),True )
def test_field_transpose_method(self):
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
# test the reverse method on grids first, before we use it:
self.assertEqual( (latitude*longitude).reverse() == longitude*latitude , True )
TEMP = self.fixture['DPO']['O_temp']
SAT = self.fixture['DPO']['A_sat']
TEMP_t = TEMP.transpose()
self.assertEqual(TEMP_t.grid, TEMP.grid.reverse() )
# need to get rid of the nans to use np.array_equal
val1 = TEMP_t.value
val1[np.isnan(val1)] = -999.
val2 = TEMP.value.transpose()
val2[np.isnan(val2)] = -999.
self.assertEqual(np.array_equal(val1, val2), True )
new_grid = latitude*depth*longitude
val1 = TEMP.transpose(new_grid).value
val1[np.isnan(val1)] = -999.
val2 = TEMP.regrid(new_grid).value
val2[np.isnan(val2)] = -999.
self.assertEqual(np.array_equal(val1 , val2 ), True )
def test_avg_temp_value_after_regrid(self):
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
# load velocity to get the velocity grid
self.fixture.load('O_velX')
TEMP_regrid = self.fixture['DPO']['O_temp'].regrid(self.fixture['DPO']['O_velX'].grid)
self.assertAlmostEqual( TEMP_regrid/ (X*Y*Z) , 4.092108709111132 , places =2)
def test_squeezed_dims_worked_on_loading(self):
self.assertEqual( len(self.fixture['DPO']['O_temp'].squeezed_dims) , 1 )
def test_if_unsqueezing_adds_dims(self):
self.assertEqual( len( (sg.unsqueeze(self.fixture['DPO']['O_temp']) ).grid ) , 4 )
def test_if_unsqueezing_removes_squeezed_dims(self):
self.assertEqual( len( (sg.unsqueeze(self.fixture['DPO']['O_temp']) ).squeezed_dims ) , 0 )
def test_Gr_squeeze_method(self):
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
TEMP = self.fixture['DPO']['O_temp']
G = TEMP.grid
self.assertEqual( len(G.squeeze()[0] ) , 3 )
G = (TEMP[Y,50]).grid
self.assertEqual( len(G.squeeze()[0] ) , 2 )
G = (TEMP[Z,0,X,10]).grid
self.assertEqual( len(G.squeeze()[0] ) , 1 )
def test_squeeze_multiple_1dim(self):
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
TEMP = self.fixture['DPO']['O_temp']
K=TEMP[Y,50]
self.assertEqual( (sg.squeeze(K)).shape , (19, 100) )
K=TEMP[Z,0,X,10]
self.assertEqual( (sg.squeeze(K)).shape , (100,) )
class TestFieldBasic(unittest.TestCase):
def setUp(self):
print 'Setting up %s'%type(self).__name__
D = sg.info_dict()
P = sg.Project(D['my_project'])
P.load(['O_temp','A_sat'])
self.fixture = P
def tearDown(self):
print 'Tearing down %s'%type(self).__name__
del self.fixture
def test_slice_NH(self):
SAT = self.fixture['DPO']['A_sat']
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
SAT_sliced = SAT[Y,:50]
self.assertEqual( SAT_sliced.shape , (50,100) )
def test_slice_one_lat(self):
SAT = self.fixture['DPO']['A_sat']
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
SAT_sliced = SAT[Y,50]
self.assertEqual( SAT_sliced.shape , (1,100) )
def test_slice_gridequiv_can_i_add(self):
SAT1 = self.fixture['DPO']['A_sat']
SAT2 = self.fixture['DPC']['A_sat']
for c in self.fixture['DPO'].axes: # get the axes into the namespace
exec c.name + ' = c'
SAT1_sliced = SAT1[Y,10:]
SAT2_sliced = SAT2[Y,10:]
dSAT = SAT1_sliced - SAT2_sliced
self.assertEqual( dSAT.shape , (90,100) )
def test_slice_everything(self):
""" Slicing with : should yield the value attribute, an ndarray
"""
SAT = self.fixture['DPO']['A_sat']
SAT_sliced = SAT[:]
self.assertEqual( isinstance(SAT_sliced, np.ndarray) , True )
def test_concatenate_arg_ax_None(self):
"""
Test the sg.concatenate function with ax argument None.
"""
SAT = self.fixture['DPO']['A_sat']
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
SAT1 = SAT[Y,:40]
SAT2 = SAT[Y,40:55]
SAT3 = SAT[Y,55:]
SAT_combined = sg.concatenate((SAT1,SAT2,SAT3))
self.assertEqual( SAT_combined.shape , (100,100) )
def test_concatenate_arg_ax_not_in_grid(self):
"""
Test the sg.concatenate function with ax argument that points in a different axis direction from the grid. This should lead to a new Coord object that is added to the result grid and that we can examine.
"""
SAT = self.fixture['DPO']['A_sat']
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
SAT1 = SAT[Y,:50]
SAT2 = SAT[Y,50:]
# Create test Ax to concatenate along.
W = sg.Ax('W')
SAT_combined = sg.concatenate([SAT1,SAT2 ], ax = W )
self.assertEqual( SAT_combined.shape , (2,50,100) )
# concatenate has created a new Coord:
self.assertEqual( np.array_equal(SAT_combined.grid[0].value, np.array([0.,1.]) ), True )
def test_concatenate_arg_new_coord_given(self):
"""
Test the sg.concatenate function with the new_coord argument set to an independent Coord.
"""
SAT = self.fixture['DPO']['A_sat']
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
SAT1 = SAT[Y,:50]
SAT2 = SAT[Y,50:]
# Create test Coord to concatenate along.
W = sg.Ax('W')
w = sg.Coord('w' , axis = W, direction = 'W', value = np.array([0,1]))
SAT_combined = sg.concatenate([SAT1,SAT2 ], new_coord = w )
self.assertEqual( SAT_combined.shape , (2,50,100) )
class TestVectorField(unittest.TestCase):
def setUp(self):
print 'Setting up %s'%type(self).__name__
D = sg.info_dict()
P = sg.Project(D['my_project']);
P.load(['O_velX','O_velY','O_temp'])
self.fixture = P
def tearDown(self):
print 'Tearing down %s'%type(self).__name__
del self.fixture
def test_slice(self):
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
U = self.fixture['DPO']['O_velX']
V = self.fixture['DPO']['O_velY']
TEMP = self.fixture['DPO']['O_temp']
# just to speed up multiplication (otherwise regridding takes place):
TEMP.grid = U.grid
UV = U*V
# Did multiplication yield a 2D vectorfield?
self.assertEqual(len(UV),2)
# scalar field with vector component should yield Field
self.assertEqual(isinstance(TEMP*V, sg.fieldcls.Field),True)
# check that vcumsum and vsum propagate to the Field members of VField:
Ucs = U.vcumsum(coord = latitude_V)
UVcs = UV.vcumsum(coord = latitude_V)
R1 = Ucs.value
R2 = UVcs[0].value
R1[np.isnan(R1)] = 0
R2[np.isnan(R2)] = 0
self.assertEqual(np.array_equal(R1,R2) ,True )
# test whether methods work on members
Ucs = U.vsum_weighted( )
UVcs = UV.vsum_weighted( )
R1 = Ucs
R2 = UVcs[0]
self.assertEqual(R1,R2)
Ucs = U.vsum( )
UVcs = UV.vsum( )
R1 = Ucs
R2 = UVcs[0]
self.assertEqual(R1,R2)
class TestGrid(unittest.TestCase):
def setUp(self):
print 'Setting up %s'%type(self).__name__
D = sg.info_dict()
P = sg.Project(D['my_project']);
P.load(['O_temp','A_sat'])
self.fixture = P
def tearDown(self):
print 'Tearing down %s'%type(self).__name__
del self.fixture
def test_division(self):
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
self.assertEqual((latitude*longitude)/X,latitude**2)
def test_inflate(self):
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
Igr = (depth*latitude*longitude).inflate()
self.assertEqual(Igr[0].shape, (19, 100, 100))
def test_grid_empty_grid_equal(self):
self.assertEqual(sg.Gr() == sg.Gr(), True)
def test_grid_sliced_method(self):
# Test the sliced method of the Gr class.
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
gr1 = depth*latitude*longitude
gr1_sliced = gr1.sliced((X,slice(1,None,None)) )
self.assertEqual(gr1_sliced.shape(), (19, 100, 99) )
self.assertEqual(gr1_sliced[0] is depth, True )
# Try single slab slice:
gr1_sliced = gr1.sliced((X,10))
self.assertEqual(gr1_sliced.shape(), (19,100,1) )
def test_grid_permute_function_equal_len_and_coords(self):
# Corresponds to CASE 1a in equal length grid case in fieldcls.py source code.
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
gr1 = depth*longitude
gr2 = longitude*depth
# define a np array consistent with gr1
A = np.ones( gr1.shape() )
# gr1(gr2) should yield a function transposing ndarrays consistent with gr1 to ndarrays consistent with gr2
self.assertEqual((gr1(gr2)(A)).shape, gr2.shape() )
def test_grid_permute_function_equal_len_equiv_coords_only(self):
# Corresponds to CASE 1b in equal length grid case in fieldcls.py source code.
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
# This time, we are going to a new grid that requires interpolation (on longitude).
gr1 = depth*longitude
gr2 = longitude_V*depth
# define a np array consistent with gr1
A = np.ones( gr1.shape() )
# gr1(gr2) should yield a function transposing ndarrays consistent with gr1 to ndarrays consistent with gr2, and interpolated onto it.
self.assertEqual((gr1(gr2)(A)).shape, gr2.shape() )
def test_grid_permute_function_equal_len_incompatible_coords(self):
# Corresponds to CASE 1c in equal length grid case in fieldcls.py source code.
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
# This time, we are going to a new grid that is incompatible, leading to a None result.
gr1 = depth*longitude
gr2 = latitude*depth
self.assertEqual(gr1(gr2), None )
def test_gr_interpret_slices_function(self):
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
# This time, we are going to a new grid that is incompatible, leading to a None result.
gr1 = depth*latitude*longitude
gr2 = latitude*longitude
slNone = slice(None, None, None)
self.assertEqual(sg.interpret_slices((longitude,10),gr1) == (slNone, slNone, slice(10,11,None) ) , True )
self.assertEqual(sg.interpret_slices((X,10),gr1) == (slNone, slNone, slice(10,11,None) ) , True )
self.assertEqual(sg.interpret_slices((longitude,10),gr1, others = slice(1,None,None)) == (slice(1,None,None), slice(1,None,None), slice(10,11,None) ) , True )
self.assertEqual(sg.interpret_slices((X,1,Y,10),latitude*longitude) == (slice(10, 11, None), slice(1, 2, None)) , True )
self.assertEqual(sg.interpret_slices((X,1,Y,10),latitude*longitude, as_int = True) == (10, 1) , True )
# self.assertEqual(sg.interpret_slices(10 , slNone, 10 )
# self.assertEqual( sg.interpret_slices((slNone, slNone),G) , (slNone, slNone) )
def test_gr_method_expand_size(self):
"""
Test expand method of fieldcls.py
SAT = P['DPO']['A_sat']
SAT.shape is (100,100)
W=SAT.grid.expand(SAT[:],depth**2)
W.shape is (19,100,100)
W contains 19 identical copies (slices) of SAT[:]
"""
SAT = self.fixture['DPO']['A_sat']
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
W=SAT.grid.expand(SAT[:],depth**2)
# W has been expanded, and the other grid (depth**2) should be appended on the left side.
self.assertEqual(W.shape, (19,100,100) )
def test_gr_method_expand_broadcast(self):
"""
Test expand method of fieldcls.py
"""
SAT = self.fixture['DPO']['A_sat']
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
W=SAT.grid.expand(SAT[:],depth**2)
# W contains 19 identical copies (slices) of SAT[:]
K=W[:,50,50]
self.assertEqual((K == K[0]).all() , True )
def test_call_small_gr_on_big_gr(self):
SAT = self.fixture['DPO']['A_sat']
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
# need to do slice test earlier.
SAT2 = SAT[Y,:50]
gr1 = SAT2.grid
gr2 = depth*SAT2.grid
A = SAT2[:]
B = gr1(gr2)(A)
self.assertEqual(B.shape , (19, 50, 100) )
def test_call_small_gr_on_big_gr_permute(self):
"""
corresponds to case 2a of gr class call method in fieldcls.py
"""
SAT = self.fixture['DPO']['A_sat']
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
# need to do slice test earlier.
SAT2 = SAT[Y,:50]
gr1 = SAT2.grid
# note that this does something different for a single Coord left multiplicand:
gr2 = (depth*longitude)*SAT2.grid
A = SAT2[:]
B = gr1(gr2)(A)
self.assertEqual(B.shape , (19, 100, 50) )
def test_call_small_gr_on_big_gr_permute_interp(self):
"""
corresponds to case 2b of gr class call method in fieldcls.py
"""
SAT = self.fixture['DPO']['A_sat']
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
# need to do slice test earlier.
SAT2 = SAT[Y,:50]
gr1 = SAT2.grid
# note that this does something different for a single Coord left multiplicand:
gr2 = (depth*longitude_V)*SAT2.grid
A = SAT2[:]
B = gr1(gr2)(A)
self.assertEqual(B.shape , (19, 100, 50) )
def test_call_small_gr_on_big_gr_not_equiv(self):
"""
corresponds to case 2c of gr class call method in fieldcls.py
"""
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
self.assertEqual(depth(latitude*longitude) , None )
def test_gr_method_reduce_dim1vs3_len_list(self):
"""
Test to_slices method of gr class
"""
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
gr1 = depth**2
gr2 = depth*latitude*longitude
A = np.ones(gr2.shape() )
# should have the length of len(depth)
self.assertEqual(len(gr1.to_slices(A,gr2)) , 19 )
def test_gr_method_reduce_dim1vs3_shape_element(self):
"""
Test to_slices method of gr class
"""
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
gr1 = depth**2
gr2 = depth*latitude*longitude
A = np.ones(gr2.shape() )
# should have the shape of latitude*longitude
self.assertEqual( gr1.to_slices(A,gr2)[0].shape , (100,100) )
def test_gr_method_reduce_dim2vs3_len_list(self):
"""
Test to_slices method of gr class
"""
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
gr1 = depth*latitude
gr2 = depth*latitude*longitude
A = np.ones(gr2.shape() )
# should have the length of len(depth)*len(longitude)
self.assertEqual(len(gr1.to_slices(A,gr2)) , 1900 )
def test_gr_method_to_slices_dim2vs3_shape_element(self):
"""
Test to_slices method of gr class
"""
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
gr1 = depth*latitude
gr2 = depth*latitude*longitude
A = np.ones(gr2.shape() )
# should have the shape of longitude**2
self.assertEqual( gr1.to_slices(A,gr2)[0].shape , (100,) )
def test_Gr_method_dual(self):
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
gr1 = depth*latitude
gr1_dual = gr1.dual()
self.assertEqual(np.array_equal(gr1_dual[0].value , depth_edges.value ) , True )
def test_gr_method_vsum(self):
"""
Test vsum method of gr class
"""
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
gr1 = depth*latitude
# vsum over the full grid yields a scalar. Construct trivial field of ones over grid.
self.assertAlmostEqual( gr1.vsum(gr1.ones() ) , 121672626836.47124 , places =2 )
# Field vsum calls grid vsum, hence they must be equal
self.assertAlmostEqual(gr1.ones().vsum(gr1) , gr1.vsum(gr1.ones()), places =3 )
# vsum_weighted must yield the same as vsum over the entire grid (but not subgrid)
self.assertAlmostEqual(gr1.ones().vsum_weighted(gr1) , gr1.vsum(gr1.ones()), places =3 )
# test vsum for subgrids
self.assertEqual( np.array_equal(gr1.ones().vsum(latitude**2).value , (latitude**2).vsum(gr1.ones()).value) , True )
# single coord argument same as grid with that single coord
self.assertEqual( np.array_equal(gr1.ones().vsum(latitude).value , (latitude**2).vsum(gr1.ones()).value) , True )
# should be able to call vsum method on Ax objects
self.assertEqual( np.array_equal(gr1.ones().vsum(latitude).value , Y.vsum(gr1.ones()).value) , True )
def test_gr_method_vcumsum(self):
"""
Test vcumsum method of gr class
"""
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
gr1 = depth*latitude
# these methods are equivalent
self.assertEqual( np.array_equal(gr1.ones().vcumsum(latitude).value , latitude.vcumsum(gr1.ones()).value) , True )
# should be able to call vsum method on Ax objects
self.assertEqual( np.array_equal(gr1.ones().vcumsum(latitude).value , Y.vcumsum(gr1.ones()).value) , True )
def test_gr_method__find_args_coord(self):
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
ctypes = {'x_coord':sg.XCoord,'y_coord':sg.YCoord,'z_coord':sg.fieldcls.Coord}
self.assertEqual((latitude*longitude)._find_args_coord(coord_types = ctypes) ,
[[], [latitude]] )
def test_gr_method_der_type(self):
"""
Test der method of gr class to see whether it returns a Field
"""
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
gr1 = longitude*latitude
# der should return a Field instance
self.assertEqual( isinstance( gr1.der(longitude,gr1.ones() ) , sg.Field ) , True )
def test_gr_method_der_X(self):
"""
Test der method of gr class along X on a constant field
"""
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
gr1 = longitude*latitude
W = gr1.der(longitude,gr1.ones() )
W.value[np.isnan(W.value)]=1
# derivative of a constant field should sum to zero
self.assertEqual( W.value.sum() , 0.0 )
def test_gr_method_der_Y(self):
"""
Test der method of gr class along Y on a constant field
"""
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
gr1 = depth*latitude
W = gr1.der(latitude,gr1.ones() )
W.value[np.isnan(W.value)]=1
# derivative of a constant field vanishes except for boundary NaNs (one per depth level), which were set to 1 above
self.assertEqual( W.value.sum() , 19.0 )
def test_gr_method_vol(self):
"""
Test volume method
"""
for c in self.fixture['DPO'].cstack:
exec c.name + ' = c'
gr1 = depth*latitude
W = gr1.vol()
# total volume of the depth*latitude grid
self.assertAlmostEqual( W.value.sum() , 121672626836.47124 , places = 2 )
class TestHigherFieldFunctionality(unittest.TestCase):
def setUp(self):
print 'Setting up %s'%type(self).__name__
D = sg.info_dict()
P = sg.Project(D['my_project'])
P.load('F_heat')
self.fixture = P
def tearDown(self):
print 'Tearing down %s'%type(self).__name__
del self.fixture
def test_meridional_heat_transport(self):
P = self.fixture
for c in self.fixture['DPO'].axes:
exec c.name + ' = c'
# obtain oceanic heat flux as sg field object HF from project.
HF = P['DPO']['F_heat']
HF2 = P['DPC']['F_heat']
PHT = Y|(HF*X)*1e-15
PHT2 = Y|(HF2*X)*1e-15
self.assertEqual(PHT.shape, (100,))
self.assertEqual(PHT2.shape, (100,))
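# Hedged reading of the expressions above: multiplying by X integrates the
# heat flux zonally and Y| then takes the primitive along latitude, so PHT
# is the poleward heat transport, scaled by 1e-15 to petawatts.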
# --------- run the classes ------------
if __name__ == '__main__':
unittest.main()
|
willo12/spacegrids
|
tests/tests.py
|
Python
|
bsd-3-clause
| 100,961
|
[
"Gaussian"
] |
450a5078e6097a37de22257f85bd397a93c7cccac09f22cf70bc33a18954aaeb
|
# Copyright (C) 2013,2014 The ESPResSo project
# Copyright (C) 2012 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script generates the file doxy-features
#
from __future__ import print_function
import inspect, sys, os
# find featuredefs.py
moduledir = os.path.dirname(inspect.getfile(inspect.currentframe()))
sys.path.append(os.path.join(moduledir, '..', '..', 'src'))
import featuredefs
import time
if len(sys.argv) != 3:
print("Usage: {} DEFFILE DOXYCONFIG".format(sys.argv[0]), file=sys.stderr)
exit(2)
deffilename, configfilename = sys.argv[1:3]
print("Reading definitions from {}...".format(deffilename))
defs = featuredefs.defs(deffilename)
print("Done.")
print("Writing {}...".format(configfilename))
configfile = open(configfilename, 'w')
configfile.write("""# WARNING: This file was autogenerated by
#
# {}
# on {}
# Do not modify it or your changes will be overwritten!
# Modify features.def instead.
#
# This file is needed so that doxygen will generate documentation for
# all functions of all features.
PREDEFINED = \\
""".format(sys.argv[0], time.asctime()))
for feature in sorted(defs.features):
configfile.write(" {} \\\n".format(feature))
configfile.close()
print("Done.")
|
olenz/espresso
|
doc/doxygen/gen_doxyconfig.py
|
Python
|
gpl-3.0
| 1,862
|
[
"ESPResSo"
] |
24c385e97a22ebf0a32a59f05258389c3f4bdfea0eb87568edca6dd45edac34d
|
#! /usr/bin/env python
import random,sys
import re
import math
import collections
import numpy as np
import time
import operator
from scipy.io import mmread, mmwrite
from random import randint
from sklearn import cross_validation
from sklearn import linear_model
from sklearn.grid_search import GridSearchCV
from sklearn import preprocessing as pp
from sklearn.svm import SVR
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.decomposition import ProbabilisticPCA, KernelPCA
from sklearn.decomposition import NMF
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression, Ridge, Lasso, ElasticNet
import scipy.stats as stats
from sklearn import tree
from sklearn.feature_selection import f_regression
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc, f1_score
from sklearn.gaussian_process import GaussianProcess
import features
# working directory
dir = '.'
label_index = 770
# load train data
def load_train_fs():
# In the validation process, the training data was randomly shuffled first.
# For the prediction process, there is no need to shuffle the dataset.
# Owing to memory limits, the Gaussian process uses only part of the training
# data, so its predictions may differ slightly from a model trained on the
# shuffled data.
train_fs = np.genfromtxt(open(dir + '/train_v2_balance_5000.csv','rb'), delimiter=',', skip_header=1)
col_mean = stats.nanmean(train_fs, axis=0)
inds = np.where(np.isnan(train_fs))
train_fs[inds] = np.take(col_mean, inds[1])
train_fs[np.isinf(train_fs)] = 0
return train_fs
# load test data
def load_test_fs():
test_fs = np.genfromtxt(open(dir + '/train_v2.csv','rb'), delimiter=',', skip_header = 1)
col_mean = stats.nanmean(test_fs, axis=0)
inds = np.where(np.isnan(test_fs))
test_fs[inds] = np.take(col_mean, inds[1])
test_fs[np.isinf(test_fs)] = 0
return test_fs
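# Hedged note on the imputation above: np.where(np.isnan(a)) returns a tuple
# of (row_indices, col_indices), so np.take(col_mean, inds[1]) substitutes the
# column-wise nanmean for each NaN cell before any infinities are zeroed.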
# extract features from test data
def test_type(test_fs):
x_Test = test_fs[:,range(1, label_index)]
return x_Test
# extract features from train data
def train_type(train_fs):
print(len(train_fs))
print(len(train_fs[1]))
print (type(train_fs))
count=0
train_x_temp=[]
default_count=0
non_default_count = 0
while(default_count<2500 or non_default_count<2500):
randline=random.choice(train_fs)
if randline[-1]==0 and non_default_count < 2500:
non_default_count+=1
train_x_temp.append(randline)
elif randline[-1] !=0 and default_count < 2500:
default_count+=1
train_x_temp.append(randline)
print(len(train_x_temp))
print(len(train_x_temp[1]))
train_x_temp=np.array(train_x_temp).reshape(len(train_x_temp),label_index+1)
print(train_x_temp[1])
print(len(train_x_temp))
print(len(train_x_temp[1]))
print(range(1, label_index))
print(type(train_x_temp))
train_x = train_x_temp[:,range(1, label_index)]
train_y= train_x_temp[:,-1]
print (type(train_x))
print (type(train_y))
print(len(train_y))
return train_x, train_y
# transform the loss to the binary form
def toLabels(train_y):
labels = np.zeros(len(train_y))
labels[train_y>0] = 1
return labels
# generate the output file based on the predictions
def output_preds(preds):
out_file = dir + '/output_balance_5000.csv'
fs = open(out_file,'w')
fs.write('id,loss\n')
for i in range(len(preds)):
if preds[i] > 100:
preds[i] = 100
elif preds[i] < 0:
preds[i] = 0
strs = str(i+105472) + ',' + str(np.float(preds[i]))
fs.write(strs + '\n');
fs.close()
return
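# Hypothetical vectorized equivalent (not part of the original pipeline) of
# the clamping loop in output_preds, using np.clip on a 1-D array:
def clip_preds(preds, lo=0.0, hi=100.0):
    return np.clip(np.asarray(preds, dtype=float), lo, hi)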
# get the top feature indexes by invoking f_regression
def getTopFeatures(train_x, train_y, n_features=100):
f_val, p_val = f_regression(train_x,train_y)
f_val_dict = {}
p_val_dict = {}
for i in range(len(f_val)):
if math.isnan(f_val[i]):
f_val[i] = 0.0
f_val_dict[i] = f_val[i]
if math.isnan(p_val[i]):
p_val[i] = 0.0
p_val_dict[i] = p_val[i]
sorted_f = sorted(f_val_dict.iteritems(), key=operator.itemgetter(1),reverse=True)
sorted_p = sorted(p_val_dict.iteritems(), key=operator.itemgetter(1),reverse=True)
feature_indexs = []
for i in range(0,n_features):
feature_indexs.append(sorted_f[i][0])
# print len(feature_indexs)
return feature_indexs
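# Hypothetical alternative sketch (not part of the original pipeline): the
# same F-test ranking via sklearn's SelectKBest, which sorts internally.
def getTopFeaturesSKB(train_x, train_y, n_features=100):
    from sklearn.feature_selection import SelectKBest
    skb = SelectKBest(f_regression, k=n_features).fit(train_x, train_y)
    scores = np.nan_to_num(skb.scores_)
    return list(np.argsort(scores)[::-1][:n_features])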
# generate the new data, based on which features are generated, and used
def get_data(train_x, feature_indexs, feature_minus_pair_list=[], feature_plus_pair_list=[],
feature_mul_pair_list=[], feature_divide_pair_list = [], feature_pair_sub_mul_list=[],
feature_pair_plus_mul_list = [],feature_pair_sub_divide_list = [], feature_minus2_pair_list = [],feature_mul2_pair_list=[],
feature_sub_square_pair_list=[], feature_square_sub_pair_list=[],feature_square_plus_pair_list=[]):
sub_train_x = train_x[:,feature_indexs]
for i in range(len(feature_minus_pair_list)):
ind_i = feature_minus_pair_list[i][0]
ind_j = feature_minus_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i]-train_x[:,ind_j]))
for i in range(len(feature_plus_pair_list)):
ind_i = feature_plus_pair_list[i][0]
ind_j = feature_plus_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] + train_x[:,ind_j]))
for i in range(len(feature_mul_pair_list)):
ind_i = feature_mul_pair_list[i][0]
ind_j = feature_mul_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] * train_x[:,ind_j]))
for i in range(len(feature_divide_pair_list)):
ind_i = feature_divide_pair_list[i][0]
ind_j = feature_divide_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] / train_x[:,ind_j]))
for i in range(len(feature_pair_sub_mul_list)):
ind_i = feature_pair_sub_mul_list[i][0]
ind_j = feature_pair_sub_mul_list[i][1]
ind_k = feature_pair_sub_mul_list[i][2]
sub_train_x = np.column_stack((sub_train_x, (train_x[:,ind_i]-train_x[:,ind_j]) * train_x[:,ind_k]))
return sub_train_x
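# Worked example on hypothetical data: keep columns 0 and 2 and append the
# difference of columns 1 and 3 as an engineered feature:
#   X = np.arange(12.).reshape(3, 4)
#   get_data(X, [0, 2], feature_minus_pair_list=[(1, 3)]).shape  # -> (3, 3)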
# use gbm classifier to predict whether the loan defaults or not
def gbc_classify(train_x, train_y):
feature_indexs = getTopFeatures(train_x, train_y)
sub_x_Train = get_data(train_x, feature_indexs[:16], features.feature_pair_sub_list
,features.feature_pair_plus_list, features.feature_pair_mul_list, features.feature_pair_divide_list[:20],
features.feature_pair_sub_mul_list[:20])
labels = toLabels(train_y)
gbc = GradientBoostingClassifier(n_estimators=3000, max_depth=8)
gbc.fit(sub_x_Train, labels)
return gbc
# use svm to predict the loss, based on the result of gbm classifier
def gbc_svr_predict_part(gbc, train_x, train_y, test_x, feature_pair_sub_list,
feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list,
feature_pair_sub_mul_list, feature_pair_sub_list_sf, feature_pair_plus_list2):
feature_indexs = getTopFeatures(train_x, train_y)
sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
pred_labels = gbc.predict(sub_x_Test)
pred_probs = gbc.predict_proba(sub_x_Test)[:,1]
ind_test = np.where(pred_probs>0.55)[0]
ind_train = np.where(train_y > 0)[0]
ind_train0 = np.where(train_y == 0)[0]
preds_all = np.zeros([len(sub_x_Test)])
flag = (sub_x_Test[:,16] >= 1)
ind_tmp0 = np.where(flag)[0]
ind_tmp = np.where(~flag)[0]
sub_x_Train = get_data(train_x, feature_indexs[:100], feature_pair_sub_list_sf
,feature_pair_plus_list2[:100], feature_pair_mul_list[:40], feature_pair_divide_list, feature_pair_sub_mul_list)
sub_x_Test = get_data(test_x, feature_indexs[:100], feature_pair_sub_list_sf
,feature_pair_plus_list2[:100], feature_pair_mul_list[:40], feature_pair_divide_list, feature_pair_sub_mul_list)
sub_x_Train[:,101] = np.log(1-sub_x_Train[:,101])
sub_x_Test[ind_tmp,101] = np.log(1-sub_x_Test[ind_tmp,101])
scaler = pp.StandardScaler()
scaler.fit(sub_x_Train)
sub_x_Train = scaler.transform(sub_x_Train)
sub_x_Test[ind_tmp] = scaler.transform(sub_x_Test[ind_tmp])
svr = SVR(C=16, kernel='rbf', gamma = 0.000122)
svr.fit(sub_x_Train[ind_train], np.log(train_y[ind_train]))
preds = svr.predict(sub_x_Test[ind_test])
preds_all[ind_test] = np.power(np.e, preds)
preds_all[ind_tmp0] = 0
return preds_all
# use gbm regression to predict the loss, based on the result of gbm classifier
def gbc_gbr_predict_part(gbc, train_x, train_y, test_x, feature_pair_sub_list,
feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list,
feature_pair_sub_mul_list, feature_pair_sub_list2):
feature_indexs = getTopFeatures(train_x, train_y)
sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20],feature_pair_sub_mul_list[:20])
sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
pred_labels = gbc.predict(sub_x_Test)
pred_probs = gbc.predict_proba(sub_x_Test)[:,1]
ind_test = np.where(pred_probs>0.55)[0]
ind_train = np.where(train_y > 0)[0]
ind_train0 = np.where(train_y == 0)[0]
preds_all = np.zeros([len(sub_x_Test)])
flag = (sub_x_Test[:,16] >= 1)
ind_tmp0 = np.where(flag)[0]
ind_tmp = np.where(~flag)[0]
sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list2[:70]
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list, feature_pair_sub_mul_list)
sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list2[:70]
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list, feature_pair_sub_mul_list)
scaler = pp.StandardScaler()
scaler.fit(sub_x_Train)
sub_x_Train = scaler.transform(sub_x_Train)
sub_x_Test[ind_tmp] = scaler.transform(sub_x_Test[ind_tmp])
gbr1000 = GradientBoostingRegressor(n_estimators=1300, max_depth=4, subsample=0.5, learning_rate=0.05)
gbr1000.fit(sub_x_Train[ind_train], np.log(train_y[ind_train]))
preds = gbr1000.predict(sub_x_Test[ind_test])
preds_all[ind_test] = np.power(np.e, preds)
preds_all[ind_tmp0] = 0
return preds_all
# predict the loss based on the Gaussian process regressor, which has been trained
def gp_predict(clf, x_Test):
size = len(x_Test)
part_size = 3000
cnt = (size-1) / part_size + 1
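# the line above computes ceil(size / part_size) via Python 2 integer division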
preds = []
for i in range(cnt):
if i < cnt - 1:
pred_part = clf.predict(x_Test[i*part_size: (i+1) * part_size])
else:
pred_part = clf.predict(x_Test[i*part_size: size])
preds.extend(pred_part)
return np.power(np.e,preds)
# train the gaussian process regressor
def gbc_gp_predict_part(sub_x_Train, train_y, sub_x_Test_part):
# Owing to memory limits, the model is trained on only part of the training data.
# Note: this part was trained on a machine with more than 96 GB of RAM.
sub_x_Train[:,16] = np.log(1-sub_x_Train[:,16])
scaler = pp.StandardScaler()
scaler.fit(sub_x_Train)
sub_x_Train = scaler.transform(sub_x_Train)
ind_train = np.where(train_y>0)[0]
part_size= int(0.7 * len(ind_train))
gp = GaussianProcess(theta0=1e-3, thetaL=1e-5, thetaU=10, corr= 'absolute_exponential')
gp.fit(sub_x_Train[ind_train[:part_size]], np.log(train_y[ind_train[:part_size]]))
flag = (sub_x_Test_part[:,16] >= 1)
ind_tmp0 = np.where(flag)[0]
ind_tmp = np.where(~flag)[0]
sub_x_Test_part[ind_tmp,16] = np.log(1-sub_x_Test_part[ind_tmp,16])
sub_x_Test_part[ind_tmp] = scaler.transform(sub_x_Test_part[ind_tmp])
gp_preds_tmp = gp_predict(gp, sub_x_Test_part[ind_tmp])
gp_preds = np.zeros(len(sub_x_Test_part))
gp_preds[ind_tmp] = gp_preds_tmp
return gp_preds
# use gbm classifier to predict whether the loan defaults or not, then invoke the function gbc_gp_predict_part
def gbc_gp_predict(train_x, train_y, test_x):
feature_indexs = getTopFeatures(train_x, train_y)
sub_x_Train = get_data(train_x, feature_indexs[:16], features.feature_pair_sub_list
,features.feature_pair_plus_list, features.feature_pair_mul_list, features.feature_pair_divide_list[:20])
sub_x_Test = get_data(test_x, feature_indexs[:16], features.feature_pair_sub_list
,features.feature_pair_plus_list, features.feature_pair_mul_list, features.feature_pair_divide_list[:20])
labels = toLabels(train_y)
gbc = GradientBoostingClassifier(n_estimators=3000, max_depth=9)
gbc.fit(sub_x_Train, labels)
pred_probs = gbc.predict_proba(sub_x_Test)[:,1]
ind_test = np.where(pred_probs>0.55)[0]
gp_preds_part = gbc_gp_predict_part(sub_x_Train, train_y, sub_x_Test[ind_test])
gp_preds = np.zeros(len(test_x))
gp_preds[ind_test] = gp_preds_part
return gp_preds
# invoke the function gbc_svr_predict_part
def gbc_svr_predict(gbc, train_x, train_y, test_x):
svr_preds = gbc_svr_predict_part(gbc, train_x, train_y, test_x, features.feature_pair_sub_list, features.feature_pair_plus_list,
features.feature_pair_mul_list, features.feature_pair_divide_list,
features.feature_pair_sub_mul_list, features.feature_pair_sub_list_sf,
features.feature_pair_plus_list2)
return svr_preds
# invoke the function gbc_gbr_predict_part
def gbc_gbr_predict(gbc, train_x, train_y, test_x):
gbr_preds = gbc_gbr_predict_part(gbc, train_x, train_y, test_x, features.feature_pair_sub_list,
features.feature_pair_plus_list, features.feature_pair_mul_list,
features.feature_pair_divide_list, features.feature_pair_sub_mul_list,
features.feature_pair_sub_list2)
return gbr_preds
# the main function
if __name__ == '__main__':
train_fs = load_train_fs()
test_fs = load_test_fs()
train_x, train_y = train_type(train_fs)
test_x = test_type(test_fs)
gbc = gbc_classify(train_x, train_y)
svr_preds = gbc_svr_predict(gbc, train_x, train_y, test_x)
gbr_preds = gbc_gbr_predict(gbc, train_x, train_y, test_x)
gp_preds = gbc_gp_predict(train_x, train_y, test_x)
preds_all = svr_preds * 0.4 + gp_preds * 0.25 + gbr_preds * 0.35
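# the ensemble weights (0.4 + 0.25 + 0.35) sum to 1.0, so the blend is a
# convex combination of the SVR, Gaussian process and GBR predictions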
output_preds(preds_all)
|
Goodideax/CS249
|
predict_balance__5000.py
|
Python
|
bsd-3-clause
| 15,523
|
[
"Gaussian"
] |
08e42986d2158bfe74942e3c74265b8bd7c5e99a17694cd26084bf580a1fceda
|
import lmfit
import numpy as np
from numpy.linalg import inv
import scipy as sp
import itertools
import matplotlib as mpl
import cmath
from collections import OrderedDict, defaultdict
from pycqed.utilities import timer as tm_mod
from sklearn.mixture import GaussianMixture as GM
from sklearn.tree import DecisionTreeClassifier as DTC
from pycqed.analysis import fitting_models as fit_mods
from pycqed.analysis import analysis_toolbox as a_tools
import pycqed.analysis_v2.base_analysis as ba
import pycqed.analysis_v2.readout_analysis as roa
from pycqed.analysis_v2.readout_analysis import \
Singleshot_Readout_Analysis_Qutrit as SSROQutrit
import pycqed.analysis_v2.tomography_qudev as tomo
from pycqed.analysis.tools.plotting import SI_val_to_msg_str
from copy import deepcopy
from pycqed.measurement.sweep_points import SweepPoints
from pycqed.measurement.calibration.calibration_points import CalibrationPoints
import matplotlib.pyplot as plt
from pycqed.analysis.three_state_rotation import predict_proba_avg_ro
import traceback
import logging
from pycqed.utilities import math
from pycqed.utilities.general import find_symmetry_index
import pycqed.measurement.waveform_control.segment as seg_mod
import datetime as dt
log = logging.getLogger(__name__)
try:
import qutip as qtp
except ImportError as e:
log.warning('Could not import qutip, tomography code will not work')
class AveragedTimedomainAnalysis(ba.BaseDataAnalysis):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.single_timestamp = True
self.params_dict = {
'value_names': 'value_names',
'measured_values': 'measured_values',
'measurementstring': 'measurementstring',
'exp_metadata': 'exp_metadata'}
self.numeric_params = []
if kwargs.get('auto', True):
self.run_analysis()
def process_data(self):
self.metadata = self.raw_data_dict.get('exp_metadata', {})
if self.metadata is None:
self.metadata = {}
cal_points = self.metadata.get('cal_points', None)
cal_points = self.options_dict.get('cal_points', cal_points)
cal_points_list = roa.convert_channel_names_to_index(
cal_points, len(self.raw_data_dict['measured_values'][0]),
self.raw_data_dict['value_names'])
self.proc_data_dict['cal_points_list'] = cal_points_list
measured_values = self.raw_data_dict['measured_values']
cal_idxs = self._find_calibration_indices()
scales = [np.std(x[cal_idxs]) for x in measured_values]
observable_vectors = np.zeros((len(cal_points_list),
len(measured_values)))
observable_vector_stds = np.ones_like(observable_vectors)
for i, observable in enumerate(cal_points_list):
for ch_idx, seg_idxs in enumerate(observable):
x = measured_values[ch_idx][seg_idxs] / scales[ch_idx]
if len(x) > 0:
observable_vectors[i][ch_idx] = np.mean(x)
if len(x) > 1:
observable_vector_stds[i][ch_idx] = np.std(x)
Omtx = (observable_vectors[1:] - observable_vectors[0]).T
d0 = observable_vectors[0]
corr_values = np.zeros(
(len(cal_points_list) - 1, len(measured_values[0])))
for i in range(len(measured_values[0])):
d = np.array([x[i] / scale for x, scale in zip(measured_values,
scales)])
corr_values[:, i] = inv(Omtx.T.dot(Omtx)).dot(Omtx.T).dot(d - d0)
self.proc_data_dict['corr_values'] = corr_values
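# Hedged aside: inv(Omtx.T.dot(Omtx)).dot(Omtx.T) above is the normal-equations
# form of least squares; np.linalg.lstsq(Omtx, d - d0) computes the same
# projection with better numerical stability.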
def measurement_operators_and_results(self):
"""
Converts the calibration points to measurement operators. Assumes that
the calibration points are ordered the same as the basis states for
the tomography calculation (e.g. for two qubits |gg>, |ge>, |eg>, |ee>).
Also assumes that each calibration in the passed cal_points uses
different segments.
Returns:
A tuple of
the measured values with outthe calibration points;
the measurement operators corresponding to each channel;
and the expected covariation matrix between the operators.
"""
d = len(self.proc_data_dict['cal_points_list'])
cal_point_idxs = [set() for _ in range(d)]
for i, idxs_lists in enumerate(self.proc_data_dict['cal_points_list']):
for idxs in idxs_lists:
cal_point_idxs[i].update(idxs)
cal_point_idxs = [sorted(list(idxs)) for idxs in cal_point_idxs]
cal_point_idxs = np.array(cal_point_idxs)
raw_data = self.raw_data_dict['measured_values']
means = [None] * d
residuals = [list() for _ in raw_data]
for i, cal_point_idx in enumerate(cal_point_idxs):
means[i] = [np.mean(ch_data[cal_point_idx]) for ch_data in raw_data]
for j, ch_residuals in enumerate(residuals):
ch_residuals += list(raw_data[j][cal_point_idx] - means[i][j])
means = np.array(means)
residuals = np.array(residuals)
Fs = [np.diag(ms) for ms in means.T]
Omega = residuals.dot(residuals.T) / len(residuals.T)
data_idxs = np.setdiff1d(np.arange(len(raw_data[0])),
cal_point_idxs.flatten())
data = np.array([ch_data[data_idxs] for ch_data in raw_data])
return data, Fs, Omega
def _find_calibration_indices(self):
cal_indices = set()
cal_points = self.options_dict['cal_points']
nr_segments = self.raw_data_dict['measured_values'].shape[-1]
for observable in cal_points:
if isinstance(observable, (list, np.ndarray)):
for idxs in observable:
cal_indices.update({idx % nr_segments for idx in idxs})
else: # assume dictionaries
for idxs in observable.values():
cal_indices.update({idx % nr_segments for idx in idxs})
return list(cal_indices)
def all_cal_points(d, nr_ch, reps=1):
"""
Generates a list of calibration points for a Hilbert space of dimension d,
with nr_ch channels and reps repetitions of each calibration point.
"""
return [[list(range(-reps*i, -reps*(i-1)))]*nr_ch for i in range(d, 0, -1)]
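# Worked example, following the comprehension above: for a qutrit (d=3) read
# out on a single channel with two repetitions per calibration point,
#   all_cal_points(3, 1, reps=2) -> [[[-6, -5]], [[-4, -3]], [[-2, -1]]]
# i.e. lists of segment indices counted from the end of the data.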
class Single_Qubit_TimeDomainAnalysis(ba.BaseDataAnalysis):
def process_data(self):
"""
This takes care of rotating and normalizing the data if required.
This should work for several input types.
- I/Q values (2 quadratures + cal points)
- weight functions (1 quadrature + cal points)
- counts (no cal points)
There are several options possible to specify the normalization
using the options dict.
cal_points (tuple) of indices of the calibration points
zero_coord, one_coord
"""
cal_points = self.options_dict.get('cal_points', None)
zero_coord = self.options_dict.get('zero_coord', None)
one_coord = self.options_dict.get('one_coord', None)
if cal_points is None:
# default for all standard Timedomain experiments
cal_points = [list(range(-4, -2)), list(range(-2, 0))]
if len(self.raw_data_dict['measured_values']) == 1:
# if only one weight function is used rotation is not required
self.proc_data_dict['corr_data'] = a_tools.rotate_and_normalize_data_1ch(
self.raw_data_dict['measured_values'][0],
cal_zero_points=cal_points[0],
cal_one_points=cal_points[1])
else:
self.proc_data_dict['corr_data'], zero_coord, one_coord = \
a_tools.rotate_and_normalize_data(
data=self.raw_data_dict['measured_values'][0:2],
zero_coord=zero_coord,
one_coord=one_coord,
cal_zero_points=cal_points[0],
cal_one_points=cal_points[1])
# This should be added to the hdf5 datafile but cannot because of the
# way that the "new" analysis works.
# self.add_dataset_to_analysisgroup('Corrected data',
# self.proc_data_dict['corr_data'])
class MultiQubit_TimeDomain_Analysis(ba.BaseDataAnalysis):
"""
Base class for multi-qubit time-domain analyses.
Parameters that can be specified in the options dict:
- rotation_type: type of rotation to be done on the raw data.
Types of rotations supported by this class:
- 'cal_states' (default, no need to specify): rotation based on
CalibrationPoints for 1D and TwoD data. Supports 2 and 3 cal states
per qubit
- 'fixed_cal_points' (only for TwoD, with 2 cal states):
does PCA on the columns corresponding to the highest cal state
to find the indices of that cal state in the columns, then uses
those to get the data points for the other cal state. Does
rotation using the mean of the data points corresponding to the
two cal states as the zero and one coordinates to rotate
the data.
- 'PCA': ignores cal points and does PCA; in the case of TwoD data it
does PCA row by row
- 'column_PCA': ignores cal points and does PCA; in the case of TwoD data it
does PCA column by column
- 'global_PCA' (only for TwoD): does PCA on the whole 2D array
- main_sp (default: None): dict with keys qb_name used to specify which
sweep parameter should be used as axis label in plot
- functionality to split measurements with tiled sweep_points:
- split_params (default: None): list of strings with sweep parameters
names expected to be found in SweepPoints. Groups data by these
parameters and stores it in proc_data_dict['split_data_dict'].
- select_split (default: None): dict with keys qb_names and values
a tuple (sweep_param_name, value) or (sweep_param_name, index).
The selection is appended to self.measurement_strings, which specifies the plot title.
The selected parameter must also be part of the split_params for
that qubit.
"""
def __init__(self,
qb_names: list=None, label: str='',
t_start: str=None, t_stop: str=None, data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True,
params_dict=None, numeric_params=None, **kwargs):
super().__init__(t_start=t_start, t_stop=t_stop, label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only,
do_fitting=do_fitting, **kwargs)
self.qb_names = qb_names
self.params_dict = params_dict
if self.params_dict is None:
self.params_dict = {}
self.numeric_params = numeric_params
self.measurement_strings = {}
if self.numeric_params is None:
self.numeric_params = []
if not hasattr(self, "job"):
self.create_job(qb_names=qb_names, t_start=t_start, t_stop=t_stop,
label=label, data_file_path=data_file_path,
do_fitting=do_fitting, options_dict=options_dict,
extract_only=extract_only, params_dict=params_dict,
numeric_params=numeric_params, **kwargs)
if auto:
self.run_analysis()
def extract_data(self):
super().extract_data()
if self.qb_names is None:
self.qb_names = self.get_param_value(
'ro_qubits', default_value=self.get_param_value('qb_names'))
if self.qb_names is None:
raise ValueError('Provide the "qb_names".')
self.measurement_strings = {
qbn: self.raw_data_dict['measurementstring'] for qbn in
self.qb_names}
self.data_filter = self.get_param_value('data_filter')
self.prep_params = self.get_param_value('preparation_params',
default_value=dict())
self.channel_map = self.get_param_value('meas_obj_value_names_map')
if self.channel_map is None:
# if the new name meas_obj_value_names_map is not found, try with
# the old name channel_map
self.channel_map = self.get_param_value('channel_map')
if self.channel_map is None:
value_names = self.raw_data_dict['value_names']
if np.ndim(value_names) > 0:
value_names = value_names
if 'w' in value_names[0]:
self.channel_map = a_tools.get_qb_channel_map_from_hdf(
self.qb_names, value_names=value_names,
file_path=self.raw_data_dict['folder'])
else:
self.channel_map = {}
for qbn in self.qb_names:
self.channel_map[qbn] = value_names
if len(self.channel_map) == 0:
raise ValueError('No qubit RO channels have been found.')
self.data_to_fit = deepcopy(self.get_param_value('data_to_fit', {}))
# creates self.sp
self.get_sweep_points()
def get_sweep_points(self):
self.sp = self.get_param_value('sweep_points')
if self.sp is not None:
self.sp = SweepPoints(self.sp)
def create_sweep_points_dict(self):
sweep_points_dict = self.get_param_value('sweep_points_dict')
hard_sweep_params = self.get_param_value('hard_sweep_params')
if self.sp is not None:
self.mospm = self.get_param_value('meas_obj_sweep_points_map')
main_sp = self.get_param_value('main_sp')
if self.mospm is None:
raise ValueError('When providing "sweep_points", '
'"meas_obj_sweep_points_map" has to be '
'provided in addition.')
if main_sp is not None:
self.proc_data_dict['sweep_points_dict'] = {}
for qbn, p in main_sp.items():
dim = self.sp.find_parameter(p)
if dim == 1:
log.warning(f"main_sp is only implemented for sweep "
f"dimension 0, but {p} is in dimension 1.")
self.proc_data_dict['sweep_points_dict'][qbn] = \
{'sweep_points': self.sp.get_sweep_params_property(
'values', dim, p)}
else:
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': self.sp.get_sweep_params_property(
'values', 0, self.mospm[qbn])[0]}
for qbn in self.qb_names}
elif sweep_points_dict is not None:
# assumed to be of the form {qbn1: swpts_array1, qbn2: swpts_array2}
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': sweep_points_dict[qbn]}
for qbn in self.qb_names}
elif hard_sweep_params is not None:
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': list(hard_sweep_params.values())[0][
'values']} for qbn in self.qb_names}
else:
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': self.data_filter(
self.raw_data_dict['hard_sweep_points'])}
for qbn in self.qb_names}
def create_sweep_points_2D_dict(self):
soft_sweep_params = self.get_param_value('soft_sweep_params')
if self.sp is not None:
self.proc_data_dict['sweep_points_2D_dict'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['sweep_points_2D_dict'][qbn] = \
OrderedDict()
for pn in self.mospm[qbn]:
if pn in self.sp[1]:
self.proc_data_dict['sweep_points_2D_dict'][qbn][
pn] = self.sp[1][pn][0]
elif soft_sweep_params is not None:
self.proc_data_dict['sweep_points_2D_dict'] = \
{qbn: {pn: soft_sweep_params[pn]['values'] for
pn in soft_sweep_params}
for qbn in self.qb_names}
else:
if len(self.raw_data_dict['soft_sweep_points'].shape) == 1:
self.proc_data_dict['sweep_points_2D_dict'] = \
{qbn: {self.raw_data_dict['sweep_parameter_names'][1]:
self.raw_data_dict['soft_sweep_points']} for
qbn in self.qb_names}
else:
sspn = self.raw_data_dict['sweep_parameter_names'][1:]
self.proc_data_dict['sweep_points_2D_dict'] = \
{qbn: {sspn[i]: self.raw_data_dict['soft_sweep_points'][i]
for i in range(len(sspn))} for qbn in self.qb_names}
if self.get_param_value('percentage_done', 100) < 100:
# This indicates an interrupted measurement.
# Remove non-measured sweep points in that case.
# raw_data_dict['soft_sweep_points'] is obtained in
# BaseDataAnalysis.add_measured_data(), and its length should
# always correspond to the actual number of measured soft sweep
# points.
ssl = len(self.raw_data_dict['soft_sweep_points'])
for sps in self.proc_data_dict['sweep_points_2D_dict'].values():
for k, v in sps.items():
sps[k] = v[:ssl]
def create_meas_results_per_qb(self):
measured_RO_channels = list(self.raw_data_dict['measured_data'])
meas_results_per_qb_raw = {}
meas_results_per_qb = {}
for qb_name, RO_channels in self.channel_map.items():
meas_results_per_qb_raw[qb_name] = {}
meas_results_per_qb[qb_name] = {}
if isinstance(RO_channels, str):
meas_ROs_per_qb = [RO_ch for RO_ch in measured_RO_channels
if RO_channels in RO_ch]
for meas_RO in meas_ROs_per_qb:
meas_results_per_qb_raw[qb_name][meas_RO] = \
self.raw_data_dict[
'measured_data'][meas_RO]
meas_results_per_qb[qb_name][meas_RO] = \
self.data_filter(
meas_results_per_qb_raw[qb_name][meas_RO])
elif isinstance(RO_channels, list):
for qb_RO_ch in RO_channels:
meas_ROs_per_qb = [RO_ch for RO_ch in measured_RO_channels
if qb_RO_ch in RO_ch]
for meas_RO in meas_ROs_per_qb:
meas_results_per_qb_raw[qb_name][meas_RO] = \
self.raw_data_dict[
'measured_data'][meas_RO]
meas_results_per_qb[qb_name][meas_RO] = \
self.data_filter(
meas_results_per_qb_raw[qb_name][meas_RO])
else:
raise TypeError('The RO channels for {} must either be a list '
'or a string.'.format(qb_name))
self.proc_data_dict['meas_results_per_qb_raw'] = \
meas_results_per_qb_raw
self.proc_data_dict['meas_results_per_qb'] = \
meas_results_per_qb
def process_data(self):
super().process_data()
self.data_with_reset = False
if self.data_filter is None:
if 'active' in self.prep_params.get('preparation_type', 'wait'):
reset_reps = self.prep_params.get('reset_reps', 3)
self.data_filter = lambda x: x[reset_reps::reset_reps+1]
self.data_with_reset = True
elif "preselection" in self.prep_params.get('preparation_type',
'wait'):
self.data_filter = lambda x: x[1::2] # filter preselection RO
else:
self.data_filter = lambda x: x
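# Hedged example: with reset_reps=3 each segment is recorded as
# [reset0, reset1, reset2, measurement], so x[3::4] keeps only the final
# measurement shot; preselection stores [preselection, readout] pairs, so
# x[1::2] keeps every second shot.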
self.create_sweep_points_dict()
self.create_meas_results_per_qb()
# temporary fix for appending calibration points to x values but
# without breaking sequences not yet using this interface.
self.rotate = self.get_param_value('rotate', default_value=False)
cal_points = self.get_param_value('cal_points')
last_ge_pulses = self.get_param_value('last_ge_pulses',
default_value=False)
if self.get_param_value("data_type", "averaged") == "singleshot":
predict_proba = self.get_param_value("predict_proba", False)
if predict_proba and self.get_param_value("classified_ro", False):
log.warning("predict_proba set to 'False' as probabilities are"
"already obtained from classified readout")
predict_proba = False
self.process_single_shots(
predict_proba=predict_proba,
classifier_params=self.get_param_value("classifier_params"),
states_map=self.get_param_value("states_map"))
# ensure rotation is removed when single shots yield probabilities
if self.get_param_value("classified_ro", False) or predict_proba:
self.rotate = False
cal_states_rotations = {qbn: [] for qbn in self.qb_names}
try:
self.cp = CalibrationPoints.from_string(cal_points)
# for now assuming the same for all qubits.
# The cal point indices in cal_points_dict are used in MQTDA for
# plots only on data for which any preparation readout (e.g. active
# reset or preselection) has already been removed. Therefore the
# indices should only consider filtered data
self.cal_states_dict = self.cp.get_indices(
self.qb_names)[self.qb_names[0]]
cal_states_rots = self.cp.get_rotations(last_ge_pulses,
self.qb_names[0])[self.qb_names[0]] if self.rotate \
else cal_states_rotations
self.cal_states_rotations = self.get_param_value(
'cal_states_rotations', default_value=cal_states_rots)
sweep_points_w_calpts = \
{qbn: {'sweep_points': self.cp.extend_sweep_points(
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'], qbn)} for qbn in self.qb_names}
self.proc_data_dict['sweep_points_dict'] = sweep_points_w_calpts
except TypeError as e:
log.error(e)
log.warning("Failed retrieving cal point objects or states. "
"Please update measurement to provide cal point object "
"in metadata. Trying to get them using the old way ...")
self.cal_states_rotations = self.get_param_value(
'cal_states_rotations', default_value=cal_states_rotations) \
if self.rotate else cal_states_rotations
self.cal_states_dict = self.get_param_value('cal_states_dict',
default_value={})
if self.get_param_value('global_PCA') is not None:
log.warning('Parameter "global_PCA" is deprecated. Please set '
'rotation_type="global_PCA" instead.')
self.rotation_type = self.get_param_value(
'rotation_type',
default_value='cal_states' if self.rotate else 'no_rotation')
# create projected_data_dict
self.data_to_fit = deepcopy(self.get_param_value('data_to_fit'))
if self.data_to_fit is None:
# If we have cal points, but data_to_fit is not specified,
# choose a reasonable default value. In cases with only two cal
# points, this decides which projected plot is generated. (In
# cases with three cal points, we will anyways get all three
# projected plots.)
if 'e' in self.cal_states_dict.keys():
self.data_to_fit = {qbn: 'pe' for qbn in self.qb_names}
elif 'g' in self.cal_states_dict.keys():
self.data_to_fit = {qbn: 'pg' for qbn in self.qb_names}
else:
self.data_to_fit = {}
# TODO: Steph 15.09.2020
# This is a hack to allow list inside data_to_fit. These lists are
# currently only supported by MultiCZgate_CalibAnalysis
for qbn in self.data_to_fit:
if isinstance(self.data_to_fit[qbn], (list, tuple)):
self.data_to_fit[qbn] = self.data_to_fit[qbn][0]
if self.rotate or self.rotation_type == 'global_PCA':
self.cal_states_analysis()
else:
# this assumes data obtained with classifier detector!
# ie pg, pe, pf are expected to be in the value_names
self.proc_data_dict['projected_data_dict'] = OrderedDict()
for qbn, data_dict in self.proc_data_dict[
'meas_results_per_qb'].items():
self.proc_data_dict['projected_data_dict'][qbn] = OrderedDict()
for state_prob in ['pg', 'pe', 'pf']:
self.proc_data_dict['projected_data_dict'][qbn].update(
{state_prob: data for key, data in data_dict.items()
if state_prob in key})
if self.cal_states_dict is None:
self.cal_states_dict = {}
self.num_cal_points = np.array(list(
self.cal_states_dict.values())).flatten().size
# correct probabilities given calibration matrix
if self.get_param_value("correction_matrix") is not None:
self.proc_data_dict['projected_data_dict_corrected'] = \
OrderedDict()
for qbn, data_dict in self.proc_data_dict[
'meas_results_per_qb'].items():
self.proc_data_dict['projected_data_dict_corrected'][qbn] = \
OrderedDict()
probas_raw = np.asarray([
data_dict[k] for k in data_dict for state_prob in
['pg', 'pe', 'pf'] if state_prob in k])
corr_mtx = self.get_param_value("correction_matrix")[qbn]
if np.ndim(probas_raw) == 3:
assert self.get_param_value("TwoD", False) == True, \
"'TwoD' is False but data seems to be 2D"
# temporarily put 2D sweep into 1d for readout correction
sh = probas_raw.shape
probas_raw = probas_raw.reshape(sh[0], -1)
probas_corrected = np.linalg.inv(corr_mtx).T @ probas_raw
probas_corrected = probas_corrected.reshape(sh)
else:
probas_corrected = np.linalg.inv(corr_mtx).T @ probas_raw
self.proc_data_dict['projected_data_dict_corrected'][
qbn] = {key: data for key, data in
zip(["pg", "pe", "pf"], probas_corrected)}
# get data_to_fit
suffix = "_corrected" if self.get_param_value("correction_matrix")\
is not None else ""
self.proc_data_dict['data_to_fit'] = OrderedDict()
for qbn, prob_data in self.proc_data_dict[
'projected_data_dict' + suffix].items():
if len(prob_data) and qbn in self.data_to_fit:
self.proc_data_dict['data_to_fit'][qbn] = prob_data[
self.data_to_fit[qbn]]
# create msmt_sweep_points, sweep_points, cal_points_sweep_points
for qbn in self.qb_names:
if self.num_cal_points > 0:
self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points'] = \
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][:-self.num_cal_points]
self.proc_data_dict['sweep_points_dict'][qbn][
'cal_points_sweep_points'] = \
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][-self.num_cal_points::]
else:
self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points'] = self.proc_data_dict[
'sweep_points_dict'][qbn]['sweep_points']
self.proc_data_dict['sweep_points_dict'][qbn][
'cal_points_sweep_points'] = []
if self.options_dict.get('TwoD', False):
self.create_sweep_points_2D_dict()
# handle data splitting if needed
self.split_data()
def split_data(self):
def unique(l):
try:
return np.unique(l, return_inverse=True)
except Exception:
h = [repr(a) for a in l]
_, i, j = np.unique(h, return_index=True, return_inverse=True)
return l[i], j
split_params = self.get_param_value('split_params', [])
if not len(split_params):
return
pdd = self.proc_data_dict
pdd['split_data_dict'] = {}
for qbn in self.qb_names:
pdd['split_data_dict'][qbn] = {}
for p in split_params:
dim = self.sp.find_parameter(p)
sv = self.sp.get_sweep_params_property(
'values', param_names=p, dimension=dim)
usp, ind = unique(sv)
if len(usp) <= 1:
continue
svs = [self.sp.subset(ind == i, dim) for i in
range(len(usp))]
[s.remove_sweep_parameter(p) for s in svs]
sdd = {}
pdd['split_data_dict'][qbn][p] = sdd
for i in range(len(usp)):
subset = (np.concatenate(
[ind == i,
[True] * len(pdd['sweep_points_dict'][qbn][
'cal_points_sweep_points'])]))
sdd[i] = {}
sdd[i]['value'] = usp[i]
sdd[i]['sweep_points'] = svs[i]
d = pdd['sweep_points_dict'][qbn]
if dim == 0:
sdd[i]['sweep_points_dict'] = {
'sweep_points': d['sweep_points'][subset],
'msmt_sweep_points':
d['msmt_sweep_points'][ind == i],
'cal_points_sweep_points':
d['cal_points_sweep_points'],
}
sdd[i]['sweep_points_2D_dict'] = pdd[
'sweep_points_2D_dict'][qbn]
else:
sdd[i]['sweep_points_dict'] = \
pdd['sweep_points_dict'][qbn]
sdd[i]['sweep_points_2D_dict'] = {
k: v[ind == i] for k, v in pdd[
'sweep_points_2D_dict'][qbn].items()}
for d in ['projected_data_dict', 'data_to_fit']:
if isinstance(pdd[d][qbn], dict):
if dim == 0:
sdd[i][d] = {k: v[:, subset] for
k, v in pdd[d][qbn].items()}
else:
sdd[i][d] = {k: v[ind == i, :] for
k, v in pdd[d][qbn].items()}
else:
if dim == 0:
sdd[i][d] = pdd[d][qbn][:, subset]
else:
sdd[i][d] = pdd[d][qbn][ind == i, :]
select_split = self.get_param_value('select_split')
if select_split is not None:
for qbn, select in select_split.items():
p, v = select
if p not in pdd['split_data_dict'][qbn]:
log.warning(f"Split parameter {p} for {qbn} not "
f"found. Ignoring this selection.")
try:
ind = [a['value'] for a in pdd['split_data_dict'][
qbn][p].values()].index(v)
except ValueError:
ind = v
try:
pdd['split_data_dict'][qbn][p][ind]
except ValueError:
log.warning(f"Value {v} for split parameter {p} "
f"of {qbn} not found. Ignoring this "
f"selection.")
continue
for d in ['projected_data_dict', 'data_to_fit',
'sweep_points_dict', 'sweep_points_2D_dict']:
pdd[d][qbn] = pdd['split_data_dict'][qbn][p][ind][d]
self.measurement_strings[qbn] += f' ({p}: {v})'
def get_cal_data_points(self):
self.num_cal_points = np.array(list(
self.cal_states_dict.values())).flatten().size
do_PCA = self.rotation_type == 'PCA' or \
self.rotation_type == 'column_PCA'
self.cal_states_dict_for_rotation = OrderedDict()
states = False
cal_states_rotations = self.cal_states_rotations
for key in cal_states_rotations.keys():
if key == 'g' or key == 'e' or key == 'f':
states = True
for qbn in self.qb_names:
self.cal_states_dict_for_rotation[qbn] = OrderedDict()
if states:
cal_states_rot_qb = cal_states_rotations
else:
cal_states_rot_qb = cal_states_rotations.get(qbn, [])
for i in range(len(cal_states_rot_qb)):
cal_state = \
[k for k, idx in cal_states_rot_qb.items()
if idx == i][0]
self.cal_states_dict_for_rotation[qbn][cal_state] = \
None if do_PCA and self.num_cal_points != 3 else \
self.cal_states_dict[cal_state]
def cal_states_analysis(self):
self.get_cal_data_points()
self.proc_data_dict['projected_data_dict'] = OrderedDict(
{qbn: '' for qbn in self.qb_names})
if len(self.data_to_fit):
if not len(self.cal_states_dict):
self.data_to_fit = {qbn: 'pca' for qbn in self.qb_names}
storing_keys = self.data_to_fit
elif len(self.cal_states_dict):
csr = [(k, v) for k, v in self.cal_states_rotations.items()]
csr.sort(key=lambda t: t[1])
storing_keys = {qbn: f'p{csr[-1][0]}' for qbn in self.qb_names}
else:
storing_keys = {qbn: 'pca' for qbn in self.qb_names}
for qbn in self.qb_names:
cal_states_dict = self.cal_states_dict_for_rotation[qbn]
if len(cal_states_dict) not in [0, 2, 3]:
raise NotImplementedError('Calibration states rotation is '
'currently only implemented for 0, '
'2, or 3 cal states per qubit.')
data_mostly_g = self.get_param_value('data_mostly_g',
default_value=True)
if self.get_param_value('TwoD', default_value=False):
if self.rotation_type == 'global_PCA':
self.proc_data_dict['projected_data_dict'].update(
self.global_pca_TwoD(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, storing_keys,
data_mostly_g=data_mostly_g))
elif len(cal_states_dict) == 3:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data_3_cal_states_TwoD(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map,
self.cal_states_dict_for_rotation))
elif self.rotation_type == 'fixed_cal_points':
rotated_data_dict, zero_coord, one_coord = \
self.rotate_data_TwoD_same_fixed_cal_idxs(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, self.cal_states_dict_for_rotation,
storing_keys)
self.proc_data_dict['projected_data_dict'].update(
rotated_data_dict)
self.proc_data_dict['rotation_coordinates'] = \
[zero_coord, one_coord]
else:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data_TwoD(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, self.cal_states_dict_for_rotation,
storing_keys, data_mostly_g=data_mostly_g,
column_PCA=self.rotation_type == 'column_PCA'))
else:
if len(cal_states_dict) == 3:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data_3_cal_states(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map,
self.cal_states_dict_for_rotation))
else:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, self.cal_states_dict_for_rotation,
storing_keys, data_mostly_g=data_mostly_g))
@staticmethod
def rotate_data_3_cal_states(qb_name, meas_results_per_qb, channel_map,
cal_states_dict):
# FOR 3 CAL STATES
rotated_data_dict = OrderedDict()
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict[qb_name] = OrderedDict()
cal_pts_idxs = list(cal_states_dict[qb_name].values())
cal_points_data = np.zeros((len(cal_pts_idxs), 2))
if list(meas_res_dict) == channel_map[qb_name]:
raw_data = np.array([v for v in meas_res_dict.values()]).T
for i, cal_idx in enumerate(cal_pts_idxs):
cal_points_data[i, :] = np.mean(raw_data[cal_idx, :],
axis=0)
rotated_data = predict_proba_avg_ro(raw_data, cal_points_data)
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'] = rotated_data[:, i]
else:
raise NotImplementedError('Calibration states rotation with 3 '
'cal states only implemented for '
'2 readout channels per qubit.')
return rotated_data_dict
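    # Illustrative sketch (assuming two readout channels and one segment per
    # cal state): with cal_states_dict[qb_name] = {'g': [-3], 'e': [-2],
    # 'f': [-1]}, cal_points_data becomes a 3x2 matrix of the mean (I, Q)
    # voltages of the g/e/f calibration segments, and predict_proba_avg_ro
    # assigns each data point a probability for each of the three states,
    # stored under 'pg', 'pe' and 'pf'.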
@staticmethod
def rotate_data(qb_name, meas_results_per_qb, channel_map,
cal_states_dict, storing_keys, data_mostly_g=True):
# ONLY WORKS FOR 2 CAL STATES
qb_cal_states = cal_states_dict[qb_name].keys()
if len(qb_cal_states) != 2:
raise ValueError(f'Expected two cal states for {qb_name} '
f'but found {len(qb_cal_states)}: {qb_cal_states}')
other_cs = [cs for cs in qb_cal_states if cs != storing_keys[qb_name][-1]]
if len(other_cs) == 0:
raise ValueError(f'There are no other cal states except for '
f'{storing_keys[qb_name][-1]} from storing_keys.')
elif len(other_cs) > 1:
raise ValueError(f'There is more than one other cal state in '
f'addition to {storing_keys[qb_name][-1]} from '
f'storing_keys. Not clear which one to use.')
other_cs = f'p{other_cs[0]}'
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict = OrderedDict()
if len(cal_states_dict[qb_name]) == 0:
cal_zero_points = None
cal_one_points = None
else:
cal_zero_points = list(cal_states_dict[qb_name].values())[0]
cal_one_points = list(cal_states_dict[qb_name].values())[1]
rotated_data_dict[qb_name] = OrderedDict()
if len(meas_res_dict) == 1:
# one RO channel per qubit
if cal_zero_points is None and cal_one_points is None:
data = meas_res_dict[list(meas_res_dict)[0]]
data = (data - np.min(data))/(np.max(data) - np.min(data))
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][storing_keys[qb_name]] = data
else:
rotated_data_dict[qb_name][storing_keys[qb_name]] = \
a_tools.rotate_and_normalize_data_1ch(
data=meas_res_dict[list(meas_res_dict)[0]],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
rotated_data_dict[qb_name][other_cs] = \
1 - rotated_data_dict[qb_name][storing_keys[qb_name]]
elif list(meas_res_dict) == channel_map[qb_name]:
# two RO channels per qubit
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=np.array([v for v in meas_res_dict.values()]),
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][storing_keys[qb_name]] = data
rotated_data_dict[qb_name][other_cs] = \
1 - rotated_data_dict[qb_name][storing_keys[qb_name]]
else:
# multiple readouts per qubit per channel
if isinstance(channel_map[qb_name], str):
qb_ro_ch0 = channel_map[qb_name]
else:
qb_ro_ch0 = channel_map[qb_name][0]
ro_suffixes = [s[len(qb_ro_ch0)+1::] for s in
list(meas_res_dict) if qb_ro_ch0 in s]
for i, ro_suf in enumerate(ro_suffixes):
rotated_data_dict[qb_name][ro_suf] = OrderedDict()
if len(ro_suffixes) == len(meas_res_dict):
# one RO ch per qubit
if cal_zero_points is None and cal_one_points is None:
data = meas_res_dict[list(meas_res_dict)[i]]
data = (data - np.min(data))/(np.max(data) - np.min(data))
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf][
storing_keys[qb_name]] = data
else:
rotated_data_dict[qb_name][ro_suf][
storing_keys[qb_name]] = \
a_tools.rotate_and_normalize_data_1ch(
data=meas_res_dict[list(meas_res_dict)[i]],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
else:
# two RO ch per qubit
keys = [k for k in meas_res_dict if ro_suf in k]
correct_keys = [k for k in keys
if k[len(qb_ro_ch0)+1::] == ro_suf]
data_array = np.array([meas_res_dict[k]
for k in correct_keys])
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf][
storing_keys[qb_name]] = data
rotated_data_dict[qb_name][ro_suf][other_cs] = \
1 - rotated_data_dict[qb_name][ro_suf][storing_keys[qb_name]]
return rotated_data_dict
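    # Numeric sketch of the no-cal-points branch above (made-up values):
    # data = [1.0, 3.0, 2.0] is min/max-normalized to [0.0, 1.0, 0.5], and
    # a_tools.set_majority_sign(data, -1) then (presumably by inverting the
    # trace when needed) makes the majority of points sit on the low side,
    # as expected for data that is mostly in the ground state.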
@staticmethod
def rotate_data_3_cal_states_TwoD(qb_name, meas_results_per_qb,
channel_map, cal_states_dict):
# FOR 3 CAL STATES
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict = OrderedDict()
rotated_data_dict[qb_name] = OrderedDict()
cal_pts_idxs = list(cal_states_dict[qb_name].values())
cal_points_data = np.zeros((len(cal_pts_idxs), 2))
if list(meas_res_dict) == channel_map[qb_name]:
# two RO channels per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'] = np.zeros(
raw_data_arr.shape)
for col in range(raw_data_arr.shape[1]):
raw_data = np.concatenate([
v[:, col].reshape(len(v[:, col]), 1) for
v in meas_res_dict.values()], axis=1)
for i, cal_idx in enumerate(cal_pts_idxs):
cal_points_data[i, :] = np.mean(raw_data[cal_idx, :],
axis=0)
# rotated data is (raw_data_arr.shape[0], 3)
rotated_data = predict_proba_avg_ro(
raw_data, cal_points_data)
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'][:, col] = \
rotated_data[:, i]
else:
raise NotImplementedError('Calibration states rotation with 3 '
'cal states only implemented for '
'2 readout channels per qubit.')
# transpose data
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'] = \
rotated_data_dict[qb_name][f'p{state}'].T
return rotated_data_dict
@staticmethod
def global_pca_TwoD(qb_name, meas_results_per_qb, channel_map,
storing_keys, data_mostly_g=True):
meas_res_dict = meas_results_per_qb[qb_name]
if list(meas_res_dict) != channel_map[qb_name]:
raise NotImplementedError('Global PCA is only implemented '
'for two-channel RO!')
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rotated_data_dict = OrderedDict({qb_name: OrderedDict()})
rotated_data_dict[qb_name][storing_keys[qb_name]] = \
deepcopy(raw_data_arr.transpose())
data_array = np.array(
[v.T.flatten() for v in meas_res_dict.values()])
rot_flat_data, _, _ = \
a_tools.rotate_and_normalize_data_IQ(
data=data_array)
data = np.reshape(rot_flat_data, raw_data_arr.T.shape)
data = a_tools.set_majority_sign(data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][storing_keys[qb_name]] = data
return rotated_data_dict
@staticmethod
def rotate_data_TwoD(qb_name, meas_results_per_qb, channel_map,
cal_states_dict, storing_keys,
column_PCA=False, data_mostly_g=True):
# ONLY WORKS FOR 2 CAL STATES
qb_cal_states = cal_states_dict[qb_name].keys()
if len(qb_cal_states) != 2:
raise ValueError(f'Expected two cal states for {qb_name} '
f'but found {len(qb_cal_states)}: {qb_cal_states}')
other_cs = [cs for cs in qb_cal_states if cs != storing_keys[qb_name][-1]]
if len(other_cs) == 0:
raise ValueError(f'There are no other cal states except for '
f'{storing_keys[qb_name][-1]} from storing_keys.')
elif len(other_cs) > 1:
raise ValueError(f'There is more than one other cal state in '
f'addition to {storing_keys[qb_name][-1]} from '
f'storing_keys. Not clear which one to use.')
other_cs = f'p{other_cs[0]}'
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict = OrderedDict()
if len(cal_states_dict[qb_name]) == 0:
cal_zero_points = None
cal_one_points = None
else:
cal_zero_points = list(cal_states_dict[qb_name].values())[0]
cal_one_points = list(cal_states_dict[qb_name].values())[1]
rotated_data_dict[qb_name] = OrderedDict()
if len(meas_res_dict) == 1:
# one RO channel per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rotated_data_dict[qb_name][storing_keys[qb_name]] = \
deepcopy(raw_data_arr.transpose())
if column_PCA:
for row in range(raw_data_arr.shape[0]):
data = a_tools.rotate_and_normalize_data_1ch(
data=raw_data_arr[row, :],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][storing_keys[qb_name]][
:, row] = data
else:
for col in range(raw_data_arr.shape[1]):
data = a_tools.rotate_and_normalize_data_1ch(
data=raw_data_arr[:, col],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][storing_keys[qb_name]][col] = data
rotated_data_dict[qb_name][other_cs] = \
1 - rotated_data_dict[qb_name][storing_keys[qb_name]]
elif list(meas_res_dict) == channel_map[qb_name]:
# two RO channels per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rotated_data_dict[qb_name][storing_keys[qb_name]] = \
deepcopy(raw_data_arr.transpose())
if column_PCA:
for row in range(raw_data_arr.shape[0]):
data_array = np.array(
[v[row, :] for v in meas_res_dict.values()])
data, _, _ = \
a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][storing_keys[qb_name]][
:, row] = data
else:
for col in range(raw_data_arr.shape[1]):
data_array = np.array(
[v[:, col] for v in meas_res_dict.values()])
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][
storing_keys[qb_name]][col] = data
rotated_data_dict[qb_name][other_cs] = \
1 - rotated_data_dict[qb_name][storing_keys[qb_name]]
else:
# multiple readouts per qubit per channel
if isinstance(channel_map[qb_name], str):
qb_ro_ch0 = channel_map[qb_name]
else:
qb_ro_ch0 = channel_map[qb_name][0]
ro_suffixes = [s[len(qb_ro_ch0)+1::] for s in
list(meas_res_dict) if qb_ro_ch0 in s]
for i, ro_suf in enumerate(ro_suffixes):
rotated_data_dict[qb_name][ro_suf] = OrderedDict()
if len(ro_suffixes) == len(meas_res_dict):
# one RO ch per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[i]]
rotated_data_dict[qb_name][ro_suf][storing_keys[qb_name]] = \
deepcopy(raw_data_arr.transpose())
for col in range(raw_data_arr.shape[1]):
data = a_tools.rotate_and_normalize_data_1ch(
data=raw_data_arr[:, col],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf][
storing_keys[qb_name]][col] = data
else:
# two RO ch per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[i]]
rotated_data_dict[qb_name][ro_suf][storing_keys[qb_name]] = \
deepcopy(raw_data_arr.transpose())
for col in range(raw_data_arr.shape[1]):
data_array = np.array(
[v[:, col] for k, v in meas_res_dict.items()
if ro_suf in k])
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf][
storing_keys[qb_name]][col] = data
rotated_data_dict[qb_name][ro_suf][other_cs] = \
1 - rotated_data_dict[qb_name][ro_suf][storing_keys[qb_name]]
return rotated_data_dict
@staticmethod
def rotate_data_TwoD_same_fixed_cal_idxs(qb_name, meas_results_per_qb,
channel_map, cal_states_dict,
storing_keys):
meas_res_dict = meas_results_per_qb[qb_name]
if list(meas_res_dict) != channel_map[qb_name]:
raise NotImplementedError('rotate_data_TwoD_same_fixed_cal_idxs '
'only implemented for two-channel RO!')
if len(cal_states_dict[qb_name]) == 0:
cal_zero_points = None
cal_one_points = None
else:
cal_zero_points = list(cal_states_dict[qb_name].values())[0]
cal_one_points = list(cal_states_dict[qb_name].values())[1]
# do pca on the one cal states
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rot_dat_e = np.zeros(raw_data_arr.shape[1])
for row in cal_one_points:
rot_dat_e += a_tools.rotate_and_normalize_data_IQ(
data=np.array([v[row, :] for v in meas_res_dict.values()]),
cal_zero_points=None, cal_one_points=None)[0]
rot_dat_e /= len(cal_one_points)
# find the values of the zero and one cal points
col_idx = np.argmax(np.abs(rot_dat_e))
zero_coord = [np.mean([v[r, col_idx] for r in cal_zero_points])
for v in meas_res_dict.values()]
one_coord = [np.mean([v[r, col_idx] for r in cal_one_points])
for v in meas_res_dict.values()]
# rotate all data based on the fixed zero_coord and one_coord
rotated_data_dict = OrderedDict({qb_name: OrderedDict()})
rotated_data_dict[qb_name][storing_keys[qb_name]] = \
deepcopy(raw_data_arr.transpose())
for col in range(raw_data_arr.shape[1]):
data_array = np.array(
[v[:, col] for v in meas_res_dict.values()])
rotated_data_dict[qb_name][
storing_keys[qb_name]][col], _, _ = \
a_tools.rotate_and_normalize_data_IQ(
data=data_array,
zero_coord=zero_coord,
one_coord=one_coord)
return rotated_data_dict, zero_coord, one_coord
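    # Rotation-frame sketch: the excited-state calibration rows are first
    # PCA-rotated to find the soft-sweep column with the largest |e> signal;
    # the mean I/Q voltages of the zero and one cal points in that column
    # then define a single fixed rotation axis (zero_coord, one_coord) that
    # is applied to every column of the 2D data.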
def get_transition_name(self, qb_name):
"""
Extracts the transition_name:
- first by taking transition_name_input from the task in task_list
for qb_name
- then from options_dict/metadata
If not found in any of the above, it is inferred from data_to_fit.
:param qb_name: qubit name
:return: string indicating the transition name ("ge", "ef", etc.)
"""
task_list = self.get_param_value('task_list')
trans_name = self.get_param_value('transition_name')
if task_list is not None:
task = [t for t in task_list if t['qb'] == qb_name][0]
trans_name = task.get('transition_name_input', trans_name)
if trans_name is None:
if 'h' in self.data_to_fit.get(qb_name, ''):
trans_name = 'fh'
elif 'f' in self.data_to_fit.get(qb_name, ''):
trans_name = 'ef'
else:
trans_name = 'ge'
return trans_name
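    # Example: with no transition name given in the task list or in
    # options_dict/metadata, data_to_fit = {'qb1': 'pf'} makes this method
    # return 'ef' for qb1, since 'f' occurs in 'pf'.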
def get_xaxis_label_unit(self, qb_name):
hard_sweep_params = self.get_param_value('hard_sweep_params')
sweep_name = self.get_param_value('sweep_name')
sweep_unit = self.get_param_value('sweep_unit')
if self.sp is not None:
main_sp = self.get_param_value('main_sp', None)
if main_sp is not None and qb_name in main_sp:
param_names = [main_sp[qb_name]]
else:
param_names = self.mospm[qb_name]
_, xunit, xlabel = self.sp.get_sweep_params_description(
param_names=param_names, dimension=0)[0]
elif hard_sweep_params is not None:
xlabel = list(hard_sweep_params)[0]
xunit = list(hard_sweep_params.values())[0][
'unit']
elif (sweep_name is not None) and (sweep_unit is not None):
xlabel = sweep_name
xunit = sweep_unit
else:
xlabel = self.raw_data_dict['sweep_parameter_names']
xunit = self.raw_data_dict['sweep_parameter_units']
if np.ndim(xlabel) > 0:
xlabel = xlabel[0]
if np.ndim(xunit) > 0:
xunit = xunit[0]
return xlabel, xunit
@staticmethod
def get_cal_state_color(cal_state_label):
if cal_state_label == 'g' or cal_state_label == r'$|g\rangle$':
return 'k'
elif cal_state_label == 'e' or cal_state_label == r'$|e\rangle$':
return 'gray'
elif cal_state_label == 'f' or cal_state_label == r'$|f\rangle$':
return 'C8'
elif cal_state_label == 'h' or cal_state_label == r'$|h\rangle$':
return 'C5'
else:
return 'C6'
@staticmethod
def get_latex_prob_label(prob_label):
if '$' in prob_label:
return prob_label
elif 'p' in prob_label.lower():
return r'$|{}\rangle$'.format(prob_label[-1])
else:
return r'$|{}\rangle$'.format(prob_label)
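    # Example: get_latex_prob_label('pe') returns r'$|e\rangle$', while a
    # label that already contains '$' is passed through unchanged.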
def get_yaxis_label(self, data_key=None, qb_name=None):
if 'pca' in self.rotation_type.lower() or not len(self.cal_states_dict):
return 'Strongest principal component (arb.)'
else:
if data_key is None:
if qb_name is not None and \
self.data_to_fit.get(qb_name, None) is not None:
return '{} state population'.format(
self.get_latex_prob_label(self.data_to_fit[qb_name]))
else:
return 'Measured data'
else:
return '{} state population'.format(
self.get_latex_prob_label(data_key))
def _get_single_shots_per_qb(self, raw=False):
"""
Gets single shots from the proc_data_dict and arranges
them as arrays per qubit
Args:
raw (bool): whether or not to return raw shots (before
data filtering)
Returns: shots_per_qb: dict where keys are qb_names and
values are arrays of shape (n_shots, n_value_names) for
1D measurements and (n_shots*n_soft_sp, n_value_names) for
2D measurements
"""
# prepare data in convenient format, i.e. arrays per qubit
        shots_per_qb = dict()  # stores the shots per qubit
pdd = self.proc_data_dict # for convenience of notation
key = 'meas_results_per_qb'
if raw:
key += "_raw"
for qbn in self.qb_names:
# if "1D measurement" , shape is (n_shots, n_vn) i.e. one
# column for each value_name (often equal to n_ro_ch)
shots_per_qb[qbn] = \
np.asarray(list(
pdd[key][qbn].values())).T
# if "2D measurement" reshape from (n_soft_sp, n_shots, n_vn)
# to ( n_shots * n_soft_sp, n_ro_ch)
if np.ndim(shots_per_qb[qbn]) == 3:
                assert self.get_param_value("TwoD", False), \
                    "'TwoD' is False but single shot data seems to be 2D"
n_vn = shots_per_qb[qbn].shape[-1]
# put softsweep as inner most loop for easier processing
shots_per_qb[qbn] = np.swapaxes(shots_per_qb[qbn], 0, 1)
# reshape to 2D array
shots_per_qb[qbn] = shots_per_qb[qbn].reshape((-1, n_vn))
# make 2D array in case only one channel (1D array)
elif np.ndim(shots_per_qb[qbn]) == 1:
shots_per_qb[qbn] = np.expand_dims(shots_per_qb[qbn],
axis=-1)
return shots_per_qb
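    # Shape bookkeeping sketch (illustrative, made-up sizes): for a 2D
    # measurement with n_soft_sp=2, n_shots=3 and n_vn=2,
    #   >>> a = np.arange(12).reshape(2, 3, 2)  # (n_soft_sp, n_shots, n_vn)
    #   >>> np.swapaxes(a, 0, 1).reshape(-1, 2).shape
    #   (6, 2)
    # with rows ordered as (shot0_ssp0, shot0_ssp1, shot1_ssp0, ...).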
def _get_preselection_masks(self, presel_shots_per_qb, preselection_qbs=None,
predict_proba=True,
classifier_params=None,
preselection_state_int=0):
"""
Prepares preselection masks for each qubit considered in the keys of
"preselection_qbs" using the preslection readouts of presel_shots_per_qb.
Note: this function replaces the use of the "data_filter" lambda function
in the case of single_shot readout.
TODO: in the future, it might make sense to merge this function
with the data_filter.
Args:
presel_shots_per_qb (dict): {qb_name: preselection_shot_readouts}
preselection_qbs (dict): keys are the qubits for which the masks have to be
computed and values are list of qubit to consider jointly for preselection.
e.g. {"qb1": ["qb1", "qb2"], "qb2": ["qb2"]}. In this case shots of qb1 will
only be kept if both qb1 and qb2 are in the state specified by
preselection_state_int (usually, the ground state), while qb2 is preselected
independently of qb1.
Defaults to None: in this case each qubit is preselected independently from others
            predict_proba (bool): whether or not to consider the input as raw
                voltage shots. Should be False if the input shots are already
                probabilities, e.g. when using classified readout.
classifier_params (dict): classifier params
preselection_state_int (int): integer corresponding to the state of the classifier
on which preselection should be performed. Defaults to 0 (i.e. ground state
in most cases).
Returns:
preselection_masks (dict): dictionary of boolean arrays of shots to keep
(indicated with True) for each qubit
"""
presel_mask_single_qb = {}
for qbn, presel_shots in presel_shots_per_qb.items():
if not predict_proba:
# shots were obtained with classifier detector and
# are already probas
presel_proba = presel_shots_per_qb[qbn]
else:
# use classifier calibrated to classify preselection readouts
presel_proba = a_tools.predict_gm_proba_from_clf(
presel_shots_per_qb[qbn], classifier_params[qbn])
presel_classified = np.argmax(presel_proba, axis=1)
# create boolean array of shots to keep.
# each time ro is the ground state --> true otherwise false
presel_mask_single_qb[qbn] = presel_classified == preselection_state_int
if np.sum(presel_mask_single_qb[qbn]) == 0:
                # FIXME (Nathan): this should probably not be an error but
                #  just continue without preselection?
raise ValueError(f"{qbn}: No data left after preselection!")
# compute final mask taking into account all qubits in presel_qubits for each qubit
presel_mask = {}
if preselection_qbs is None:
            # default is each qubit preselected individually
            preselection_qbs = {qbn: [qbn] for qbn in presel_shots_per_qb}
        for qbn, presel_qbs in preselection_qbs.items():
            if len(presel_qbs) == 1:
                # duplicate the single qubit name since logical_and.reduce()
                # below is called with at least two arguments
                presel_qbs = [presel_qbs[0], presel_qbs[0]]
presel_mask[qbn] = np.logical_and.reduce(
[presel_mask_single_qb[qb] for qb in presel_qbs])
return presel_mask
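    # Example (illustrative): with preselection_qbs = {'qb1': ['qb1', 'qb2']}
    # and single-qubit masks qb1 = [True, True, False],
    # qb2 = [True, False, True], the mask stored for qb1 is their logical
    # AND, [True, False, False]: a qb1 shot is kept only if both qubits were
    # classified in the preselection state for that shot.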
def process_single_shots(self, predict_proba=True,
classifier_params=None,
states_map=None):
"""
Processes single shots from proc_data_dict("meas_results_per_qb")
This includes assigning probabilities to each shot (optional),
preselect shots on the ground state if there is a preselection readout,
average the shots/probabilities.
Args:
predict_proba (bool): whether or not to assign probabilities to shots.
If True, it assumes that shots in the proc_data_dict are the
raw voltages on n channels. If False, it assumes either that
shots were acquired with the classifier detector (i.e. shots
are the probabilities of being in each state of the classifier)
or that they are raw voltages. Note that when preselection
the function checks for "classified_ro" and if it is false,
(i.e. the input are raw voltages and not probas) then it uses
the classifier on the preselection readouts regardless of the
"predict_proba" flag (preselection requires classif of ground state).
classifier_params (dict): dict where keys are qb_names and values
are dictionaries of classifier parameters passed to
                a_tools.predict_gm_proba_from_clf(). Defaults to
                qb.acq_classifier_params().
states_map (dict):
list of states corresponding to the different integers output
by the classifier. Defaults to {0: "g", 1: "e", 2: "f", 3: "h"}
Other parameters taken from self.get_param_value:
use_preselection (bool): whether or not preselection should be used
before averaging. If true, then checks if there is a preselection
readout in prep_params and if so, performs preselection on the
ground state
n_shots (int): number of shots per readout. Used to infer the number
                of readouts. Defaults to qb.acq_shots. WATCH OUT: the
                multi-qubit detector sometimes uses
                max(qb.acq_shots() for qb in qbs), such that the acq_shots
                found in the hdf5 file might differ from the actual number of
                shots used in the experiment. It is therefore safer to pass
                the number of shots in the metadata.
TwoD (bool): Whether data comes from a 2D sweep, i.e. several concatenated
sequences. Used for proper reshaping when using preselection
        Returns:
            None; the averaged shots (or probabilities) overwrite
            proc_data_dict['meas_results_per_qb'], while the unaveraged
            shots are kept in proc_data_dict['single_shots_per_qb'].
"""
if states_map is None:
states_map = {0: "g", 1: "e", 2: "f", 3: "h"}
# get preselection information
prep_params_presel = self.prep_params.get('preparation_type', "wait") \
== "preselection"
use_preselection = self.get_param_value("use_preselection", True)
# activate preselection flag only if preselection is in prep_params
# and the user wants to use the preselection readouts
preselection = prep_params_presel and use_preselection
# returns for each qb: (n_shots, n_ch) or (n_soft_sp* n_shots, n_ch)
# where n_soft_sp is the inner most loop i.e. the first dim is ordered as
# (shot0_ssp0, shot0_ssp1, ... , shot1_ssp0, shot1_ssp1, ...)
shots_per_qb = self._get_single_shots_per_qb()
# save single shots in proc_data_dict, as they will be overwritten in
# 'meas_results_per_qb' with their averaged values for the rest of the
# analysis to work.
self.proc_data_dict['single_shots_per_qb'] = deepcopy(shots_per_qb)
# determine number of shots
n_shots = self.get_param_value("n_shots")
if n_shots is None:
# FIXME: this extraction of number of shots won't work with soft repetitions.
n_shots_from_hdf = [
int(self.get_hdf_param_value(f"Instrument settings/{qbn}",
"acq_shots")) for qbn in self.qb_names]
if len(np.unique(n_shots_from_hdf)) > 1:
log.warning("Number of shots extracted from hdf are not all the same:"
"assuming n_shots=max(qb.acq_shots() for qb in qb_names)")
n_shots = np.max(n_shots_from_hdf)
# determine number of readouts per sequence
if self.get_param_value("TwoD", False):
n_seqs = self.sp.length(1) # corresponds to number of soft sweep points
else:
n_seqs = 1
        # n_readouts refers to the number of readouts per sequence after
        # filtering out e.g. preselection readouts
n_readouts = list(shots_per_qb.values())[0].shape[0] // (n_shots * n_seqs)
# get classification parameters
if classifier_params is None:
classifier_params = {}
from numpy import array # for eval
for qbn in self.qb_names:
classifier_params[qbn] = eval(self.get_hdf_param_value(
f'Instrument settings/{qbn}', "acq_classifier_params"))
# prepare preselection mask
if preselection:
# get preselection readouts
shots_per_qb_before_filtering = self._get_single_shots_per_qb(raw=True)
n_ro_before_filtering = \
list(shots_per_qb_before_filtering.values())[0].shape[0] // \
(n_shots * n_seqs)
preselection_ro_mask = \
np.tile([True] * n_seqs +
[False] * (n_ro_before_filtering - n_readouts) * n_seqs,
n_shots * n_readouts)
presel_shots_per_qb = \
{qbn: presel_shots[preselection_ro_mask] for qbn, presel_shots in
shots_per_qb_before_filtering.items()}
# create boolean array of shots to keep.
# each time ro is the ground state --> true otherwise false
g_state_int = [k for k, v in states_map.items() if v == "g"][0]
preselection_masks = self._get_preselection_masks(
presel_shots_per_qb,
preselection_qbs=self.get_param_value("preselection_qbs"),
predict_proba= not self.get_param_value('classified_ro', False),
classifier_params=classifier_params,
preselection_state_int=g_state_int)
            self.proc_data_dict['percent_data_after_presel'] = {}  # initialize
else:
# keep all shots
preselection_masks = {qbn: np.ones(len(shots), dtype=bool)
for qbn, shots in shots_per_qb.items()}
self.proc_data_dict['preselection_masks'] = preselection_masks
# process single shots per qubit
for qbn, shots in shots_per_qb.items():
if predict_proba:
# shots become probabilities with shape (n_shots, n_states)
try:
shots = a_tools.predict_gm_proba_from_clf(
shots, classifier_params[qbn])
except ValueError as e:
                    log.error('If the following error relates to the number'
                              ' of features, probably wrong classifier'
                              ' parameters were passed (e.g. a classifier'
                              ' trained with a different number of channels'
                              f' than in the current measurement): {e}')
raise e
                if 'single_shots_per_qb_probs' not in self.proc_data_dict:
self.proc_data_dict['single_shots_per_qb_probs'] = {}
self.proc_data_dict['single_shots_per_qb_probs'][qbn] = shots
# TODO: Nathan: if predict_proba is activated then we should
# first classify, then do a count table and thereby estimate
# average proba
averaged_shots = [] # either raw voltage shots or probas
preselection_percentages = []
for ro in range(n_readouts*n_seqs):
shots_single_ro = shots[ro::n_readouts*n_seqs]
presel_mask_single_ro = preselection_masks[qbn][ro::n_readouts*n_seqs]
preselection_percentages.append(100*np.sum(presel_mask_single_ro)/
len(presel_mask_single_ro))
averaged_shots.append(
np.mean(shots_single_ro[presel_mask_single_ro], axis=0))
if self.get_param_value("TwoD", False):
averaged_shots = np.reshape(averaged_shots, (n_readouts, n_seqs, -1))
averaged_shots = np.swapaxes(averaged_shots, 0, 1) # return to original 2D shape
# reshape to (n_prob or n_ch or 1, n_readouts) if 1d
# or (n_prob or n_ch or 1, n_readouts, n_ssp) if 2d
averaged_shots = np.array(averaged_shots).T
if preselection:
self.proc_data_dict['percent_data_after_presel'][qbn] = \
f"{np.mean(preselection_percentages):.2f} $\\pm$ " \
f"{np.std(preselection_percentages):.2f}%"
if predict_proba:
# value names are different from what was previously in
# meas_results_per_qb and therefore "artificial" values
# are made based on states
self.proc_data_dict['meas_results_per_qb'][qbn] = \
{"p" + states_map[i]: p for i, p in enumerate(averaged_shots)}
else:
# reuse value names that were already there if did not classify
for i, k in enumerate(
self.proc_data_dict['meas_results_per_qb'][qbn]):
self.proc_data_dict['meas_results_per_qb'][qbn][k] = \
averaged_shots[i]
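    # Averaging sketch (illustrative): with n_readouts=2 and n_seqs=1, the
    # slice shots[0::2] collects all shots of the first readout and
    # shots[1::2] those of the second; each slice is filtered by its
    # preselection mask before np.mean, so 'meas_results_per_qb' ends up
    # holding one averaged value (or probability vector) per readout.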
def prepare_plots(self):
if self.get_param_value('plot_proj_data', default_value=True):
select_split = self.get_param_value('select_split')
fig_name_suffix = self.get_param_value('fig_name_suffix', '')
title_suffix = self.get_param_value('title_suffix', '')
for qb_name, corr_data in self.proc_data_dict[
'projected_data_dict'].items():
fig_name = f'projected_plot_{qb_name}'
title_suf = title_suffix
if select_split is not None:
param, idx = select_split[qb_name]
# remove qb_name from param
p = '_'.join([e for e in param.split('_') if e != qb_name])
# create suffix
suf = f'({p}, {str(np.round(idx, 3))})'
# add suffix
fig_name += f'_{suf}'
title_suf = f'{suf}_{title_suf}' if \
len(title_suf) else suf
if isinstance(corr_data, dict):
for data_key, data in corr_data.items():
fn = f'{fig_name}_{data_key}'
if not self.rotate:
data_label = data_key
plot_name_suffix = data_key
plot_cal_points = False
data_axis_label = 'Population'
else:
data_label = 'Data'
plot_name_suffix = ''
plot_cal_points = (
not self.options_dict.get('TwoD', False))
data_axis_label = self.get_yaxis_label(data_key,
qb_name)
tf = f'{data_key}_{title_suf}' if \
len(title_suf) else data_key
self.prepare_projected_data_plot(
fn, data, qb_name=qb_name,
data_label=data_label,
title_suffix=tf,
plot_name_suffix=plot_name_suffix,
fig_name_suffix=fig_name_suffix,
data_axis_label=data_axis_label,
plot_cal_points=plot_cal_points)
else:
fig_name = 'projected_plot_' + qb_name
self.prepare_projected_data_plot(
fig_name, corr_data, qb_name=qb_name,
plot_cal_points=(
not self.options_dict.get('TwoD', False)))
if self.get_param_value('plot_raw_data', default_value=True):
self.prepare_raw_data_plots(plot_filtered=False)
if 'preparation_params' in self.metadata:
if 'active' in self.metadata['preparation_params'].get(
'preparation_type', 'wait'):
self.prepare_raw_data_plots(plot_filtered=True)
def prepare_raw_data_plots(self, plot_filtered=False):
if plot_filtered or not self.data_with_reset:
key = 'meas_results_per_qb'
suffix = 'filtered' if self.data_with_reset else ''
func_for_swpts = lambda qb_name: self.proc_data_dict[
'sweep_points_dict'][qb_name]['sweep_points']
else:
key = 'meas_results_per_qb_raw'
suffix = ''
func_for_swpts = lambda qb_name: self.raw_data_dict[
'hard_sweep_points']
for qb_name, raw_data_dict in self.proc_data_dict[key].items():
if qb_name not in self.qb_names:
continue
sweep_points = func_for_swpts(qb_name)
if len(raw_data_dict) == 1:
numplotsx = 1
numplotsy = 1
elif len(raw_data_dict) == 2:
numplotsx = 1
numplotsy = 2
else:
numplotsx = 2
numplotsy = len(raw_data_dict) // 2 + len(raw_data_dict) % 2
plotsize = self.get_default_plot_params(set=False)['figure.figsize']
fig_title = (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring'] +
'\nRaw data ' + suffix + ' ' + qb_name)
plot_name = 'raw_plot_' + qb_name + suffix
xlabel, xunit = self.get_xaxis_label_unit(qb_name)
for ax_id, ro_channel in enumerate(raw_data_dict):
if self.get_param_value('TwoD', default_value=False):
if self.sp is None:
soft_sweep_params = self.get_param_value(
'soft_sweep_params')
if soft_sweep_params is not None:
yunit = list(soft_sweep_params.values())[0]['unit']
else:
yunit = self.raw_data_dict[
'sweep_parameter_units'][1]
if np.ndim(yunit) > 0:
yunit = yunit[0]
for pn, ssp in self.proc_data_dict['sweep_points_2D_dict'][
qb_name].items():
ylabel = pn
if self.sp is not None:
yunit = self.sp.get_sweep_params_property(
'unit', dimension=1, param_names=pn)
ylabel = self.sp.get_sweep_params_property(
'label', dimension=1, param_names=pn)
self.plot_dicts[f'{plot_name}_{ro_channel}_{pn}'] = {
'fig_id': plot_name + '_' + pn,
'ax_id': ax_id,
'plotfn': self.plot_colorxy,
'xvals': sweep_points,
'yvals': ssp,
'zvals': raw_data_dict[ro_channel].T,
'xlabel': xlabel,
'xunit': xunit,
'ylabel': ylabel,
'yunit': yunit,
'numplotsx': numplotsx,
'numplotsy': numplotsy,
'plotsize': (plotsize[0]*numplotsx,
plotsize[1]*numplotsy),
'title': fig_title,
'clabel': '{} (Vpeak)'.format(ro_channel)}
else:
self.plot_dicts[plot_name + '_' + ro_channel] = {
'fig_id': plot_name,
'ax_id': ax_id,
'plotfn': self.plot_line,
'xvals': sweep_points,
'xlabel': xlabel,
'xunit': xunit,
'yvals': raw_data_dict[ro_channel],
'ylabel': '{} (Vpeak)'.format(ro_channel),
'yunit': '',
'numplotsx': numplotsx,
'numplotsy': numplotsy,
'plotsize': (plotsize[0]*numplotsx,
plotsize[1]*numplotsy),
'title': fig_title}
if len(raw_data_dict) == 1:
self.plot_dicts[
plot_name + '_' + list(raw_data_dict)[0]]['ax_id'] = None
def prepare_projected_data_plot(
self, fig_name, data, qb_name, title_suffix='', sweep_points=None,
plot_cal_points=True, plot_name_suffix='', fig_name_suffix='',
data_label='Data', data_axis_label='', do_legend_data=True,
do_legend_cal_states=True, TwoD=None, yrange=None):
if len(fig_name_suffix):
fig_name = f'{fig_name}_{fig_name_suffix}'
if data_axis_label == '':
data_axis_label = self.get_yaxis_label(qb_name=qb_name)
plotsize = self.get_default_plot_params(set=False)['figure.figsize']
plotsize = (plotsize[0], plotsize[0]/1.25)
if sweep_points is None:
sweep_points = self.proc_data_dict['sweep_points_dict'][qb_name][
'sweep_points']
plot_names_cal = []
if plot_cal_points and self.num_cal_points != 0:
yvals = data[:-self.num_cal_points]
xvals = sweep_points[:-self.num_cal_points]
# plot cal points
for i, cal_pts_idxs in enumerate(
self.cal_states_dict.values()):
plot_dict_name_cal = fig_name + '_' + \
list(self.cal_states_dict)[i] + '_' + \
plot_name_suffix
plot_names_cal += [plot_dict_name_cal]
self.plot_dicts[plot_dict_name_cal] = {
'fig_id': fig_name,
'plotfn': self.plot_line,
'plotsize': plotsize,
'xvals': sweep_points[cal_pts_idxs],
'yvals': data[cal_pts_idxs],
'setlabel': list(self.cal_states_dict)[i],
'do_legend': do_legend_cal_states,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left',
'linestyle': 'none',
'line_kws': {'color': self.get_cal_state_color(
list(self.cal_states_dict)[i])},
'yrange': yrange,
}
self.plot_dicts[plot_dict_name_cal+'_line'] = {
'fig_id': fig_name,
'plotsize': plotsize,
'plotfn': self.plot_hlines,
'y': np.mean(data[cal_pts_idxs]),
'xmin': sweep_points[0],
'xmax': sweep_points[-1],
'colors': 'gray'}
else:
yvals = data
xvals = sweep_points
title = (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring'])
title += '\n' + f'{qb_name}_{title_suffix}' if len(title_suffix) else \
' ' + qb_name
plot_dict_name = f'{fig_name}_{plot_name_suffix}'
xlabel, xunit = self.get_xaxis_label_unit(qb_name)
if TwoD is None:
TwoD = self.get_param_value('TwoD', default_value=False)
if TwoD:
if self.sp is None:
soft_sweep_params = self.get_param_value(
'soft_sweep_params')
if soft_sweep_params is not None:
yunit = list(soft_sweep_params.values())[0]['unit']
else:
yunit = self.raw_data_dict['sweep_parameter_units'][1]
if np.ndim(yunit) > 0:
yunit = yunit[0]
for pn, ssp in self.proc_data_dict['sweep_points_2D_dict'][
qb_name].items():
ylabel = pn
if self.sp is not None:
yunit = self.sp.get_sweep_params_property(
'unit', dimension=1, param_names=pn)
ylabel = self.sp.get_sweep_params_property(
'label', dimension=1, param_names=pn)
self.plot_dicts[f'{plot_dict_name}_{pn}'] = {
'plotfn': self.plot_colorxy,
'fig_id': fig_name + '_' + pn,
'xvals': xvals,
'yvals': ssp,
'zvals': yvals,
'xlabel': xlabel,
'xunit': xunit,
'ylabel': ylabel,
'yunit': yunit,
'zrange': self.get_param_value('zrange', None),
'title': title,
'clabel': data_axis_label}
else:
self.plot_dicts[plot_dict_name] = {
'plotfn': self.plot_line,
'fig_id': fig_name,
'plotsize': plotsize,
'xvals': xvals,
'xlabel': xlabel,
'xunit': xunit,
'yvals': yvals,
'ylabel': data_axis_label,
'yunit': '',
'setlabel': data_label,
'title': title,
'linestyle': 'none',
'do_legend': do_legend_data,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
# add plot_params to each plot dict
plot_params = self.get_param_value('plot_params', default_value={})
for plt_name in self.plot_dicts:
self.plot_dicts[plt_name].update(plot_params)
if len(plot_names_cal) > 0:
if do_legend_data and not do_legend_cal_states:
for plot_name in plot_names_cal:
plot_dict_cal = self.plot_dicts.pop(plot_name)
self.plot_dicts[plot_name] = plot_dict_cal
def get_first_sweep_param(self, qbn=None, dimension=0):
"""
Get properties of the first sweep param in the given dimension
(potentially for the given qubit).
:param qbn: (str) qubit name. If None, all sweep params are considered.
        :param dimension: (int, default: 0) sweep dimension to be considered.
:return: a 3-tuple of label, unit, and array of values
"""
if not hasattr(self, 'mospm'):
return None
        if qbn is None:
            param_name = [p for v in self.mospm.values() for p in v
                          if self.sp.find_parameter(p) == dimension]
        else:
            param_name = [p for p in self.mospm[qbn]
                          if self.sp.find_parameter(p) == dimension]
if not len(param_name):
return None
param_name = param_name[0]
label = self.sp.get_sweep_params_property(
'label', dimension=dimension, param_names=param_name)
unit = self.sp.get_sweep_params_property(
'unit', dimension=dimension, param_names=param_name)
vals = self.sp.get_sweep_params_property(
'values', dimension=dimension, param_names=param_name)
return label, unit, vals
class Idling_Error_Rate_Analyisis(ba.BaseDataAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
post_sel_th = self.options_dict.get('post_sel_th', 0.5)
raw_shots = self.raw_data_dict['measured_values'][0][0]
post_sel_shots = raw_shots[::2]
data_shots = raw_shots[1::2]
data_shots[np.where(post_sel_shots > post_sel_th)] = np.nan
states = ['0', '1', '+']
self.proc_data_dict['xvals'] = np.unique(self.raw_data_dict['xvals'])
for i, state in enumerate(states):
            self.proc_data_dict['shots_{}'.format(state)] = data_shots[i::3]
self.proc_data_dict['yvals_{}'.format(state)] = \
np.nanmean(np.reshape(self.proc_data_dict['shots_{}'.format(state)],
(len(self.proc_data_dict['xvals']), -1),
order='F'), axis=1)
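    # Shot-layout sketch: the raw shot stream interleaves a post-selection
    # readout with a data readout, so raw_shots[::2] are the post-selection
    # shots and raw_shots[1::2] the data shots; the data shots in turn cycle
    # through the prepared states '0', '1', '+', so data_shots[i::3] selects
    # the shots of state i before the nanmean over repetitions.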
def prepare_plots(self):
# assumes that value names are unique in an experiment
states = ['0', '1', '+']
for i, state in enumerate(states):
yvals = self.proc_data_dict['yvals_{}'.format(state)]
xvals = self.proc_data_dict['xvals']
self.plot_dicts['Prepare in {}'.format(state)] = {
'ax_id': 'main',
'plotfn': self.plot_line,
'xvals': xvals,
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': yvals,
'ylabel': 'Counts',
'yrange': [0, 1],
'xrange': self.options_dict.get('xrange', None),
'yunit': 'frac',
'setlabel': 'Prepare in {}'.format(state),
                'do_legend': True,
'title': (self.raw_data_dict['timestamps'][0]+' - ' +
self.raw_data_dict['timestamps'][-1] + '\n' +
self.raw_data_dict['measurementstring'][0]),
'legend_pos': 'upper right'}
if self.do_fitting:
for state in ['0', '1', '+']:
self.plot_dicts['fit_{}'.format(state)] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['fit {}'.format(state)]['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'fit |{}>'.format(state),
'do_legend': True,
'legend_pos': 'upper right'}
            self.plot_dicts['fit_text'] = {
                'ax_id': 'main',
                'box_props': 'fancy',
                'xpos': 1.05,
                'horizontalalignment': 'left',
'plotfn': self.plot_text,
'text_string': self.proc_data_dict['fit_msg']}
def analyze_fit_results(self):
        fit_msg = ''
states = ['0', '1', '+']
for state in states:
fr = self.fit_res['fit {}'.format(state)]
N1 = fr.params['N1'].value, fr.params['N1'].stderr
N2 = fr.params['N2'].value, fr.params['N2'].stderr
            fit_msg += ('Prep |{}> : \n\tN_1 = {:.2g} $\\pm$ {:.2g}'
                        '\n\tN_2 = {:.2g} $\\pm$ {:.2g}\n').format(
                state, N1[0], N1[1], N2[0], N2[1])
self.proc_data_dict['fit_msg'] = fit_msg
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
states = ['0', '1', '+']
for i, state in enumerate(states):
yvals = self.proc_data_dict['yvals_{}'.format(state)]
xvals = self.proc_data_dict['xvals']
mod = lmfit.Model(fit_mods.idle_error_rate_exp_decay)
mod.guess = fit_mods.idle_err_rate_guess.__get__(mod, mod.__class__)
# Done here explicitly so that I can overwrite a specific guess
guess_pars = mod.guess(N=xvals, data=yvals)
            # The 'vary_N2' option allows fixing the double-exponential
            # coefficient N2
            vary_N2 = self.options_dict.get('vary_N2', True)
if not vary_N2:
guess_pars['N2'].value = 1e21
guess_pars['N2'].vary = False
self.fit_dicts['fit {}'.format(states[i])] = {
'model': mod,
'fit_xvals': {'N': xvals},
'fit_yvals': {'data': yvals},
'guess_pars': guess_pars}
class Grovers_TwoQubitAllStates_Analysis(ba.BaseDataAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
self.proc_data_dict = OrderedDict()
normalize_to_cal_points = self.options_dict.get('normalize_to_cal_points', True)
cal_points = [
[[-4, -3], [-2, -1]],
[[-4, -2], [-3, -1]],
]
for idx in [0,1]:
yvals = list(self.raw_data_dict['measured_data'].values())[idx][0]
self.proc_data_dict['ylabel_{}'.format(idx)] = \
self.raw_data_dict['value_names'][0][idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][idx]
if normalize_to_cal_points:
yvals = a_tools.rotate_and_normalize_data_1ch(yvals,
cal_zero_points=cal_points[idx][0],
cal_one_points=cal_points[idx][1])
self.proc_data_dict['yvals_{}'.format(idx)] = yvals
y0 = self.proc_data_dict['yvals_0']
y1 = self.proc_data_dict['yvals_1']
        p_success = (y0[0]*y1[0] +
                     (1-y0[1])*y1[1] +
                     y0[2]*(1-y1[2]) +
                     (1-y0[3])*(1-y1[3]))/4
self.proc_data_dict['p_success'] = p_success
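    # Success-probability sketch: each of the four terms above scores one
    # prepared two-qubit basis state; e.g. ideal outcomes y0 = [1, 0, 1, 0]
    # and y1 = [1, 1, 0, 0] make every term equal to 1 and thus
    # p_success = 1.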
def prepare_plots(self):
# assumes that value names are unique in an experiment
for i in [0, 1]:
yvals = self.proc_data_dict['yvals_{}'.format(i)]
xvals = self.raw_data_dict['xvals'][0]
ylabel = self.proc_data_dict['ylabel_{}'.format(i)]
self.plot_dicts['main_{}'.format(ylabel)] = {
'plotfn': self.plot_line,
'xvals': self.raw_data_dict['xvals'][0],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_{}'.format(i)],
'ylabel': ylabel,
'yunit': self.proc_data_dict['yunit'],
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': False,
'legend_pos': 'upper right'}
        self.plot_dicts['limit_text'] = {
            'ax_id': 'main_{}'.format(ylabel),
            'box_props': 'fancy',
            'xpos': 1.05,
            'horizontalalignment': 'left',
            'plotfn': self.plot_text,
            'text_string': 'P success = {:.3f}'.format(
                self.proc_data_dict['p_success'])}
class FlippingAnalysis(Single_Qubit_TimeDomainAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = True
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'measurementstring': 'measurementstring',
'sweep_points': 'sweep_points',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
# This analysis makes a hardcoded assumption on the calibration points
self.options_dict['cal_points'] = [list(range(-4, -2)),
list(range(-2, 0))]
self.numeric_params = []
if auto:
self.run_analysis()
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
# Even though we expect an exponentially damped oscillation we use
# a simple cosine as this gives more reliable fitting and we are only
# interested in extracting the frequency of the oscillation
cos_mod = lmfit.Model(fit_mods.CosFunc)
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.raw_data_dict['sweep_points'][:-4],
data=self.proc_data_dict['corr_data'][:-4])
# This enforces the oscillation to start at the equator
# and ensures that any over/under rotation is absorbed in the
# frequency
guess_pars['amplitude'].value = 0.5
guess_pars['amplitude'].vary = False
guess_pars['offset'].value = 0.5
guess_pars['offset'].vary = False
self.fit_dicts['cos_fit'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.raw_data_dict['sweep_points'][:-4]},
'fit_yvals': {'data': self.proc_data_dict['corr_data'][:-4]},
'guess_pars': guess_pars}
# In the case there are very few periods we fall back on a small
# angle approximation to extract the drive detuning
poly_mod = lmfit.models.PolynomialModel(degree=1)
        # the detuning can be estimated using a small-angle approximation
# c1 = d/dN (cos(2*pi*f N) ) evaluated at N = 0 -> c1 = -2*pi*f
poly_mod.set_param_hint('frequency', expr='-c1/(2*pi)')
guess_pars = poly_mod.guess(x=self.raw_data_dict['sweep_points'][:-4],
data=self.proc_data_dict['corr_data'][:-4])
# Constraining the line ensures that it will only give a good fit
# if the small angle approximation holds
guess_pars['c0'].vary = False
guess_pars['c0'].value = 0.5
self.fit_dicts['line_fit'] = {
'model': poly_mod,
'fit_xvals': {'x': self.raw_data_dict['sweep_points'][:-4]},
'fit_yvals': {'data': self.proc_data_dict['corr_data'][:-4]},
'guess_pars': guess_pars}
def analyze_fit_results(self):
sf_line = self._get_scale_factor_line()
sf_cos = self._get_scale_factor_cos()
self.proc_data_dict['scale_factor'] = self.get_scale_factor()
msg = 'Scale fact. based on '
if self.proc_data_dict['scale_factor'] == sf_cos:
msg += 'cos fit\n'
else:
msg += 'line fit\n'
msg += 'cos fit: {:.4f}\n'.format(sf_cos)
msg += 'line fit: {:.4f}'.format(sf_line)
self.raw_data_dict['scale_factor_msg'] = msg
# TODO: save scale factor to file
def get_scale_factor(self):
"""
Returns the scale factor that should correct for the error in the
pulse amplitude.
"""
# Model selection based on the Bayesian Information Criterion (BIC)
# as calculated by lmfit
if (self.fit_dicts['line_fit']['fit_res'].bic <
self.fit_dicts['cos_fit']['fit_res'].bic):
scale_factor = self._get_scale_factor_line()
else:
scale_factor = self._get_scale_factor_cos()
return scale_factor
def _get_scale_factor_cos(self):
# 1/period of the oscillation corresponds to the (fractional)
# over/under rotation error per gate
frequency = self.fit_dicts['cos_fit']['fit_res'].params['frequency']
# the square is needed to account for the difference between
# power and amplitude
scale_factor = (1+frequency)**2
phase = np.rad2deg(self.fit_dicts['cos_fit']['fit_res'].params['phase']) % 360
# phase ~90 indicates an under rotation so the scale factor
# has to be larger than 1. A phase ~270 indicates an over
# rotation so then the scale factor has to be smaller than one.
if phase > 180:
scale_factor = 1/scale_factor
return scale_factor
def _get_scale_factor_line(self):
# 1/period of the oscillation corresponds to the (fractional)
# over/under rotation error per gate
frequency = self.fit_dicts['line_fit']['fit_res'].params['frequency']
scale_factor = (1+frequency)**2
# no phase sign check is needed here as this is contained in the
# sign of the coefficient
return scale_factor
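    # Worked example (illustrative): a fitted frequency of +0.01 means each
    # gate accumulates 1% of a period of rotation error, giving
    # scale_factor = (1 + 0.01)**2 = 1.0201; if the cosine phase indicates
    # over-rotation (~270 deg), the reciprocal 1/1.0201 ~ 0.980 is returned
    # instead.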
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.raw_data_dict['sweep_points'],
'xlabel': self.raw_data_dict['xlabel'],
'xunit': self.raw_data_dict['xunit'], # does not do anything yet
'yvals': self.proc_data_dict['corr_data'],
'ylabel': 'Excited state population',
'yunit': '',
'setlabel': 'data',
'title': (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring']),
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['line_fit'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'line fit',
'do_legend': True,
'legend_pos': 'upper right'}
self.plot_dicts['cos_fit'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'cos fit',
'do_legend': True,
'legend_pos': 'upper right'}
self.plot_dicts['text_msg'] = {
'ax_id': 'main',
'ypos': 0.15,
'plotfn': self.plot_text,
'box_props': 'fancy',
'text_string': self.raw_data_dict['scale_factor_msg']}
class Intersect_Analysis(Single_Qubit_TimeDomainAnalysis):
"""
Analysis to extract the intercept of two parameters.
    Relevant options_dict parameters:
        ch_idx_A (int): specifies the first channel for the intercept
        ch_idx_B (int): specifies the second channel for the intercept; if
            it is the same as the first, the data is assumed to be
            interleaved.
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xvals': 'sweep_points',
'xunit': 'sweep_unit',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
        Selects the relevant acq channel based on "ch_idx_A" and "ch_idx_B"
        specified in the options dict. If ch_idx_A and ch_idx_B are the same,
        the data is assumed to be interleaved and is unzipped.
"""
self.proc_data_dict = deepcopy(self.raw_data_dict)
# The channel containing the data must be specified in the options dict
ch_idx_A = self.options_dict.get('ch_idx_A', 0)
ch_idx_B = self.options_dict.get('ch_idx_B', 0)
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][ch_idx_A]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][ch_idx_A]
if ch_idx_A == ch_idx_B:
yvals = list(self.raw_data_dict['measured_data'].values())[ch_idx_A][0]
self.proc_data_dict['xvals_A'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_B'] = self.raw_data_dict['xvals'][0][1::2]
self.proc_data_dict['yvals_A'] = yvals[::2]
self.proc_data_dict['yvals_B'] = yvals[1::2]
else:
self.proc_data_dict['xvals_A'] = self.raw_data_dict['xvals'][0]
self.proc_data_dict['xvals_B'] = self.raw_data_dict['xvals'][0]
self.proc_data_dict['yvals_A'] = list(self.raw_data_dict
['measured_data'].values())[ch_idx_A][0]
self.proc_data_dict['yvals_B'] = list(self.raw_data_dict
['measured_data'].values())[ch_idx_B][0]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
self.fit_dicts['line_fit_A'] = {
'model': lmfit.models.PolynomialModel(degree=2),
'fit_xvals': {'x': self.proc_data_dict['xvals_A']},
'fit_yvals': {'data': self.proc_data_dict['yvals_A']}}
self.fit_dicts['line_fit_B'] = {
'model': lmfit.models.PolynomialModel(degree=2),
'fit_xvals': {'x': self.proc_data_dict['xvals_B']},
'fit_yvals': {'data': self.proc_data_dict['yvals_B']}}
def analyze_fit_results(self):
fr_0 = self.fit_res['line_fit_A'].best_values
fr_1 = self.fit_res['line_fit_B'].best_values
c0 = (fr_0['c0'] - fr_1['c0'])
c1 = (fr_0['c1'] - fr_1['c1'])
c2 = (fr_0['c2'] - fr_1['c2'])
poly_coeff = [c0, c1, c2]
poly = np.polynomial.polynomial.Polynomial([fr_0['c0'],
fr_0['c1'], fr_0['c2']])
ic = np.polynomial.polynomial.polyroots(poly_coeff)
self.proc_data_dict['intersect_L'] = ic[0], poly(ic[0])
self.proc_data_dict['intersect_R'] = ic[1], poly(ic[1])
        if (np.min(self.proc_data_dict['xvals']) < ic[0] <
                np.max(self.proc_data_dict['xvals'])):
            self.proc_data_dict['intersect'] = \
                self.proc_data_dict['intersect_L']
        else:
            self.proc_data_dict['intersect'] = \
                self.proc_data_dict['intersect_R']
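    # Intersection sketch: both channels are fit with quadratics, so their
    # difference polynomial c0 + c1*x + c2*x**2 (built from the coefficient
    # differences above) vanishes at the crossings;
    # np.polynomial.polynomial.polyroots([c0, c1, c2]) returns both
    # candidates, and the one inside the measured x range is preferred.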
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_A'],
'xlabel': self.proc_data_dict['xlabel'][0],
'xunit': self.proc_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_A'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'A',
'title': (self.proc_data_dict['timestamps'][0] + ' \n' +
self.proc_data_dict['measurementstring'][0]),
'do_legend': True,
'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['on'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['xvals_B'],
'xlabel': self.proc_data_dict['xlabel'][0],
'xunit': self.proc_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_B'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'B',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['line_fit_A'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_A']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit A',
'do_legend': True}
self.plot_dicts['line_fit_B'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_B']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit B',
'do_legend': True}
ic, ic_unit = SI_val_to_msg_str(
self.proc_data_dict['intersect'][0],
self.proc_data_dict['xunit'][0][0], return_type=float)
self.plot_dicts['intercept_message'] = {
'ax_id': 'main',
'plotfn': self.plot_line,
'xvals': [self.proc_data_dict['intersect'][0]],
'yvals': [self.proc_data_dict['intersect'][1]],
'line_kws': {'alpha': .5, 'color':'gray',
'markersize':15},
'marker': 'o',
'setlabel': 'Intercept: {:.1f} {}'.format(ic, ic_unit),
'do_legend': True}
def get_intersect(self):
return self.proc_data_dict['intersect']
class CZ_1QPhaseCal_Analysis(ba.BaseDataAnalysis):
"""
Analysis to extract the intercept for a single qubit phase calibration
experiment
N.B. this is a less generic version of "Intersect_Analysis" and should
be deprecated (MAR Dec 2017)
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
selects the relevant acq channel based on "ch_idx" in options dict and
then splits the data for th
"""
self.proc_data_dict = OrderedDict()
# The channel containing the data must be specified in the options dict
ch_idx = self.options_dict['ch_idx']
yvals = list(self.raw_data_dict['measured_data'].values())[ch_idx][0]
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][ch_idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][ch_idx]
self.proc_data_dict['xvals_off'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_on'] = self.raw_data_dict['xvals'][0][1::2]
self.proc_data_dict['yvals_off'] = yvals[::2]
self.proc_data_dict['yvals_on'] = yvals[1::2]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
self.fit_dicts['line_fit_off'] = {
'model': lmfit.models.PolynomialModel(degree=1),
'fit_xvals': {'x': self.proc_data_dict['xvals_off']},
'fit_yvals': {'data': self.proc_data_dict['yvals_off']}}
self.fit_dicts['line_fit_on'] = {
'model': lmfit.models.PolynomialModel(degree=1),
'fit_xvals': {'x': self.proc_data_dict['xvals_on']},
'fit_yvals': {'data': self.proc_data_dict['yvals_on']}}
def analyze_fit_results(self):
fr_0 = self.fit_res['line_fit_off'].best_values
fr_1 = self.fit_res['line_fit_on'].best_values
ic = -(fr_0['c0'] - fr_1['c0'])/(fr_0['c1'] - fr_1['c1'])
self.proc_data_dict['zero_phase_diff_intersect'] = ic
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_off'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_off'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ off',
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['on'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['xvals_on'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_on'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ on',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['line_fit_off'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_off']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ off',
'do_legend': True}
self.plot_dicts['line_fit_on'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_on']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ on',
'do_legend': True}
ic, ic_unit = SI_val_to_msg_str(
self.proc_data_dict['zero_phase_diff_intersect'],
self.raw_data_dict['xunit'][0][0], return_type=float)
            self.plot_dicts['intercept_message'] = {
                'ax_id': 'main',
                'plotfn': self.plot_line,
                'xvals': [self.proc_data_dict['zero_phase_diff_intersect']],
                # Invisible marker (alpha 0): it only adds the intercept
                # value to the legend. Using a y-value taken from the data
                # keeps the axis autoscaling sensible.
                'yvals': [np.mean(self.proc_data_dict['yvals_on'])],
                'line_kws': {'alpha': 0},
                'setlabel': 'Intercept: {:.1f} {}'.format(ic, ic_unit),
                'do_legend': True}
def get_zero_phase_diff_intersect(self):
return self.proc_data_dict['zero_phase_diff_intersect']
class Oscillation_Analysis(ba.BaseDataAnalysis):
"""
Very basic analysis to determine the phase of a single oscillation
that has an assumed period of 360 degrees.
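
    A minimal usage sketch (the label is hypothetical):

        a = Oscillation_Analysis(label='cz_oscillation',
                                 options_dict={'plot_init': False})
        phase_deg = a.proc_data_dict['phi']

    The frequency of the cosine model is fixed to 1/360 (the sweep is
    assumed to be in degrees), so the fitted phase, converted to degrees,
    is reported directly as 'phi'.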
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
label: str='',
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
self.proc_data_dict = OrderedDict()
idx = 1
self.proc_data_dict['yvals'] = list(self.raw_data_dict['measured_data'].values())[idx][0]
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][idx]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.raw_data_dict['xvals'][0],
data=self.proc_data_dict['yvals'], freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts['cos_fit'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.raw_data_dict['xvals'][0]},
'fit_yvals': {'data': self.proc_data_dict['yvals']},
'guess_pars': guess_pars}
def analyze_fit_results(self):
fr = self.fit_res['cos_fit'].best_values
self.proc_data_dict['phi'] = np.rad2deg(fr['phase'])
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.raw_data_dict['xvals'][0],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
# 'yrange': (0,1),
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['cos_fit'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit',
'do_legend': True}
class Conditional_Oscillation_Analysis(ba.BaseDataAnalysis):
"""
Analysis to extract quantities from a conditional oscillation.
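
    Both the CZ off and CZ on traces are fit with a cosine of fixed period
    (360 degrees); the conditional phase is the difference of the two
    fitted phases, phi_cond = phi_on - phi_off, with the standard errors
    added in quadrature. A usage sketch (the label is hypothetical; the
    channel indices shown are the defaults used in process_data):

        a = Conditional_Oscillation_Analysis(
            label='conditional_oscillation',
            options_dict={'ch_idx_osc': 1, 'ch_idx_spec': 0,
                          'plot_init': False})
        phi_cond, phi_cond_err = a.proc_data_dict['phi_cond']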
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
label: str='',
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
selects the relevant acq channel based on "ch_idx_osc" and
"ch_idx_spec" in the options dict and then splits the data for the
off and on cases
"""
self.proc_data_dict = OrderedDict()
# The channel containing the data must be specified in the options dict
ch_idx_spec = self.options_dict.get('ch_idx_spec', 0)
ch_idx_osc = self.options_dict.get('ch_idx_osc', 1)
normalize_to_cal_points = self.options_dict.get('normalize_to_cal_points', True)
        cal_points = [
            # calibration point indices for the oscillation channel
            [[-4, -3], [-2, -1]],
            # calibration point indices for the spectator channel
            [[-4, -2], [-3, -1]],
        ]
        i = 0
for idx, type_str in zip([ch_idx_osc, ch_idx_spec], ['osc', 'spec']):
yvals = list(self.raw_data_dict['measured_data'].values())[idx][0]
self.proc_data_dict['ylabel_{}'.format(type_str)] = self.raw_data_dict['value_names'][0][idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][idx]
            if normalize_to_cal_points:
                yvals = a_tools.rotate_and_normalize_data_1ch(
                    yvals,
                    cal_zero_points=cal_points[i][0],
                    cal_one_points=cal_points[i][1])
            i += 1

            # Even entries correspond to the CZ off case, odd entries to
            # the CZ on case.
            self.proc_data_dict['yvals_{}_off'.format(type_str)] = yvals[::2]
            self.proc_data_dict['yvals_{}_on'.format(type_str)] = yvals[1::2]

            self.proc_data_dict['xvals_off'] = \
                self.raw_data_dict['xvals'][0][::2]
            self.proc_data_dict['xvals_on'] = \
                self.raw_data_dict['xvals'][0][1::2]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.proc_data_dict['xvals_off'][:-2],
data=self.proc_data_dict['yvals_osc_off'][:-2],
freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts['cos_fit_off'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.proc_data_dict['xvals_off'][:-2]},
'fit_yvals': {'data': self.proc_data_dict['yvals_osc_off'][:-2]},
'guess_pars': guess_pars}
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.proc_data_dict['xvals_on'][:-2],
data=self.proc_data_dict['yvals_osc_on'][:-2],
freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts['cos_fit_on'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.proc_data_dict['xvals_on'][:-2]},
'fit_yvals': {'data': self.proc_data_dict['yvals_osc_on'][:-2]},
'guess_pars': guess_pars}
def analyze_fit_results(self):
fr_0 = self.fit_res['cos_fit_off'].params
fr_1 = self.fit_res['cos_fit_on'].params
phi0 = np.rad2deg(fr_0['phase'].value)
phi1 = np.rad2deg(fr_1['phase'].value)
phi0_stderr = np.rad2deg(fr_0['phase'].stderr)
phi1_stderr = np.rad2deg(fr_1['phase'].stderr)
self.proc_data_dict['phi_0'] = phi0, phi0_stderr
self.proc_data_dict['phi_1'] = phi1, phi1_stderr
        phi_cond_stderr = (phi0_stderr**2 + phi1_stderr**2)**.5
        self.proc_data_dict['phi_cond'] = (phi1 - phi0), phi_cond_stderr

        osc_amp = np.mean([fr_0['amplitude'], fr_1['amplitude']])
        # Propagate the stderr of both amplitudes (the original expression
        # used the amplitude value of the second fit instead of its stderr).
        osc_amp_stderr = np.sqrt(fr_0['amplitude'].stderr**2 +
                                 fr_1['amplitude'].stderr**2)/2
self.proc_data_dict['osc_amp_0'] = (fr_0['amplitude'].value,
fr_0['amplitude'].stderr)
self.proc_data_dict['osc_amp_1'] = (fr_1['amplitude'].value,
fr_1['amplitude'].stderr)
self.proc_data_dict['osc_offs_0'] = (fr_0['offset'].value,
fr_0['offset'].stderr)
self.proc_data_dict['osc_offs_1'] = (fr_1['offset'].value,
fr_1['offset'].stderr)
offs_stderr = (fr_0['offset'].stderr**2+fr_1['offset'].stderr**2)**.5
self.proc_data_dict['offs_diff'] = (
fr_1['offset'].value - fr_0['offset'].value, offs_stderr)
# self.proc_data_dict['osc_amp'] = (osc_amp, osc_amp_stderr)
self.proc_data_dict['missing_fraction'] = (
np.mean(self.proc_data_dict['yvals_spec_on'][:-2]) -
np.mean(self.proc_data_dict['yvals_spec_off'][:-2]))
def prepare_plots(self):
self._prepare_main_oscillation_figure()
self._prepare_spectator_qubit_figure()
def _prepare_main_oscillation_figure(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_off'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_osc_off'],
'ylabel': self.proc_data_dict['ylabel_osc'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ off',
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
# 'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['on'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['xvals_on'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_osc_on'],
'ylabel': self.proc_data_dict['ylabel_osc'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ on',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['cos_fit_off'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_off']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ off',
'do_legend': True}
self.plot_dicts['cos_fit_on'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_on']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ on',
'do_legend': True}
# offset as a guide for the eye
y = self.fit_res['cos_fit_off'].params['offset'].value
            self.plot_dicts['cos_off_offset'] = {
                'plotfn': self.plot_matplot_ax_method,
                'ax_id': 'main',
                'func': 'axhline',
                'plot_kws': {
                    'y': y, 'color': 'C0', 'linestyle': 'dotted'}
            }
phase_message = (
'Phase diff.: {:.1f} $\pm$ {:.1f} deg\n'
                'Phase off: {:.1f} $\pm$ {:.1f} deg\n'
                'Phase on: {:.1f} $\pm$ {:.1f} deg\n'
'Osc. amp. off: {:.4f} $\pm$ {:.4f}\n'
'Osc. amp. on: {:.4f} $\pm$ {:.4f}\n'
'Offs. diff.: {:.4f} $\pm$ {:.4f}\n'
'Osc. offs. off: {:.4f} $\pm$ {:.4f}\n'
'Osc. offs. on: {:.4f} $\pm$ {:.4f}'.format(
self.proc_data_dict['phi_cond'][0],
self.proc_data_dict['phi_cond'][1],
self.proc_data_dict['phi_0'][0],
self.proc_data_dict['phi_0'][1],
self.proc_data_dict['phi_1'][0],
self.proc_data_dict['phi_1'][1],
self.proc_data_dict['osc_amp_0'][0],
self.proc_data_dict['osc_amp_0'][1],
self.proc_data_dict['osc_amp_1'][0],
self.proc_data_dict['osc_amp_1'][1],
self.proc_data_dict['offs_diff'][0],
self.proc_data_dict['offs_diff'][1],
self.proc_data_dict['osc_offs_0'][0],
self.proc_data_dict['osc_offs_0'][1],
self.proc_data_dict['osc_offs_1'][0],
self.proc_data_dict['osc_offs_1'][1]))
self.plot_dicts['phase_message'] = {
'ax_id': 'main',
'ypos': 0.9,
'xpos': 1.45,
'plotfn': self.plot_text,
'box_props': 'fancy',
'line_kws': {'alpha': 0},
'text_string': phase_message}
def _prepare_spectator_qubit_figure(self):
self.plot_dicts['spectator_qubit'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_off'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_spec_off'],
'ylabel': self.proc_data_dict['ylabel_spec'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ off',
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
# 'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['spec_on'] = {
'plotfn': self.plot_line,
'ax_id': 'spectator_qubit',
'xvals': self.proc_data_dict['xvals_on'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_spec_on'],
'ylabel': self.proc_data_dict['ylabel_spec'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ on',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
leak_msg = (
'Missing fraction: {:.2f} % '.format(
self.proc_data_dict['missing_fraction']*100))
self.plot_dicts['leak_msg'] = {
'ax_id': 'spectator_qubit',
'ypos': 0.7,
'plotfn': self.plot_text,
'box_props': 'fancy',
'line_kws': {'alpha': 0},
'text_string': leak_msg}
# offset as a guide for the eye
y = self.fit_res['cos_fit_on'].params['offset'].value
            self.plot_dicts['cos_on_offset'] = {
                'plotfn': self.plot_matplot_ax_method,
                'ax_id': 'main',
                'func': 'axhline',
                'plot_kws': {
                    'y': y, 'color': 'C1', 'linestyle': 'dotted'}
            }
class StateTomographyAnalysis(ba.BaseDataAnalysis):
"""
Analyses the results of the state tomography experiment and calculates
the corresponding quantum state.
Possible options that can be passed in the options_dict parameter:
cal_points: A data structure specifying the indices of the calibration
points. See the AveragedTimedomainAnalysis for format.
The calibration points need to be in the same order as the
used basis for the result.
data_type: 'averaged' or 'singleshot'. For singleshot data each
measurement outcome is saved and arbitrary order correlations
between the states can be calculated.
meas_operators: (optional) A list of qutip operators or numpy 2d arrays.
This overrides the measurement operators otherwise
found from the calibration points.
covar_matrix: (optional) The covariance matrix of the measurement
operators as a 2d numpy array. Overrides the one found
from the calibration points.
use_covariance_matrix (bool): Flag to define whether to use the
covariance matrix
basis_rots_str: A list of standard PycQED pulse names that were
applied to qubits before measurement
basis_rots: As an alternative to single_qubit_pulses, the basis
rotations applied to the system as qutip operators or numpy
matrices can be given.
mle: True/False, whether to do maximum likelihood fit. If False, only
least squares fit will be done, which could give negative
eigenvalues for the density matrix.
imle: True/False, whether to do iterative maximum likelihood fit. If
True, it takes preference over maximum likelihood method. Otherwise
least squares fit will be done, then 'mle' option will be checked.
pauli_raw: True/False, extracts Pauli expected values from a measurement
without assignment correction based on calibration data. If True,
takes preference over other methods except pauli_corr.
pauli_values: True/False, extracts Pauli expected values from a
measurement with assignment correction based on calibration data.
If True, takes preference over other methods.
iterations (optional): maximum number of iterations allowed in imle.
Tomographies with more qubits require more iterations to converge.
tolerance (optional): minimum change across iterations allowed in imle.
The iteration will stop if it goes under this value. Tomographies
with more qubits require smaller tolerance to converge.
rho_target (optional): A qutip density matrix that the result will be
compared to when calculating fidelity.
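
    A usage sketch (the timestamp, target state and option values are all
    illustrative):

        import qutip as qtp
        bell = (qtp.ket('00') + qtp.ket('11')).unit()
        a = StateTomographyAnalysis(
            t_start='20180101_120000',
            options_dict={'data_type': 'averaged',
                          'basis_rots_str': ('I', 'X90', 'Y90', 'X180'),
                          'mle': True,
                          'rho_target': bell * bell.dag()})
        rho = a.proc_data_dict['rho']
        F = a.proc_data_dict['fidelity']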
"""
def __init__(self, *args, **kwargs):
auto = kwargs.pop('auto', True)
super().__init__(*args, **kwargs)
kwargs['auto'] = auto
self.single_timestamp = True
self.params_dict = {'exp_metadata': 'exp_metadata'}
self.numeric_params = []
self.data_type = self.options_dict['data_type']
if self.data_type == 'averaged':
self.base_analysis = AveragedTimedomainAnalysis(*args, **kwargs)
elif self.data_type == 'singleshot':
self.base_analysis = roa.MultiQubit_SingleShot_Analysis(
*args, **kwargs)
else:
raise KeyError("Invalid tomography data mode: '" + self.data_type +
"'. Valid modes are 'averaged' and 'singleshot'.")
if kwargs.get('auto', True):
self.run_analysis()
def process_data(self):
tomography_qubits = self.options_dict.get('tomography_qubits', None)
data, Fs, Omega = self.base_analysis.measurement_operators_and_results(
tomography_qubits)
        if 'data_filter' in self.options_dict:
            data = self.options_dict['data_filter'](data.T).T

        # Normalize each measurement so that the outcome probabilities
        # sum to one.
        data = data.T
        for i, v in enumerate(data):
            data[i] = v / v.sum()
        data = data.T
Fs = self.options_dict.get('meas_operators', Fs)
Fs = [qtp.Qobj(F) for F in Fs]
d = Fs[0].shape[0]
self.proc_data_dict['d'] = d
Omega = self.options_dict.get('covar_matrix', Omega)
if Omega is None:
Omega = np.diag(np.ones(len(Fs)))
elif len(Omega.shape) == 1:
Omega = np.diag(Omega)
metadata = self.raw_data_dict.get('exp_metadata',
self.options_dict.get(
'exp_metadata', {}))
if metadata is None:
metadata = {}
self.raw_data_dict['exp_metadata'] = metadata
basis_rots_str = metadata.get('basis_rots_str', None)
basis_rots_str = self.options_dict.get('basis_rots_str', basis_rots_str)
if basis_rots_str is not None:
nr_qubits = int(np.round(np.log2(d)))
pulse_list = list(itertools.product(basis_rots_str,
repeat=nr_qubits))
rotations = tomo.standard_qubit_pulses_to_rotations(pulse_list)
else:
rotations = metadata.get('basis_rots', None)
rotations = self.options_dict.get('basis_rots', rotations)
if rotations is None:
raise KeyError("Either 'basis_rots_str' or 'basis_rots' "
"parameter must be passed in the options "
"dictionary or in the experimental metadata.")
rotations = [qtp.Qobj(U) for U in rotations]
all_Fs = tomo.rotated_measurement_operators(rotations, Fs)
all_Fs = [all_Fs[i][j]
for j in range(len(all_Fs[0]))
for i in range(len(all_Fs))]
all_mus = np.array(list(itertools.chain(*data.T)))
all_Omegas = sp.linalg.block_diag(*[Omega] * len(data[0]))
self.proc_data_dict['meas_operators'] = all_Fs
self.proc_data_dict['covar_matrix'] = all_Omegas
self.proc_data_dict['meas_results'] = all_mus
        if self.options_dict.get('pauli_values', False):
            rho_pauli = tomo.pauli_values_tomography(all_mus, Fs,
                                                     basis_rots_str)
self.proc_data_dict['rho_raw'] = rho_pauli
self.proc_data_dict['rho'] = rho_pauli
elif self.options_dict.get('pauli_raw', False):
pauli_raw = self.generate_raw_pauli_set()
rho_raw = tomo.pauli_set_to_density_matrix(pauli_raw)
self.proc_data_dict['rho_raw'] = rho_raw
self.proc_data_dict['rho'] = rho_raw
elif self.options_dict.get('imle', False):
it = metadata.get('iterations', None)
it = self.options_dict.get('iterations', it)
tol = metadata.get('tolerance', None)
tol = self.options_dict.get('tolerance', tol)
rho_imle = tomo.imle_tomography(
all_mus, all_Fs, it, tol)
self.proc_data_dict['rho_imle'] = rho_imle
self.proc_data_dict['rho'] = rho_imle
else:
rho_ls = tomo.least_squares_tomography(
all_mus, all_Fs,
all_Omegas if self.get_param_value('use_covariance_matrix', False)
else None )
self.proc_data_dict['rho_ls'] = rho_ls
self.proc_data_dict['rho'] = rho_ls
if self.options_dict.get('mle', False):
rho_mle = tomo.mle_tomography(
all_mus, all_Fs,
all_Omegas if self.get_param_value('use_covariance_matrix', False) else None,
rho_guess=rho_ls)
self.proc_data_dict['rho_mle'] = rho_mle
self.proc_data_dict['rho'] = rho_mle
rho = self.proc_data_dict['rho']
self.proc_data_dict['purity'] = (rho * rho).tr().real
rho_target = metadata.get('rho_target', None)
rho_target = self.options_dict.get('rho_target', rho_target)
if rho_target is not None:
self.proc_data_dict['fidelity'] = tomo.fidelity(rho, rho_target)
if d == 4:
self.proc_data_dict['concurrence'] = tomo.concurrence(rho)
else:
self.proc_data_dict['concurrence'] = 0
def prepare_plots(self):
self.prepare_density_matrix_plot()
d = self.proc_data_dict['d']
if 2 ** (d.bit_length() - 1) == d:
# dimension is power of two, plot expectation values of pauli
# operators
self.prepare_pauli_basis_plot()
def prepare_density_matrix_plot(self):
self.tight_fig = self.options_dict.get('tight_fig', False)
rho_target = self.raw_data_dict['exp_metadata'].get('rho_target', None)
rho_target = self.options_dict.get('rho_target', rho_target)
d = self.proc_data_dict['d']
xtick_labels = self.options_dict.get('rho_ticklabels', None)
ytick_labels = self.options_dict.get('rho_ticklabels', None)
if 2 ** (d.bit_length() - 1) == d:
nr_qubits = d.bit_length() - 1
fmt_string = '{{:0{}b}}'.format(nr_qubits)
labels = [fmt_string.format(i) for i in range(2 ** nr_qubits)]
if xtick_labels is None:
xtick_labels = ['$|' + lbl + r'\rangle$' for lbl in labels]
if ytick_labels is None:
ytick_labels = [r'$\langle' + lbl + '|$' for lbl in labels]
color = (0.5 * np.angle(self.proc_data_dict['rho'].full()) / np.pi) % 1.
cmap = self.options_dict.get('rho_colormap', self.default_phase_cmap())
if self.options_dict.get('pauli_raw', False):
title = 'Density matrix reconstructed from the Pauli (raw) set\n'
elif self.options_dict.get('pauli_values', False):
title = 'Density matrix reconstructed from the Pauli set\n'
elif self.options_dict.get('mle', False):
title = 'Maximum likelihood fit of the density matrix\n'
        elif self.options_dict.get('imle', False):
            title = 'Iterative maximum likelihood fit of the density matrix\n'
else:
title = 'Least squares fit of the density matrix\n'
empty_artist = mpl.patches.Rectangle((0, 0), 0, 0, visible=False)
legend_entries = [(empty_artist,
r'Purity, $Tr(\rho^2) = {:.1f}\%$'.format(
100 * self.proc_data_dict['purity']))]
if rho_target is not None:
legend_entries += [
(empty_artist, r'Fidelity, $F = {:.1f}\%$'.format(
100 * self.proc_data_dict['fidelity']))]
if d == 4:
legend_entries += [
(empty_artist, r'Concurrence, $C = {:.2f}$'.format(
self.proc_data_dict['concurrence']))]
meas_string = self.base_analysis.\
raw_data_dict['measurementstring']
if isinstance(meas_string, list):
if len(meas_string) > 1:
meas_string = meas_string[0] + ' to ' + meas_string[-1]
else:
meas_string = meas_string[0]
self.plot_dicts['density_matrix'] = {
'plotfn': self.plot_bar3D,
'3d': True,
'3d_azim': -35,
'3d_elev': 35,
'xvals': np.arange(d),
'yvals': np.arange(d),
'zvals': np.abs(self.proc_data_dict['rho'].full()),
'zrange': (0, 1),
'color': color,
'colormap': cmap,
'bar_widthx': 0.5,
'bar_widthy': 0.5,
'xtick_loc': np.arange(d),
'xtick_labels': xtick_labels,
'ytick_loc': np.arange(d),
'ytick_labels': ytick_labels,
'ctick_loc': np.linspace(0, 1, 5),
'ctick_labels': ['$0$', r'$\frac{1}{2}\pi$', r'$\pi$',
r'$\frac{3}{2}\pi$', r'$2\pi$'],
'clabel': 'Phase (rad)',
'title': (title + self.raw_data_dict['timestamp'] + ' ' +
meas_string),
'do_legend': True,
'legend_entries': legend_entries,
'legend_kws': dict(loc='upper left', bbox_to_anchor=(0, 0.94))
}
if rho_target is not None:
rho_target = qtp.Qobj(rho_target)
if rho_target.type == 'ket':
rho_target = rho_target * rho_target.dag()
elif rho_target.type == 'bra':
rho_target = rho_target.dag() * rho_target
self.plot_dicts['density_matrix_target'] = {
'plotfn': self.plot_bar3D,
'3d': True,
'3d_azim': -35,
'3d_elev': 35,
'xvals': np.arange(d),
'yvals': np.arange(d),
'zvals': np.abs(rho_target.full()),
'zrange': (0, 1),
'color': (0.5 * np.angle(rho_target.full()) / np.pi) % 1.,
'colormap': cmap,
'bar_widthx': 0.5,
'bar_widthy': 0.5,
'xtick_loc': np.arange(d),
'xtick_labels': xtick_labels,
'ytick_loc': np.arange(d),
'ytick_labels': ytick_labels,
'ctick_loc': np.linspace(0, 1, 5),
'ctick_labels': ['$0$', r'$\frac{1}{2}\pi$', r'$\pi$',
r'$\frac{3}{2}\pi$', r'$2\pi$'],
'clabel': 'Phase (rad)',
'title': ('Target density matrix\n' +
self.raw_data_dict['timestamp'] + ' ' +
meas_string),
'bar_kws': dict(zorder=1),
}
def generate_raw_pauli_set(self):
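        """
        Estimates Pauli expectation values directly from the measured
        values, without correcting for readout assignment errors.

        For every Pauli operator P, each measurement operator F with
        |Tr(F*P)| >= 1/2 contributes its measured value with the sign of
        Tr(F*P); the signed average is scaled by 2**n (n qubits) to give
        the estimate of <P>.
        """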
nr_qubits = self.proc_data_dict['d'].bit_length() - 1
pauli_raw_values = []
for op in tomo.generate_pauli_set(nr_qubits)[1]:
nr_terms = 0
sum_terms = 0.
for meas_op, meas_res in zip(self.proc_data_dict['meas_operators'],
self.proc_data_dict['meas_results']):
trace = (meas_op*op).tr().real
clss = int(trace*2)
if clss < 0:
sum_terms -= meas_res
nr_terms += 1
elif clss > 0:
sum_terms += meas_res
nr_terms += 1
pauli_raw_values.append(2**nr_qubits*sum_terms/nr_terms)
return pauli_raw_values
    def generate_corr_pauli_set(self, Fs, rotations):
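        """
        Estimates Pauli expectation values after correcting for readout
        assignment errors.

        The diagonals of the calibration-based measurement operators Fs
        form an assignment-probability matrix; its inverse is applied to
        the measured probabilities, and the corrected values are combined
        into Pauli expectation values in the same way as in
        generate_raw_pauli_set.
        """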
nr_qubits = self.proc_data_dict['d'].bit_length() - 1
Fs_corr = []
assign_corr = []
for i,F in enumerate(Fs):
new_op = np.zeros(2**nr_qubits)
new_op[i] = 1
Fs_corr.append(qtp.Qobj(np.diag(new_op)))
assign_corr.append(np.diag(F.full()))
        pauli_Fs = tomo.rotated_measurement_operators(rotations, Fs_corr)
        # dtype=object (np.object is deprecated and removed in NumPy >= 1.24)
        pauli_Fs = list(itertools.chain(*np.array(pauli_Fs, dtype=object).T))
mus = self.proc_data_dict['meas_results']
pauli_mus = np.reshape(mus,[-1,2**nr_qubits])
for i,raw_mus in enumerate(pauli_mus):
pauli_mus[i] = np.matmul(np.linalg.inv(assign_corr),np.array(raw_mus))
pauli_mus = pauli_mus.flatten()
pauli_values = []
for op in tomo.generate_pauli_set(nr_qubits)[1]:
nr_terms = 0
sum_terms = 0.
for meas_op, meas_res in zip(pauli_Fs,pauli_mus):
trace = (meas_op*op).tr().real
clss = int(trace*2)
if clss < 0:
sum_terms -= meas_res
nr_terms += 1
elif clss > 0:
sum_terms += meas_res
nr_terms += 1
pauli_values.append(2**nr_qubits*sum_terms/nr_terms)
return pauli_values
def prepare_pauli_basis_plot(self):
yexp = tomo.density_matrix_to_pauli_basis(self.proc_data_dict['rho'])
nr_qubits = self.proc_data_dict['d'].bit_length() - 1
labels = list(itertools.product(*[['I', 'X', 'Y', 'Z']]*nr_qubits))
labels = [''.join(label_list) for label_list in labels]
if nr_qubits == 1:
order = [1, 2, 3]
elif nr_qubits == 2:
order = [1, 2, 3, 4, 8, 12, 5, 6, 7, 9, 10, 11, 13, 14, 15]
elif nr_qubits == 3:
order = [1, 2, 3, 4, 8, 12, 16, 32, 48] + \
[5, 6, 7, 9, 10, 11, 13, 14, 15] + \
[17, 18, 19, 33, 34, 35, 49, 50, 51] + \
[20, 24, 28, 36, 40, 44, 52, 56, 60] + \
[21, 22, 23, 25, 26, 27, 29, 30, 31] + \
[37, 38, 39, 41, 42, 43, 45, 46, 47] + \
[53, 54, 55, 57, 58, 59, 61, 62, 63]
else:
order = np.arange(4**nr_qubits)[1:]
if self.options_dict.get('pauli_raw', False):
fit_type = 'raw counts'
elif self.options_dict.get('pauli_values', False):
fit_type = 'corrected counts'
elif self.options_dict.get('mle', False):
fit_type = 'maximum likelihood estimation'
elif self.options_dict.get('imle', False):
fit_type = 'iterative maximum likelihood estimation'
else:
fit_type = 'least squares fit'
meas_string = self.base_analysis. \
raw_data_dict['measurementstring']
if np.ndim(meas_string) > 0:
if len(meas_string) > 1:
meas_string = meas_string[0] + ' to ' + meas_string[-1]
else:
meas_string = meas_string[0]
self.plot_dicts['pauli_basis'] = {
'plotfn': self.plot_bar,
'xcenters': np.arange(len(order)),
'xwidth': 0.4,
'xrange': (-1, len(order)),
'yvals': np.array(yexp)[order],
'xlabel': r'Pauli operator, $\hat{O}$',
'ylabel': r'Expectation value, $\mathrm{Tr}(\hat{O} \hat{\rho})$',
'title': 'Pauli operators, ' + fit_type + '\n' +
self.raw_data_dict['timestamp'] + ' ' + meas_string,
'yrange': (-1.1, 1.1),
'xtick_loc': np.arange(4**nr_qubits - 1),
'xtick_rotation': 90,
'xtick_labels': np.array(labels)[order],
'bar_kws': dict(zorder=10),
'setlabel': 'Fit to experiment',
'do_legend': True
}
if nr_qubits > 2:
self.plot_dicts['pauli_basis']['plotsize'] = (10, 5)
rho_target = self.raw_data_dict['exp_metadata'].get('rho_target', None)
rho_target = self.options_dict.get('rho_target', rho_target)
if rho_target is not None:
rho_target = qtp.Qobj(rho_target)
ytar = tomo.density_matrix_to_pauli_basis(rho_target)
self.plot_dicts['pauli_basis_target'] = {
'plotfn': self.plot_bar,
'ax_id': 'pauli_basis',
'xcenters': np.arange(len(order)),
'xwidth': 0.8,
'yvals': np.array(ytar)[order],
'xtick_loc': np.arange(len(order)),
'xtick_labels': np.array(labels)[order],
'bar_kws': dict(color='0.8', zorder=0),
'setlabel': 'Target values',
'do_legend': True
}
purity_str = r'Purity, $Tr(\rho^2) = {:.1f}\%$'.format(
100 * self.proc_data_dict['purity'])
if rho_target is not None:
fidelity_str = '\n' + r'Fidelity, $F = {:.1f}\%$'.format(
100 * self.proc_data_dict['fidelity'])
else:
fidelity_str = ''
if self.proc_data_dict['d'] == 4:
concurrence_str = '\n' + r'Concurrence, $C = {:.1f}\%$'.format(
100 * self.proc_data_dict['concurrence'])
else:
concurrence_str = ''
self.plot_dicts['pauli_info_labels'] = {
'ax_id': 'pauli_basis',
'plotfn': self.plot_line,
'xvals': [0],
'yvals': [0],
'line_kws': {'alpha': 0},
'setlabel': purity_str + fidelity_str,
'do_legend': True
}
def default_phase_cmap(self):
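        """
        Returns a cyclic matplotlib colormap for encoding the phase of the
        density-matrix elements. The eight base colors wrap around (the
        last segment reuses the first color) so that phases 0 and 2*pi map
        to the same color.
        """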
cols = np.array(((41, 39, 231), (61, 130, 163), (208, 170, 39),
(209, 126, 4), (181, 28, 20), (238, 76, 152),
(251, 130, 242), (162, 112, 251))) / 255
n = len(cols)
cdict = {
'red': [[i/n, cols[i%n][0], cols[i%n][0]] for i in range(n+1)],
'green': [[i/n, cols[i%n][1], cols[i%n][1]] for i in range(n+1)],
'blue': [[i/n, cols[i%n][2], cols[i%n][2]] for i in range(n+1)],
}
return mpl.colors.LinearSegmentedColormap('DMDefault', cdict)
class ReadoutROPhotonsAnalysis(Single_Qubit_TimeDomainAnalysis):
"""
Analyses the photon number in the RO based on the
readout_photons_in_resonator function
function specific options for options dict:
f_qubit
chi
artif_detuning
print_fit_results
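
    The options dict must contain 'kappa_effective' (a pair of effective
    resonator linewidths, indexed by the qubit state), 'chi' and 'T2echo';
    'artif_detuning' defaults to 0. A hypothetical call (all numbers are
    illustrative):

        a = ReadoutROPhotonsAnalysis(
            label='RO_photons',
            options_dict={'kappa_effective': [4.2e6, 4.0e6],
                          'chi': -2.5e6,
                          'T2echo': 12e-6,
                          'artif_detuning': 0.5e6})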
"""
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
close_figs: bool=False, options_dict: dict=None,
extract_only: bool=False, do_fitting: bool=False,
auto: bool=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
close_figs=close_figs, label=label,
extract_only=extract_only, do_fitting=do_fitting)
if self.options_dict.get('TwoD', None) is None:
self.options_dict['TwoD'] = True
self.label = label
self.params_dict = {
'measurementstring': 'measurementstring',
'sweep_points': 'sweep_points',
'sweep_points_2D': 'sweep_points_2D',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = self.options_dict.get('numeric_params',
OrderedDict())
self.kappa = self.options_dict.get('kappa_effective', None)
self.chi = self.options_dict.get('chi', None)
self.T2 = self.options_dict.get('T2echo', None)
self.artif_detuning = self.options_dict.get('artif_detuning', 0)
if (self.kappa is None) or (self.chi is None) or (self.T2 is None):
raise ValueError('kappa_effective, chi and T2echo must be passed to '
'the options_dict.')
if auto:
self.run_analysis()
def process_data(self):
self.proc_data_dict = OrderedDict()
self.proc_data_dict['qubit_state'] = [[],[]]
self.proc_data_dict['delay_to_relax'] = self.raw_data_dict[
'sweep_points_2D'][0]
        self.proc_data_dict['ramsey_times'] = []

        # Alternating entries correspond to the qubit prepared in |g>
        # (even indices) and in |e> (odd indices).
        data = np.transpose(
            self.raw_data_dict['measured_data']['raw w0 _measure'][0])
        for i, x in enumerate(data):
            self.proc_data_dict['qubit_state'][0].append([])
            self.proc_data_dict['qubit_state'][1].append([])

            for j, y in enumerate(x):
                if j % 2 == 0:
                    self.proc_data_dict['qubit_state'][0][i].append(y)
                else:
                    self.proc_data_dict['qubit_state'][1][i].append(y)

        for i, x in enumerate(self.raw_data_dict['sweep_points'][0]):
            if i % 2 == 0:
                self.proc_data_dict['ramsey_times'].append(x)
        # TODO: chi still needs to be passed through explicitly.
def prepare_fitting(self):
self.proc_data_dict['photon_number'] = [[],[]]
self.proc_data_dict['fit_results'] = []
self.proc_data_dict['ramsey_fit_results'] = [[],[]]
for i,tau in enumerate(self.proc_data_dict['delay_to_relax']):
self.proc_data_dict['ramsey_fit_results'][0].append(self.fit_Ramsey(
self.proc_data_dict['ramsey_times'][:-4],
self.proc_data_dict['qubit_state'][0][i][:-4]/
max(self.proc_data_dict['qubit_state'][0][i][:-4]),
state=0,
kw=self.options_dict))
self.proc_data_dict['ramsey_fit_results'][1].append(self.fit_Ramsey(
self.proc_data_dict['ramsey_times'][:-4],
self.proc_data_dict['qubit_state'][1][i][:-4]/
max(self.proc_data_dict['qubit_state'][1][i][:-4]),
state=1,
kw=self.options_dict))
n01 = self.proc_data_dict['ramsey_fit_results'
][0][i][0].params['n0'].value
n02 = self.proc_data_dict['ramsey_fit_results'
][1][i][0].params['n0'].value
self.proc_data_dict['photon_number'][0].append(n01)
self.proc_data_dict['photon_number'][1].append(n02)
def run_fitting(self):
        # print_fit_results is documented as an options_dict option; reading
        # it from params_dict (as done previously) would never find it.
        print_fit_results = self.options_dict.get('print_fit_results', False)
exp_dec_mod = lmfit.Model(fit_mods.ExpDecayFunc)
exp_dec_mod.set_param_hint('n',
value=1,
vary=False)
exp_dec_mod.set_param_hint('offset',
value=0,
min=0,
vary=True)
exp_dec_mod.set_param_hint('tau',
value=self.proc_data_dict[
'delay_to_relax'][-1],
min=1e-11,
vary=True)
exp_dec_mod.set_param_hint('amplitude',
value=1,
min=0,
vary=True)
params = exp_dec_mod.make_params()
self.fit_res = OrderedDict()
self.fit_res['ground_state'] = exp_dec_mod.fit(
data=self.proc_data_dict['photon_number'][0],
params=params,
t=self.proc_data_dict['delay_to_relax'])
self.fit_res['excited_state'] = exp_dec_mod.fit(
data=self.proc_data_dict['photon_number'][1],
params=params,
t=self.proc_data_dict['delay_to_relax'])
if print_fit_results:
print(self.fit_res['ground_state'].fit_report())
print(self.fit_res['excited_state'].fit_report())
def fit_Ramsey(self, x, y, state, **kw):
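        """
        Fits a single Ramsey trace with two candidate models: a
        photon-number decay model (fit_mods.ExpDecayPmod) and a combined
        oscillation-plus-decay model (fit_mods.CombinedOszExpDecayFunc).
        The initial photon number n0 is estimated from the FFT of the
        data. If a fit converges poorly (chi-square > 0.35), a grid of
        phase and amplitude starting values is scanned and the best
        result is kept.

        Returns a list [best_fit, photon_number_model_fit,
        combined_model_fit].
        """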
x = np.array(x)
y = np.array(y)
exp_dec_p_mod = lmfit.Model(fit_mods.ExpDecayPmod)
comb_exp_dec_mod = lmfit.Model(fit_mods.CombinedOszExpDecayFunc)
        average = np.mean(y)

        # Estimate the oscillation frequency from the FFT (skipping the DC
        # bin) and convert it into an initial photon-number estimate via
        # the dispersive shift 2*chi.
        ft_of_data = np.fft.fft(y)
        index_of_fourier_maximum = np.argmax(np.abs(
            ft_of_data[1:len(ft_of_data) // 2])) + 1
        max_ramsey_delay = x[-1] - x[0]

        fft_axis_scaling = 1 / max_ramsey_delay
        freq_est = fft_axis_scaling * index_of_fourier_maximum
        n_est = (freq_est - self.artif_detuning) / (2 * self.chi)
exp_dec_p_mod.set_param_hint('T2echo',
value=self.T2,
vary=False)
exp_dec_p_mod.set_param_hint('offset',
value=average,
min=0,
vary=True)
exp_dec_p_mod.set_param_hint('delta',
value=self.artif_detuning,
vary=False)
exp_dec_p_mod.set_param_hint('amplitude',
value=1,
min=0,
vary=True)
exp_dec_p_mod.set_param_hint('kappa',
value=self.kappa[state],
vary=False)
exp_dec_p_mod.set_param_hint('chi',
value=self.chi,
vary=False)
exp_dec_p_mod.set_param_hint('n0',
value=n_est,
min=0,
vary=True)
exp_dec_p_mod.set_param_hint('phase',
value=0,
vary=True)
comb_exp_dec_mod.set_param_hint('tau',
value=self.T2,
vary=True)
comb_exp_dec_mod.set_param_hint('offset',
value=average,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('oscillation_offset',
value=average,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('amplitude',
value=1,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('tau_gauss',
value=self.kappa[state],
vary=True)
comb_exp_dec_mod.set_param_hint('n0',
value=n_est,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('phase',
value=0,
vary=True)
comb_exp_dec_mod.set_param_hint('delta',
value=self.artif_detuning,
vary=False)
comb_exp_dec_mod.set_param_hint('chi',
value=self.chi,
vary=False)
        # Estimate the initial phase from whether the trace starts high
        # or low.
        if np.average(y[:4]) > np.average(y[4:8]):
            phase_estimate = 0
        else:
            phase_estimate = np.pi
exp_dec_p_mod.set_param_hint('phase',
value=phase_estimate, vary=True)
comb_exp_dec_mod.set_param_hint('phase',
value=phase_estimate, vary=True)
amplitude_guess = 0.5
if np.all(np.logical_and(y >= 0, y <= 1)):
exp_dec_p_mod.set_param_hint('amplitude',
value=amplitude_guess,
min=0.00,
max=4.0,
vary=True)
comb_exp_dec_mod.set_param_hint('amplitude',
value=amplitude_guess,
min=0.00,
max=4.0,
vary=True)
        else:
            log.warning('Data is not normalized, varying the amplitude.')
exp_dec_p_mod.set_param_hint('amplitude',
value=max(y),
min=0.00,
max=4.0,
vary=True)
comb_exp_dec_mod.set_param_hint('amplitude',
value=max(y),
min=0.00,
max=4.0,
vary=True)
fit_res_1 = exp_dec_p_mod.fit(data=y,
t=x,
params= exp_dec_p_mod.make_params())
fit_res_2 = comb_exp_dec_mod.fit(data=y,
t=x,
params= comb_exp_dec_mod.make_params())
if fit_res_1.chisqr > .35:
log.warning('Fit did not converge, varying phase')
fit_res_lst = []
for phase_estimate in np.linspace(0, 2*np.pi, 10):
for i, del_amp in enumerate(np.linspace(
-max(y)/10, max(y)/10, 10)):
exp_dec_p_mod.set_param_hint('phase',
value=phase_estimate,
vary=False)
exp_dec_p_mod.set_param_hint('amplitude',
value=max(y)+ del_amp)
fit_res_lst += [exp_dec_p_mod.fit(
data=y,
t=x,
params= exp_dec_p_mod.make_params())]
            chisqr_lst = [fr.chisqr for fr in fit_res_lst]
            fit_res_1 = fit_res_lst[np.argmin(chisqr_lst)]
if fit_res_2.chisqr > .35:
log.warning('Fit did not converge, varying phase')
fit_res_lst = []
for phase_estimate in np.linspace(0, 2*np.pi, 10):
for i, del_amp in enumerate(np.linspace(
-max(y)/10, max(y)/10, 10)):
comb_exp_dec_mod.set_param_hint('phase',
value=phase_estimate,
vary=False)
comb_exp_dec_mod.set_param_hint('amplitude',
value=max(y)+ del_amp)
fit_res_lst += [comb_exp_dec_mod.fit(
data=y,
t=x,
params= comb_exp_dec_mod.make_params())]
            chisqr_lst = [fr.chisqr for fr in fit_res_lst]
            fit_res_2 = fit_res_lst[np.argmin(chisqr_lst)]
        # Return the better fit first, followed by both individual fits
        # (index 1: photon-number model, index 2: combined model).
        if fit_res_1.chisqr < fit_res_2.chisqr:
            self.proc_data_dict['params'] = exp_dec_p_mod.make_params()
            return [fit_res_1, fit_res_1, fit_res_2]
        else:
            self.proc_data_dict['params'] = comb_exp_dec_mod.make_params()
            return [fit_res_2, fit_res_1, fit_res_2]
def prepare_plots(self):
self.prepare_2D_sweep_plot()
self.prepare_photon_number_plot()
self.prepare_ramsey_plots()
def prepare_2D_sweep_plot(self):
self.plot_dicts['off_full_data_'+self.label] = {
'title': 'Raw data |g>',
'plotfn': self.plot_colorxy,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
'yvals': self.proc_data_dict['delay_to_relax'],
'ylabel': 'Delay after first RO-pulse',
'yunit': 's',
'zvals': np.array(self.proc_data_dict['qubit_state'][0]) }
self.plot_dicts['on_full_data_'+self.label] = {
'title': 'Raw data |e>',
'plotfn': self.plot_colorxy,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
'yvals': self.proc_data_dict['delay_to_relax'],
'ylabel': 'Delay after first RO-pulse',
'yunit': 's',
'zvals': np.array(self.proc_data_dict['qubit_state'][1]) }
def prepare_ramsey_plots(self):
x_fit = np.linspace(self.proc_data_dict['ramsey_times'][0],
max(self.proc_data_dict['ramsey_times']),101)
for i in range(len(self.proc_data_dict['ramsey_fit_results'][0])):
self.plot_dicts['off_'+str(i)] = {
'title': 'Ramsey w t_delay = '+\
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |g> state',
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
'yvals': np.array(self.proc_data_dict['qubit_state'][0][i]/
max(self.proc_data_dict['qubit_state'][0][i][:-4])),
'ylabel': 'Measured qubit state',
'yunit': '',
'marker': 'o',
'setlabel': '|g> data_'+str(i),
'do_legend': True }
self.plot_dicts['off_fit_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |g> state',
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][0][i][1].eval(
self.proc_data_dict['ramsey_fit_results'][0][i][1].params,
t=x_fit),
'linestyle': '-',
'marker': '',
'setlabel': '|g> fit_model'+str(i),
'do_legend': True }
self.plot_dicts['off_fit_2_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |g> state',
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][0][i][2].eval(
self.proc_data_dict['ramsey_fit_results'][0][i][2].params,
t=x_fit),
'linestyle': '-',
'marker': '',
                'setlabel': '|g> fit_simple_model'+str(i),
'do_legend': True }
self.plot_dicts['hidden_g_'+str(i)] = {
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': [0],
'yvals': [0],
'color': 'w',
'setlabel': 'Residual photon count = '
''+str(self.proc_data_dict['photon_number'][0][i]),
'do_legend': True }
self.plot_dicts['on_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |e> state',
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
'yvals': np.array(self.proc_data_dict['qubit_state'][1][i]/
max(self.proc_data_dict['qubit_state'][1][i][:-4])),
'ylabel': 'Measured qubit state',
'yunit': '',
'marker': 'o',
'setlabel': '|e> data_'+str(i),
'do_legend': True }
self.plot_dicts['on_fit_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |e> state',
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][1][i][1].eval(
self.proc_data_dict['ramsey_fit_results'][1][i][1].params,
t=x_fit),
'linestyle': '-',
'marker': '',
'setlabel': '|e> fit_model'+str(i),
'do_legend': True }
self.plot_dicts['on_fit_2_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |e> state',
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][1][i][2].eval(
self.proc_data_dict['ramsey_fit_results'][1][i][2].params,
t=x_fit),
'linestyle': '-',
'marker': '',
                'setlabel': '|e> fit_simple_model'+str(i),
'do_legend': True }
self.plot_dicts['hidden_e_'+str(i)] = {
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': [0],
'yvals': [0],
'color': 'w',
'setlabel': 'Residual photon count = '
''+str(self.proc_data_dict['photon_number'][1][i]),
'do_legend': True }
def prepare_photon_number_plot(self):
ylabel = 'Average photon number'
yunit = ''
x_fit = np.linspace(min(self.proc_data_dict['delay_to_relax']),
max(self.proc_data_dict['delay_to_relax']),101)
minmax_data = [min(min(self.proc_data_dict['photon_number'][0]),
min(self.proc_data_dict['photon_number'][1])),
max(max(self.proc_data_dict['photon_number'][0]),
max(self.proc_data_dict['photon_number'][1]))]
minmax_data[0] -= minmax_data[0]/5
minmax_data[1] += minmax_data[1]/5
self.plot_dicts['Photon number count'] = {
'plotfn': self.plot_line,
'xlabel': 'Delay after first RO-pulse',
'ax_id': 'Photon number count ',
'xunit': 's',
'xvals': self.proc_data_dict['delay_to_relax'],
'yvals': self.proc_data_dict['photon_number'][0],
'ylabel': ylabel,
'yunit': yunit,
'yrange': minmax_data,
'title': 'Residual photon number',
'color': 'b',
'linestyle': '',
'marker': 'o',
'setlabel': '|g> data',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['main2'] = {
'plotfn': self.plot_line,
'xunit': 's',
'xvals': x_fit,
'yvals': self.fit_res['ground_state'].eval(
self.fit_res['ground_state'].params,
t=x_fit),
'yrange': minmax_data,
'ax_id': 'Photon number count ',
'color': 'b',
'linestyle': '-',
'marker': '',
'setlabel': '|g> fit',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['main3'] = {
'plotfn': self.plot_line,
'xunit': 's',
'xvals': self.proc_data_dict['delay_to_relax'],
'yvals': self.proc_data_dict['photon_number'][1],
'yrange': minmax_data,
'ax_id': 'Photon number count ',
'color': 'r',
'linestyle': '',
'marker': 'o',
'setlabel': '|e> data',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['main4'] = {
'plotfn': self.plot_line,
'xunit': 's',
'ax_id': 'Photon number count ',
'xvals': x_fit,
'yvals': self.fit_res['excited_state'].eval(
self.fit_res['excited_state'].params,
t=x_fit),
'yrange': minmax_data,
'ylabel': ylabel,
'color': 'r',
'linestyle': '-',
'marker': '',
'setlabel': '|e> fit',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['hidden_1'] = {
'ax_id': 'Photon number count ',
'plotfn': self.plot_line,
'yrange': minmax_data,
'xvals': [0],
'yvals': [0],
'color': 'w',
            'setlabel': 'tau_g = {:.3f} ns'.format(
                self.fit_res['ground_state'].params['tau'].value*1e9),
            'do_legend': True}
self.plot_dicts['hidden_2'] = {
'ax_id': 'Photon number count ',
'plotfn': self.plot_line,
'yrange': minmax_data,
'xvals': [0],
'yvals': [0],
'color': 'w',
            'setlabel': 'tau_e = {:.3f} ns'.format(
                self.fit_res['excited_state'].params['tau'].value*1e9),
            'do_legend': True}
class RODynamicPhaseAnalysis(MultiQubit_TimeDomain_Analysis):
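    """
    Extracts the measurement-induced (dynamic) phase acquired by the
    non-pulsed qubits: for each measured qubit, the data with and without
    the measurement pulse are fit with cosines, and the dynamic phase is
    the difference of the two fitted phases, reported in degrees.
    """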
def __init__(self, qb_names: list=None, t_start: str=None, t_stop: str=None,
data_file_path: str=None, single_timestamp: bool=False,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(qb_names=qb_names, t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only,
do_fitting=do_fitting,
auto=False)
if auto:
self.run_analysis()
def process_data(self):
super().process_data()
if 'qbp_name' in self.metadata:
self.pulsed_qbname = self.metadata['qbp_name']
else:
self.pulsed_qbname = self.options_dict.get('pulsed_qbname')
self.measured_qubits = [qbn for qbn in self.channel_map if
qbn != self.pulsed_qbname]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.measured_qubits:
ro_dict = self.proc_data_dict['projected_data_dict'][qbn]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
for ro_suff, data in ro_dict.items():
cos_mod = lmfit.Model(fit_mods.CosFunc)
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
guess_pars = fit_mods.Cos_guess(
model=cos_mod,
t=sweep_points,
data=data)
guess_pars['amplitude'].vary = True
guess_pars['offset'].vary = True
guess_pars['frequency'].vary = True
guess_pars['phase'].vary = True
key = 'cos_fit_{}{}'.format(qbn, ro_suff)
self.fit_dicts[key] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.dynamic_phases = OrderedDict()
for meas_qbn in self.measured_qubits:
self.dynamic_phases[meas_qbn] = \
(self.fit_dicts['cos_fit_{}_measure'.format(meas_qbn)][
'fit_res'].best_values['phase'] -
self.fit_dicts['cos_fit_{}_ref_measure'.format(meas_qbn)][
'fit_res'].best_values['phase'])*180/np.pi
def prepare_plots(self):
super().prepare_plots()
if self.do_fitting:
for meas_qbn in self.measured_qubits:
sweep_points_dict = self.proc_data_dict['sweep_points_dict'][
meas_qbn]
if self.num_cal_points != 0:
yvals = [self.proc_data_dict['projected_data_dict'][meas_qbn][
'_ref_measure'][:-self.num_cal_points],
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_measure'][:-self.num_cal_points]]
sweep_points = sweep_points_dict['msmt_sweep_points']
# plot cal points
for i, cal_pts_idxs in enumerate(
self.cal_states_dict.values()):
key = list(self.cal_states_dict)[i] + meas_qbn
self.plot_dicts[key] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'plotfn': self.plot_line,
                        'xvals': sweep_points_dict['cal_points_sweep_points'][
                            cal_pts_idxs],
'yvals': np.mean([
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_ref_measure'][cal_pts_idxs],
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_measure'][cal_pts_idxs]],
axis=0),
'setlabel': list(self.cal_states_dict)[i],
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left',
'linestyle': 'none',
'line_kws': {'color': self.get_cal_state_color(
list(self.cal_states_dict)[i])}}
else:
yvals = [self.proc_data_dict['projected_data_dict'][meas_qbn][
'_ref_measure'],
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_measure']]
sweep_points = sweep_points_dict['sweep_points']
self.plot_dicts['dyn_phase_plot_' + meas_qbn] = {
'plotfn': self.plot_line,
'xvals': [sweep_points, sweep_points],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': yvals,
'ylabel': 'Excited state population',
'yunit': '',
'setlabel': ['with measurement', 'no measurement'],
'title': (self.raw_data_dict['timestamps'][0] + ' ' +
self.raw_data_dict['measurementstring'][0]),
'linestyle': 'none',
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
self.plot_dicts['cos_fit_' + meas_qbn + '_ref_measure'] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_{}_ref_measure'.format(
meas_qbn)]['fit_res'],
'setlabel': 'cos fit',
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
self.plot_dicts['cos_fit_' + meas_qbn + '_measure'] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_{}_measure'.format(
meas_qbn)]['fit_res'],
'setlabel': 'cos fit',
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
textstr = 'Dynamic phase = {:.2f}'.format(
self.dynamic_phases[meas_qbn]) + r'$^{\circ}$'
self.plot_dicts['text_msg_' + meas_qbn] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'ypos': -0.175,
'xpos': 0.5,
'horizontalalignment': 'center',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
class FluxAmplitudeSweepAnalysis(MultiQubit_TimeDomain_Analysis):
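    """
    Extracts the qubit frequency as a function of flux pulse amplitude:
    every amplitude slice of the 2D data is fit with a Gaussian to locate
    the qubit frequency, points with a center uncertainty above 1 MHz are
    discarded, and the remaining points are fit with a qubit dac-arch
    model (fit_mods.Qubit_dac_to_freq by default).
    """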
def __init__(self, qb_names, *args, **kwargs):
self.mask_freq = kwargs.pop('mask_freq', None)
self.mask_amp = kwargs.pop('mask_amp', None)
super().__init__(qb_names, *args, **kwargs)
def extract_data(self):
super().extract_data()
        # Set some default values specific to FluxAmplitudeSweepAnalysis if
        # the respective options have not been set by the user or in the
        # metadata. (We do not do this in __init__ since we have to wait
        # until the metadata has been extracted.)
if self.get_param_value('rotation_type', default_value=None) is None:
self.options_dict['rotation_type'] = 'global_PCA'
if self.get_param_value('TwoD', default_value=None) is None:
self.options_dict['TwoD'] = True
def process_data(self):
super().process_data()
pdd = self.proc_data_dict
nr_sp = {qb: len(pdd['sweep_points_dict'][qb]['sweep_points'])
for qb in self.qb_names}
nr_sp2d = {qb: len(list(pdd['sweep_points_2D_dict'][qb].values())[0])
for qb in self.qb_names}
nr_cp = self.num_cal_points
# make matrix out of vector
data_reshaped = {qb: np.reshape(deepcopy(
pdd['data_to_fit'][qb]).T.flatten(), (nr_sp[qb], nr_sp2d[qb]))
for qb in self.qb_names}
pdd['data_reshaped'] = data_reshaped
# remove calibration points from data to fit
data_no_cp = {qb: np.array([pdd['data_reshaped'][qb][i, :]
for i in range(nr_sp[qb]-nr_cp)])
for qb in self.qb_names}
# apply mask
for qb in self.qb_names:
if self.mask_freq is None:
self.mask_freq = [True]*nr_sp2d[qb] # by default, no point is masked
if self.mask_amp is None:
self.mask_amp = [True]*(nr_sp[qb]-nr_cp)
pdd['freqs_masked'] = {}
pdd['amps_masked'] = {}
pdd['data_masked'] = {}
for qb in self.qb_names:
sp_param = [k for k in self.mospm[qb] if 'freq' in k][0]
pdd['freqs_masked'][qb] = \
pdd['sweep_points_2D_dict'][qb][sp_param][self.mask_freq]
pdd['amps_masked'][qb] = \
pdd['sweep_points_dict'][qb]['sweep_points'][
:-self.num_cal_points][self.mask_amp]
data_masked = data_no_cp[qb][self.mask_amp,:]
pdd['data_masked'][qb] = data_masked[:, self.mask_freq]
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
# Gaussian fit of amplitude slices
gauss_mod = fit_mods.GaussianModel_v2()
for qb in self.qb_names:
for i in range(len(pdd['amps_masked'][qb])):
data = pdd['data_masked'][qb][i,:]
self.fit_dicts[f'gauss_fit_{qb}_{i}'] = {
'model': gauss_mod,
'fit_xvals': {'x': pdd['freqs_masked'][qb]},
'fit_yvals': {'data': data}
}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['gauss_center'] = {}
pdd['gauss_center_err'] = {}
pdd['filtered_center'] = {}
pdd['filtered_amps'] = {}
for qb in self.qb_names:
pdd['gauss_center'][qb] = np.array([
self.fit_res[f'gauss_fit_{qb}_{i}'].best_values['center']
for i in range(len(pdd['amps_masked'][qb]))])
pdd['gauss_center_err'][qb] = np.array([
self.fit_res[f'gauss_fit_{qb}_{i}'].params['center'].stderr
for i in range(len(pdd['amps_masked'][qb]))])
# filter out points with stderr > 1e6 Hz
pdd['filtered_center'][qb] = np.array([])
pdd['filtered_amps'][qb] = np.array([])
            for i, stderr in enumerate(pdd['gauss_center_err'][qb]):
                try:
                    if stderr < 1e6:
                        pdd['filtered_center'][qb] = \
                            np.append(pdd['filtered_center'][qb],
                                      pdd['gauss_center'][qb][i])
                        pdd['filtered_amps'][qb] = \
                            np.append(pdd['filtered_amps'][qb],
                                      pdd['sweep_points_dict'][qb]
                                      ['sweep_points'][:-self.num_cal_points][i])
                # stderr is None when the fit did not converge; the
                # comparison then raises TypeError and the point is skipped.
                except TypeError:
                    continue
# if gaussian fitting does not work (i.e. all points were filtered
# out above) use max value of data to get an estimate of freq
if len(pdd['filtered_amps'][qb]) == 0:
for qb in self.qb_names:
freqs = np.array([])
for i in range(pdd['data_masked'][qb].shape[0]):
freqs = np.append(freqs, pdd['freqs_masked'][qb]\
[np.argmax(pdd['data_masked'][qb][i,:])])
pdd['filtered_center'][qb] = freqs
pdd['filtered_amps'][qb] = pdd['amps_masked'][qb]
# fit the freqs to the qubit model
self.fit_func = self.get_param_value('fit_func', fit_mods.Qubit_dac_to_freq)
if self.fit_func == fit_mods.Qubit_dac_to_freq_precise:
fit_guess_func = fit_mods.Qubit_dac_arch_guess_precise
else:
fit_guess_func = fit_mods.Qubit_dac_arch_guess
freq_mod = lmfit.Model(self.fit_func)
fixed_params = \
self.get_param_value("fixed_params_for_fit", {}).get(qb, None)
if fixed_params is None:
fixed_params = dict(E_c=0)
freq_mod.guess = fit_guess_func.__get__(
freq_mod, freq_mod.__class__)
self.fit_dicts[f'freq_fit_{qb}'] = {
'model': freq_mod,
'fit_xvals': {'dac_voltage': pdd['filtered_amps'][qb]},
'fit_yvals': {'data': pdd['filtered_center'][qb]},
"guessfn_pars": {"fixed_params": fixed_params}}
self.run_fitting()
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
sp_param = [k for k in self.mospm[qb] if 'freq' in k][0]
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'data_2d_{qb}',
'plotfn': self.plot_colorxy,
'xvals': pdd['sweep_points_dict'][qb]['sweep_points'],
'yvals': pdd['sweep_points_2D_dict'][qb][sp_param],
'zvals': np.transpose(pdd['data_reshaped'][qb]),
'xlabel': r'Flux pulse amplitude',
'xunit': 'V',
'ylabel': r'Qubit drive frequency',
'yunit': 'Hz',
'zlabel': 'Excited state population',
}
if self.do_fitting:
if self.options_dict.get('scatter', True):
label = f'freq_scatter_{qb}_scatter'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'data_2d_{qb}',
'plotfn': self.plot_line,
'linestyle': '',
'marker': 'o',
'xvals': pdd['filtered_amps'][qb],
'yvals': pdd['filtered_center'][qb],
'xlabel': r'Flux pulse amplitude',
'xunit': 'V',
'ylabel': r'Qubit drive frequency',
'yunit': 'Hz',
'color': 'white',
}
amps = pdd['sweep_points_dict'][qb]['sweep_points'][
:-self.num_cal_points]
label = f'freq_scatter_{qb}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'data_2d_{qb}',
'plotfn': self.plot_line,
'linestyle': '-',
'marker': '',
'xvals': amps,
'yvals': self.fit_func(amps,
**self.fit_res[f'freq_fit_{qb}'].best_values),
'color': 'red',
}
class T1FrequencySweepAnalysis(MultiQubit_TimeDomain_Analysis):
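    """
    Extracts T1 as a function of flux pulse amplitude (and, if available,
    of the derived qubit frequency): for each amplitude the excited-state
    population versus flux pulse length is fit with an exponential decay,
    and points whose uncertainty is at least ten times the fitted T1 are
    masked out.
    """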
def process_data(self):
super().process_data()
pdd = self.proc_data_dict
nr_cp = self.num_cal_points
self.lengths = OrderedDict()
self.amps = OrderedDict()
self.freqs = OrderedDict()
for qbn in self.qb_names:
len_key = [pn for pn in self.mospm[qbn] if 'length' in pn]
if len(len_key) == 0:
raise KeyError('Couldn"t find sweep points corresponding to '
'flux pulse length.')
self.lengths[qbn] = self.sp.get_sweep_params_property(
'values', 0, len_key[0])
amp_key = [pn for pn in self.mospm[qbn] if 'amp' in pn]
            if len(amp_key) == 0:
                raise KeyError('Couldn\'t find sweep points corresponding '
                               'to flux pulse amplitude.')
self.amps[qbn] = self.sp.get_sweep_params_property(
'values', 1, amp_key[0])
freq_key = [pn for pn in self.mospm[qbn] if 'freq' in pn]
if len(freq_key) == 0:
self.freqs[qbn] = None
else:
                self.freqs[qbn] = self.sp.get_sweep_params_property(
                    'values', 1, freq_key[0])
nr_amps = len(self.amps[self.qb_names[0]])
nr_lengths = len(self.lengths[self.qb_names[0]])
# make matrix out of vector
data_reshaped_no_cp = {qb: np.reshape(deepcopy(
pdd['data_to_fit'][qb][
:, :pdd['data_to_fit'][qb].shape[1]-nr_cp]).flatten(),
(nr_amps, nr_lengths)) for qb in self.qb_names}
pdd['data_reshaped_no_cp'] = data_reshaped_no_cp
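        # A minimal sketch of the reshaping above (shapes chosen for
        # illustration only): with nr_amps = 2 and nr_lengths = 3, the
        # flattened vector [d00, d01, d02, d10, d11, d12] becomes
        #     np.reshape(vec, (2, 3)) -> [[d00, d01, d02],
        #                                 [d10, d11, d12]]
        # so that row i holds the flux-pulse-length sweep taken at
        # amplitude i.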
        pdd['mask'] = {qb: np.ones(nr_amps, dtype=bool)
                       for qb in self.qb_names}
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
exp_mod = fit_mods.ExponentialModel()
for qb in self.qb_names:
for i, data in enumerate(pdd['data_reshaped_no_cp'][qb]):
self.fit_dicts[f'exp_fit_{qb}_amp_{i}'] = {
'model': exp_mod,
'fit_xvals': {'x': self.lengths[qb]},
'fit_yvals': {'data': data}}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['T1'] = {}
pdd['T1_err'] = {}
for qb in self.qb_names:
pdd['T1'][qb] = np.array([
abs(self.fit_res[f'exp_fit_{qb}_amp_{i}'].best_values['decay'])
for i in range(len(self.amps[qb]))])
pdd['T1_err'][qb] = np.array([
self.fit_res[f'exp_fit_{qb}_amp_{i}'].params['decay'].stderr
for i in range(len(self.amps[qb]))])
for i in range(len(self.amps[qb])):
try:
if pdd['T1_err'][qb][i] >= 10 * pdd['T1'][qb][i]:
pdd['mask'][qb][i] = False
except TypeError:
pdd['mask'][qb][i] = False
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
for p, param_values in enumerate([self.amps, self.freqs]):
                if param_values[qb] is None:
continue
suffix = '_amp' if p == 0 else '_freq'
mask = pdd['mask'][qb]
xlabel = r'Flux pulse amplitude' if p == 0 else \
r'Derived qubit frequency'
if self.do_fitting:
# Plot T1 vs flux pulse amplitude
label = f'T1_fit_{qb}{suffix}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'plotfn': self.plot_line,
'linestyle': '-',
'xvals': param_values[qb][mask],
'yvals': pdd['T1'][qb][mask],
'yerr': pdd['T1_err'][qb][mask],
'xlabel': xlabel,
'xunit': 'V' if p == 0 else 'Hz',
'ylabel': r'T1',
'yunit': 's',
'color': 'blue',
}
                    # Plot rotated integrated average as a function of
                    # flux pulse amplitude and length
label = f'T1_color_plot_{qb}{suffix}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'plotfn': self.plot_colorxy,
'linestyle': '-',
'xvals': param_values[qb][mask],
'yvals': self.lengths[qb],
'zvals': np.transpose(pdd['data_reshaped_no_cp'][qb][mask]),
'xlabel': xlabel,
'xunit': 'V' if p == 0 else 'Hz',
'ylabel': r'Flux pulse length',
'yunit': 's',
'zlabel': r'Excited state population'
}
# Plot population loss for the first flux pulse length as a
# function of flux pulse amplitude
label = f'Pop_loss_{qb}{suffix}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'plotfn': self.plot_line,
'linestyle': '-',
'xvals': param_values[qb][mask],
'yvals': 1 - pdd['data_reshaped_no_cp'][qb][:, 0][mask],
'xlabel': xlabel,
'xunit': 'V' if p == 0 else 'Hz',
'ylabel': r'Pop. loss @ {:.0f} ns'.format(
self.lengths[qb][0]/1e-9
),
'yunit': '',
}
# Plot all fits in single figure
if self.options_dict.get('all_fits', False) and self.do_fitting:
colormap = self.options_dict.get('colormap', mpl.cm.Blues)
for i in range(len(self.amps[qb])):
color = colormap(i/(len(self.amps[qb])-1))
label = f'exp_fit_{qb}_amp_{i}'
fitid = param_values[qb][i]
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'fig_id': f'T1_fits_{qb}',
'xlabel': r'Flux pulse length',
'xunit': 's',
'ylabel': r'Excited state population',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': f'freq={fitid:.4f}' if p == 1
else f'amp={fitid:.4f}',
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
label = f'freq_scatter_{qb}_{i}'
self.plot_dicts[label] = {
'fig_id': f'T1_fits_{qb}',
'plotfn': self.plot_line,
'xvals': self.lengths[qb],
'linestyle': '',
'yvals': pdd['data_reshaped_no_cp'][qb][i, :],
'color': color,
'setlabel': f'freq={fitid:.4f}' if p == 1
else f'amp={fitid:.4f}',
}
class T2FrequencySweepAnalysis(MultiQubit_TimeDomain_Analysis):
def process_data(self):
super().process_data()
pdd = self.proc_data_dict
nr_cp = self.num_cal_points
nr_amps = len(self.metadata['amplitudes'])
nr_lengths = len(self.metadata['flux_lengths'])
nr_phases = len(self.metadata['phases'])
# make matrix out of vector
data_reshaped_no_cp = {qb: np.reshape(
deepcopy(pdd['data_to_fit'][qb][
:, :pdd['data_to_fit'][qb].shape[1]-nr_cp]).flatten(),
(nr_amps, nr_lengths, nr_phases)) for qb in self.qb_names}
pdd['data_reshaped_no_cp'] = data_reshaped_no_cp
if self.metadata['use_cal_points']:
pdd['cal_point_data'] = {qb: deepcopy(
pdd['data_to_fit'][qb][
len(pdd['data_to_fit'][qb])-nr_cp:]) for qb in self.qb_names}
        pdd['mask'] = {qb: np.ones(nr_amps, dtype=bool)
                       for qb in self.qb_names}
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
nr_amps = len(self.metadata['amplitudes'])
for qb in self.qb_names:
for i in range(nr_amps):
for j, data in enumerate(pdd['data_reshaped_no_cp'][qb][i]):
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.metadata['phases'],
data=data,
freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts[f'cos_fit_{qb}_{i}_{j}'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.metadata['phases']},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['T2'] = {}
pdd['T2_err'] = {}
pdd['phase_contrast'] = {}
nr_lengths = len(self.metadata['flux_lengths'])
nr_amps = len(self.metadata['amplitudes'])
for qb in self.qb_names:
pdd['phase_contrast'][qb] = {}
exp_mod = fit_mods.ExponentialModel()
for i in range(nr_amps):
                amplitudes = np.array([
                    self.fit_res[f'cos_fit_{qb}_{i}_{j}'].best_values[
                        'amplitude'] for j in range(nr_lengths)])
                pdd['phase_contrast'][qb][f'amp_{i}'] = amplitudes
                self.fit_dicts[f'exp_fit_{qb}_{i}'] = {
                    'model': exp_mod,
                    'fit_xvals': {'x': self.metadata['flux_lengths']},
                    'fit_yvals': {'data': amplitudes}}
self.run_fitting()
pdd['T2'][qb] = np.array([
abs(self.fit_res[f'exp_fit_{qb}_{i}'].best_values['decay'])
for i in range(len(self.metadata['amplitudes']))])
            # the mask was initialized as a boolean array in process_data;
            # flag amplitudes whose decay fit is unreliable
            for i in range(len(self.metadata['amplitudes'])):
                try:
                    if self.fit_res[f'exp_fit_{qb}_{i}']\
                            .params['decay'].stderr >= 1e-5:
                        pdd['mask'][qb][i] = False
                except TypeError:
                    pdd['mask'][qb][i] = False
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
mask = pdd['mask'][qb]
label = f'T2_fit_{qb}'
xvals = self.metadata['amplitudes'][mask] if \
self.metadata['frequencies'] is None else \
self.metadata['frequencies'][mask]
xlabel = r'Flux pulse amplitude' if \
self.metadata['frequencies'] is None else \
r'Derived qubit frequency'
self.plot_dicts[label] = {
'plotfn': self.plot_line,
'linestyle': '-',
'xvals': xvals,
'yvals': pdd['T2'][qb][mask],
'xlabel': xlabel,
'xunit': 'V' if self.metadata['frequencies'] is None else 'Hz',
'ylabel': r'T2',
'yunit': 's',
'color': 'blue',
}
# Plot all fits in single figure
if not self.options_dict.get('all_fits', False):
continue
colormap = self.options_dict.get('colormap', mpl.cm.Blues)
for i in range(len(self.metadata['amplitudes'])):
                color = colormap(i/(len(self.metadata['amplitudes'])-1))
label = f'exp_fit_{qb}_amp_{i}'
freqs = self.metadata['frequencies'] is not None
fitid = self.metadata.get('frequencies',
self.metadata['amplitudes'])[i]
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'T2_fits_{qb}',
'xlabel': r'Flux pulse length',
'xunit': 's',
'ylabel': r'Excited state population',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': f'freq={fitid:.4f}' if freqs
else f'amp={fitid:.4f}',
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
label = f'freq_scatter_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'T2_fits_{qb}',
'plotfn': self.plot_line,
'xvals': self.metadata['phases'],
'linestyle': '',
'yvals': pdd['data_reshaped_no_cp'][qb][i,:],
'color': color,
'setlabel': f'freq={fitid:.4f}' if freqs
else f'amp={fitid:.4f}',
}
class MeasurementInducedDephasingAnalysis(MultiQubit_TimeDomain_Analysis):
def process_data(self):
super().process_data()
rdd = self.raw_data_dict
pdd = self.proc_data_dict
pdd['data_reshaped'] = {qb: [] for qb in pdd['data_to_fit']}
pdd['amps_reshaped'] = np.unique(self.metadata['hard_sweep_params']['ro_amp_scale']['values'])
pdd['phases_reshaped'] = []
for amp in pdd['amps_reshaped']:
mask = self.metadata['hard_sweep_params']['ro_amp_scale']['values'] == amp
pdd['phases_reshaped'].append(self.metadata['hard_sweep_params']['phase']['values'][mask])
for qb in self.qb_names:
pdd['data_reshaped'][qb].append(pdd['data_to_fit'][qb][:len(mask)][mask])
def prepare_fitting(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
self.fit_dicts = OrderedDict()
for qb in self.qb_names:
for i, data in enumerate(pdd['data_reshaped'][qb]):
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=pdd['phases_reshaped'][i],
data=data, freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts[f'cos_fit_{qb}_{i}'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': pdd['phases_reshaped'][i]},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['phase_contrast'] = {}
pdd['phase_offset'] = {}
pdd['sigma'] = {}
pdd['sigma_err'] = {}
pdd['a'] = {}
pdd['a_err'] = {}
pdd['c'] = {}
pdd['c_err'] = {}
for qb in self.qb_names:
pdd['phase_contrast'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['amplitude']
for i, _ in enumerate(pdd['data_reshaped'][qb])])
pdd['phase_offset'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['phase']
for i, _ in enumerate(pdd['data_reshaped'][qb])])
pdd['phase_offset'][qb] += np.pi * (pdd['phase_contrast'][qb] < 0)
pdd['phase_offset'][qb] = (pdd['phase_offset'][qb] + np.pi) % (2 * np.pi) - np.pi
pdd['phase_offset'][qb] = 180*np.unwrap(pdd['phase_offset'][qb])/np.pi
pdd['phase_contrast'][qb] = np.abs(pdd['phase_contrast'][qb])
gauss_mod = lmfit.models.GaussianModel()
self.fit_dicts[f'phase_contrast_fit_{qb}'] = {
'model': gauss_mod,
'guess_dict': {'center': {'value': 0, 'vary': False}},
'fit_xvals': {'x': pdd['amps_reshaped']},
'fit_yvals': {'data': pdd['phase_contrast'][qb]}}
quadratic_mod = lmfit.models.QuadraticModel()
self.fit_dicts[f'phase_offset_fit_{qb}'] = {
'model': quadratic_mod,
'guess_dict': {'b': {'value': 0, 'vary': False}},
'fit_xvals': {'x': pdd['amps_reshaped']},
'fit_yvals': {'data': pdd['phase_offset'][qb]}}
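            # The choice of models here reflects the expected physics (an
            # interpretation, not enforced by the code): the Ramsey
            # contrast is expected to decay roughly Gaussian-like with the
            # readout amplitude as measurement-induced dephasing grows,
            # while the AC-Stark phase shift scales with the photon
            # number, i.e. quadratically in the amplitude, which is why
            # the linear coefficient b is fixed to zero.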
self.run_fitting()
self.save_fit_results()
pdd['sigma'][qb] = self.fit_res[f'phase_contrast_fit_{qb}'].best_values['sigma']
            pdd['sigma_err'][qb] = self.fit_res[
                f'phase_contrast_fit_{qb}'].params['sigma'].stderr
pdd['a'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].best_values['a']
pdd['a_err'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].params['a'].stderr
pdd['c'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].best_values['c']
pdd['c_err'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].params['c'].stderr
pdd['sigma_err'][qb] = float('nan') if pdd['sigma_err'][qb] is None \
else pdd['sigma_err'][qb]
pdd['a_err'][qb] = float('nan') if pdd['a_err'][qb] is None else pdd['a_err'][qb]
pdd['c_err'][qb] = float('nan') if pdd['c_err'][qb] is None else pdd['c_err'][qb]
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
phases_equal = True
for phases in pdd['phases_reshaped'][1:]:
if not np.all(phases == pdd['phases_reshaped'][0]):
phases_equal = False
break
for qb in self.qb_names:
if phases_equal:
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'plotfn': self.plot_colorxy,
'xvals': pdd['phases_reshaped'][0],
'yvals': pdd['amps_reshaped'],
'zvals': pdd['data_reshaped'][qb],
'xlabel': r'Pulse phase, $\phi$',
'xunit': 'deg',
'ylabel': r'Readout pulse amplitude scale, $V_{RO}/V_{ref}$',
'yunit': '',
'zlabel': 'Excited state population',
}
colormap = self.options_dict.get('colormap', mpl.cm.Blues)
for i, amp in enumerate(pdd['amps_reshaped']):
color = colormap(i/(len(pdd['amps_reshaped'])-1))
label = f'cos_data_{qb}_{i}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'amplitude_crossections_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['phases_reshaped'][i],
'yvals': pdd['data_reshaped'][qb][i],
'xlabel': r'Pulse phase, $\phi$',
'xunit': 'deg',
'ylabel': 'Excited state population',
'linestyle': '',
'color': color,
'setlabel': f'amp={amp:.4f}',
'do_legend': True,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
if self.do_fitting:
for i, amp in enumerate(pdd['amps_reshaped']):
color = colormap(i/(len(pdd['amps_reshaped'])-1))
label = f'cos_fit_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'amplitude_crossections_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': f'fit, amp={amp:.4f}',
}
# Phase contrast
self.plot_dicts[f'phase_contrast_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': 200*pdd['phase_contrast'][qb],
'xlabel': r'Readout pulse amplitude scale, $V_{RO}/V_{ref}$',
'xunit': '',
'ylabel': 'Phase contrast',
'yunit': '%',
'linestyle': '',
'color': 'k',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_contrast_fit_{qb}'] = {
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': 200*self.fit_res[f'phase_contrast_fit_{qb}'].best_fit,
'color': 'r',
'marker': '',
'setlabel': 'fit',
'do_legend': True,
}
self.plot_dicts[f'phase_contrast_labels_{qb}'] = {
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': 200*pdd['phase_contrast'][qb],
'marker': '',
'linestyle': '',
'setlabel': r'$\sigma = ({:.5f} \pm {:.5f})$ V'.
format(pdd['sigma'][qb], pdd['sigma_err'][qb]),
'do_legend': True,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
# Phase offset
self.plot_dicts[f'phase_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': pdd['phase_offset'][qb],
'xlabel': r'Readout pulse amplitude scale, $V_{RO}/V_{ref}$',
'xunit': '',
'ylabel': 'Phase offset',
'yunit': 'deg',
'linestyle': '',
'color': 'k',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_offset_fit_{qb}'] = {
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': self.fit_res[f'phase_offset_fit_{qb}'].best_fit,
'color': 'r',
'marker': '',
'setlabel': 'fit',
'do_legend': True,
}
self.plot_dicts[f'phase_offset_labels_{qb}'] = {
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': pdd['phase_offset'][qb],
'marker': '',
'linestyle': '',
'setlabel': r'$a = {:.0f} \pm {:.0f}$ deg/V${{}}^2$'.
format(pdd['a'][qb], pdd['a_err'][qb]) + '\n' +
r'$c = {:.1f} \pm {:.1f}$ deg'.
format(pdd['c'][qb], pdd['c_err'][qb]),
'do_legend': True,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
class DriveCrosstalkCancellationAnalysis(MultiQubit_TimeDomain_Analysis):
def process_data(self):
super().process_data()
if self.sp is None:
raise ValueError('This analysis needs a SweepPoints '
'class instance.')
pdd = self.proc_data_dict
# get the ramsey phases as the values of the first sweep parameter
# in the 2nd sweep dimension.
# !!! This assumes all qubits have the same ramsey phases !!!
pdd['ramsey_phases'] = self.sp.get_sweep_params_property('values', 1)
pdd['qb_sweep_points'] = {}
pdd['qb_sweep_param'] = {}
for k, v in self.sp.get_sweep_dimension(0).items():
if k == 'phase':
continue
qb, param = k.split('.')
pdd['qb_sweep_points'][qb] = v[0]
pdd['qb_sweep_param'][qb] = (param, v[1], v[2])
pdd['qb_msmt_vals'] = {}
pdd['qb_cal_vals'] = {}
for qb, data in pdd['data_to_fit'].items():
pdd['qb_msmt_vals'][qb] = data[:, :-self.num_cal_points].reshape(
len(pdd['qb_sweep_points'][qb]), len(pdd['ramsey_phases']))
pdd['qb_cal_vals'][qb] = data[0, -self.num_cal_points:]
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
for qb in self.qb_names:
for i, data in enumerate(pdd['qb_msmt_vals'][qb]):
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=pdd['ramsey_phases'],
data=data, freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts[f'cos_fit_{qb}_{i}'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': pdd['ramsey_phases']},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['phase_contrast'] = {}
pdd['phase_offset'] = {}
for qb in self.qb_names:
pdd['phase_contrast'][qb] = np.array([
2*self.fit_res[f'cos_fit_{qb}_{i}'].best_values['amplitude']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
pdd['phase_offset'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['phase']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
pdd['phase_offset'][qb] *= 180/np.pi
pdd['phase_offset'][qb] += 180 * (pdd['phase_contrast'][qb] < 0)
pdd['phase_offset'][qb] = (pdd['phase_offset'][qb] + 180) % 360 - 180
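            # Worked example of the wrapping above: a fitted offset of
            # 350 deg maps to (350 + 180) % 360 - 180 = -10 deg, so all
            # offsets land in the interval [-180, 180).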
pdd['phase_contrast'][qb] = np.abs(pdd['phase_contrast'][qb])
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'plotfn': self.plot_colorxy,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['qb_sweep_points'][qb],
'zvals': pdd['qb_msmt_vals'][qb],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': pdd['qb_sweep_param'][qb][2],
'yunit': pdd['qb_sweep_param'][qb][1],
'zlabel': 'Excited state population',
}
colormap = self.options_dict.get('colormap', mpl.cm.Blues)
for i, pval in enumerate(pdd['qb_sweep_points'][qb]):
if i == len(pdd['qb_sweep_points'][qb]) - 1:
                    legendlabel = 'data, ref.'
else:
legendlabel = f'data, {pdd["qb_sweep_param"][qb][0]}='\
f'{pval:.4f}{pdd["qb_sweep_param"][qb][1]}'
color = colormap(i/(len(pdd['qb_sweep_points'][qb])-1))
label = f'cos_data_{qb}_{i}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['qb_msmt_vals'][qb][i],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': 'Excited state population',
'linestyle': '',
'color': color,
'setlabel': legendlabel,
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
if self.do_fitting:
for i, pval in enumerate(pdd['qb_sweep_points'][qb]):
if i == len(pdd['qb_sweep_points'][qb]) - 1:
legendlabel = 'fit, ref.'
else:
legendlabel = f'fit, {pdd["qb_sweep_param"][qb][0]}='\
f'{pval:.4f}{pdd["qb_sweep_param"][qb][1]}'
color = colormap(i/(len(pdd['qb_sweep_points'][qb])-1))
label = f'cos_fit_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'do_legend': False,
# 'setlabel': legendlabel
}
# Phase contrast
self.plot_dicts[f'phase_contrast_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['qb_sweep_points'][qb][:-1],
'yvals': pdd['phase_contrast'][qb][:-1] * 100,
'xlabel': pdd['qb_sweep_param'][qb][2],
'xunit': pdd['qb_sweep_param'][qb][1],
'ylabel': 'Phase contrast',
'yunit': '%',
'linestyle': '-',
'marker': 'o',
'color': 'C0',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_contrast_ref_{qb}'] = {
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_hlines,
'xmin': pdd['qb_sweep_points'][qb][:-1].min(),
'xmax': pdd['qb_sweep_points'][qb][:-1].max(),
'y': pdd['phase_contrast'][qb][-1] * 100,
'linestyle': '--',
'colors': '0.6',
'setlabel': 'ref',
'do_legend': True,
}
# Phase offset
self.plot_dicts[f'phase_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['qb_sweep_points'][qb][:-1],
'yvals': pdd['phase_offset'][qb][:-1],
'xlabel': pdd['qb_sweep_param'][qb][2],
'xunit': pdd['qb_sweep_param'][qb][1],
'ylabel': 'Phase offset',
'yunit': 'deg',
'linestyle': '-',
'marker': 'o',
'color': 'C0',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_offset_ref_{qb}'] = {
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_hlines,
'xmin': pdd['qb_sweep_points'][qb][:-1].min(),
'xmax': pdd['qb_sweep_points'][qb][:-1].max(),
'y': pdd['phase_offset'][qb][-1],
'linestyle': '--',
'colors': '0.6',
'setlabel': 'ref',
'do_legend': True,
}
class FluxlineCrosstalkAnalysis(MultiQubit_TimeDomain_Analysis):
"""Analysis for the measure_fluxline_crosstalk measurement.
The measurement involves Ramsey measurements on a set of crosstalk qubits,
which have been brought to a flux-sensitive position with a flux pulse.
    The first sweep dimension is the Ramsey phase of these qubits.
In the second sweep dimension, the amplitude of a flux pulse on another
(target) qubit is swept.
The analysis extracts the change in Ramsey phase offset, which gets
converted to a frequency offset due to the flux pulse on the target qubit.
The frequency offset is then converted to a flux offset, which is a measure
of the crosstalk between the target fluxline and the crosstalk qubit.
    The measurement is hard-compressed, meaning the raw data is inherently 1d,
    with one set of calibration points as the final segments. The experiment
    part of the measured values is reshaped to the correct 2d shape for
    the analysis. The sweep points passed into the analysis should still
    reflect the 2d nature of the measurement, meaning the Ramsey phase values
    should be passed in the first dimension and the target flux pulse
    amplitudes in the second sweep dimension.
"""
def __init__(self, qb_names, *args, **kwargs):
params_dict = {}
for param in ['fit_ge_freq_from_flux_pulse_amp',
'fit_ge_freq_from_dc_offset',
'flux_amplitude_bias_ratio',
'flux_parking']:
params_dict.update({
f'{qbn}.{param}': f'Instrument settings.{qbn}.{param}'
for qbn in qb_names})
kwargs['params_dict'] = kwargs.get('params_dict', {})
kwargs['params_dict'].update(params_dict)
super().__init__(qb_names, *args, **kwargs)
def process_data(self):
super().process_data()
if self.sp is None:
raise ValueError('This analysis needs a SweepPoints '
'class instance.')
pdd = self.proc_data_dict
pdd['ramsey_phases'] = self.sp.get_sweep_params_property('values', 0)
pdd['target_amps'] = self.sp.get_sweep_params_property('values', 1)
pdd['target_fluxpulse_length'] = \
self.get_param_value('target_fluxpulse_length')
pdd['crosstalk_qubits_amplitudes'] = \
self.get_param_value('crosstalk_qubits_amplitudes')
pdd['qb_msmt_vals'] = {qb:
pdd['data_to_fit'][qb][:, :-self.num_cal_points].reshape(
len(pdd['target_amps']), len(pdd['ramsey_phases']))
for qb in self.qb_names}
pdd['qb_cal_vals'] = {
qb: pdd['data_to_fit'][qb][0, -self.num_cal_points:]
for qb in self.qb_names}
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
cos_mod = lmfit.Model(fit_mods.CosFunc)
cos_mod.guess = fit_mods.Cos_guess.__get__(cos_mod, cos_mod.__class__)
for qb in self.qb_names:
for i, data in enumerate(pdd['qb_msmt_vals'][qb]):
self.fit_dicts[f'cos_fit_{qb}_{i}'] = {
'model': cos_mod,
'guess_dict': {'frequency': {'value': 1 / 360,
'vary': False}},
'fit_xvals': {'t': pdd['ramsey_phases']},
'fit_yvals': {'data': data}}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['phase_contrast'] = {}
pdd['phase_offset'] = {}
pdd['freq_offset'] = {}
pdd['freq'] = {}
self.skip_qb_freq_fits = self.get_param_value('skip_qb_freq_fits', False)
self.vfc_method = self.get_param_value('vfc_method', 'transmon_res')
if not self.skip_qb_freq_fits:
pdd['flux'] = {}
for qb in self.qb_names:
pdd['phase_contrast'][qb] = np.array([
2 * self.fit_res[f'cos_fit_{qb}_{i}'].best_values['amplitude']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
pdd['phase_offset'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['phase']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
pdd['phase_offset'][qb] *= 180 / np.pi
pdd['phase_offset'][qb] += 180 * (pdd['phase_contrast'][qb] < 0)
pdd['phase_offset'][qb] = (pdd['phase_offset'][qb] + 180) % 360 - 180
pdd['phase_offset'][qb] = \
np.unwrap(pdd['phase_offset'][qb] / 180 * np.pi) * 180 / np.pi
pdd['phase_contrast'][qb] = np.abs(pdd['phase_contrast'][qb])
pdd['freq_offset'][qb] = pdd['phase_offset'][qb] / 360 / pdd[
'target_fluxpulse_length']
startval_slope = (pdd['freq_offset'][qb][-1] - pdd['freq_offset'][
qb][0]) / (pdd['target_amps'][-1] - pdd['target_amps'][0])
startval_offset = pdd['freq_offset'][qb][
len(pdd['freq_offset'][qb]) // 2]
fr = lmfit.Model(lambda a,
f_a=startval_slope,
f0=startval_offset: a * f_a + f0).fit(
data=pdd['freq_offset'][qb], a=pdd['target_amps'])
pdd['freq_offset'][qb] -= fr.best_values['f0']
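            # Only the fitted offset f0 is subtracted here; the remaining
            # slope of freq_offset versus target amplitude is the actual
            # crosstalk signal, which the linear fits further below
            # extract.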
if not self.skip_qb_freq_fits:
if self.vfc_method == 'approx':
mpars = self.raw_data_dict[
f'{qb}.fit_ge_freq_from_flux_pulse_amp']
freq_pulsed_no_crosstalk = fit_mods.Qubit_dac_to_freq(
pdd['crosstalk_qubits_amplitudes'].get(qb, 0), **mpars)
pdd['freq'][qb] = pdd['freq_offset'][
qb] + freq_pulsed_no_crosstalk
mpars.update({'V_per_phi0': 1, 'dac_sweet_spot': 0})
pdd['flux'][qb] = fit_mods.Qubit_freq_to_dac(
pdd['freq'][qb], **mpars)
else:
mpars = self.get_param_value(
f'{qb}.fit_ge_freq_from_dc_offset')
ratio = self.get_param_value(
f'{qb}.flux_amplitude_bias_ratio')
flux_parking = self.get_param_value(
f'{qb}.flux_parking')
bias = (mpars['dac_sweet_spot']
+ mpars['V_per_phi0'] * flux_parking)
amp = pdd['crosstalk_qubits_amplitudes'].get(qb, 0)
freq_pulsed_no_crosstalk = fit_mods.Qubit_dac_to_freq_res(
(bias + amp / ratio), **mpars)
pdd['freq'][qb] = pdd['freq_offset'][qb] + freq_pulsed_no_crosstalk
# mpars.update({'V_per_phi0': 1, 'dac_sweet_spot': 0})
volt = fit_mods.Qubit_freq_to_dac_res(
pdd['freq'][qb], **mpars,
branch=(bias + amp / ratio))
pdd['flux'][qb] = (volt - mpars['dac_sweet_spot']) \
/ mpars['V_per_phi0'] # convert volt to flux
# fit fitted results to linear models
lin_mod = lmfit.Model(lambda x, a=1, b=0: a*x + b)
def guess(model, data, x, **kwargs):
a_guess = (data[-1] - data[0])/(x[-1] - x[0])
b_guess = data[0] - x[0]*a_guess
return model.make_params(a=a_guess, b=b_guess)
lin_mod.guess = guess.__get__(lin_mod, lin_mod.__class__)
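        # guess.__get__(lin_mod, ...) binds the plain function above as a
        # method of this model instance, mimicking lmfit's Model.guess
        # interface: lin_mod.guess(data, x) now calls
        # guess(lin_mod, data, x) and returns initialized Parameters with
        # the slope and intercept estimated from the first and last data
        # points.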
keys_to_fit = []
for qb in self.qb_names:
for param in ['phase_offset', 'freq_offset', 'flux']:
if param == 'flux' and self.skip_qb_freq_fits:
continue
key = f'{param}_fit_{qb}'
self.fit_dicts[key] = {
'model': lin_mod,
'fit_xvals': {'x': pdd['target_amps']},
'fit_yvals': {'data': pdd[param][qb]}}
keys_to_fit.append(key)
self.run_fitting(keys_to_fit=keys_to_fit)
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'plotfn': self.plot_colorxy,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['target_amps'],
'zvals': pdd['qb_msmt_vals'][qb],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': self.sp.get_sweep_params_property('label', 1,
'target_amp'),
'yunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'zlabel': 'Excited state population',
}
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i, pval in enumerate(pdd['target_amps']):
legendlabel = f'data, amp. = {pval:.4f} V'
color = colormap(i / (len(pdd['target_amps']) - 1))
label = f'cos_data_{qb}_{i}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['qb_msmt_vals'][qb][i],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': 'Excited state population',
'linestyle': '',
'color': color,
'setlabel': legendlabel,
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
if self.do_fitting:
for i, pval in enumerate(pdd['target_amps']):
legendlabel = f'fit, amp. = {pval:.4f} V'
color = colormap(i / (len(pdd['target_amps']) - 1))
label = f'cos_fit_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': legendlabel,
'do_legend': False,
}
# Phase contrast
self.plot_dicts[f'phase_contrast_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['phase_contrast'][qb] * 100,
                'xlabel': self.sp.get_sweep_params_property(
                    'label', 1, 'target_amp'),
'xunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'ylabel': 'Phase contrast',
'yunit': '%',
'linestyle': '-',
'marker': 'o',
'color': 'C0',
}
# Phase offset
self.plot_dicts[f'phase_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['phase_offset'][qb],
                'xlabel': self.sp.get_sweep_params_property(
                    'label', 1, 'target_amp'),
'xunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'ylabel': 'Phase offset',
'yunit': 'deg',
'linestyle': 'none',
'marker': 'o',
'color': 'C0',
}
# Frequency offset
self.plot_dicts[f'freq_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'freq_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['freq_offset'][qb],
                'xlabel': self.sp.get_sweep_params_property(
                    'label', 1, 'target_amp'),
'xunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'ylabel': 'Freq. offset, $\\Delta f$',
'yunit': 'Hz',
'linestyle': 'none',
'marker': 'o',
'color': 'C0',
}
if not self.skip_qb_freq_fits:
# Flux
self.plot_dicts[f'flux_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'flux_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['flux'][qb],
'xlabel': self.sp[1]['target_amp'][2],
'xunit': self.sp[1]['target_amp'][1],
'ylabel': 'Flux, $\\Phi$',
'yunit': '$\\Phi_0$',
'linestyle': 'none',
'marker': 'o',
'color': 'C0',
}
for param in ['phase_offset', 'freq_offset', 'flux']:
if param == 'flux' and self.skip_qb_freq_fits:
continue
self.plot_dicts[f'{param}_fit_{qb}'] = {
'ax_id': f'{param}_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[f'{param}_fit_{qb}'],
'plot_init': self.options_dict.get('plot_init', False),
'linestyle': '-',
'marker': '',
'color': 'C1',
}
class RabiAnalysis(MultiQubit_TimeDomain_Analysis):
def extract_data(self):
super().extract_data()
params_dict = {}
for qbn in self.qb_names:
trans_name = self.get_transition_name(qbn)
s = 'Instrument settings.'+qbn
params_dict[f'{trans_name}_amp180_'+qbn] = \
s+f'.{trans_name}_amp180'
params_dict[f'{trans_name}_amp90scale_'+qbn] = \
s+f'.{trans_name}_amp90_scale'
self.raw_data_dict.update(
self.get_data_from_timestamp_list(params_dict))
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
def add_fit_dict(qbn, data, key, scalex=1):
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
reduction_arr = np.invert(np.isnan(data))
data = data[reduction_arr]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points'][reduction_arr] * scalex
cos_mod = lmfit.Model(fit_mods.CosFunc)
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=sweep_points, data=data)
guess_pars['amplitude'].vary = True
guess_pars['amplitude'].min = -10
guess_pars['offset'].vary = True
guess_pars['frequency'].vary = True
guess_pars['phase'].vary = True
self.set_user_guess_pars(guess_pars)
self.fit_dicts[key] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
for qbn in self.qb_names:
all_data = self.proc_data_dict['data_to_fit'][qbn]
if self.get_param_value('TwoD'):
daa = self.metadata.get('drive_amp_adaptation', {}).get(
qbn, None)
for i, data in enumerate(all_data):
key = f'cos_fit_{qbn}_{i}'
add_fit_dict(qbn, data, key,
scalex=1 if daa is None else daa[i])
else:
add_fit_dict(qbn, all_data, 'cos_fit_' + qbn)
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for k, fit_dict in self.fit_dicts.items():
# k is of the form cos_fit_qbn_i if TwoD else cos_fit_qbn
# replace k with qbn_i or qbn
k = k.replace('cos_fit_', '')
# split into qbn and i. (k + '_') is needed because if k = qbn
# doing k.split('_') will only have one output and assignment to
# two variables will fail.
qbn, i = (k + '_').split('_')[:2]
fit_res = fit_dict['fit_res']
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
self.proc_data_dict['analysis_params_dict'][k] = \
self.get_amplitudes(fit_res=fit_res, sweep_points=sweep_points)
self.save_processed_data(key='analysis_params_dict')
def get_amplitudes(self, fit_res, sweep_points):
# Extract the best fitted frequency and phase.
freq_fit = fit_res.best_values['frequency']
phase_fit = fit_res.best_values['phase']
freq_std = fit_res.params['frequency'].stderr
phase_std = fit_res.params['phase'].stderr
        # If the fitted phase is very close to zero, snap it to exactly
        # zero; such tiny values are numerical artifacts of the fit and
        # would otherwise shift the extracted pulse amplitudes.
        if np.abs(phase_fit) < 0.1:
            phase_fit = 0
# If phase_fit<1, the piHalf amplitude<0.
if phase_fit < 1:
log.info('The data could not be fitted correctly. '
'The fitted phase "%s" <1, which gives '
'negative piHalf '
'amplitude.' % phase_fit)
stepsize = sweep_points[1] - sweep_points[0]
if freq_fit > 2 * stepsize:
log.info('The data could not be fitted correctly. The '
'frequency "%s" is too high.' % freq_fit)
n = np.arange(-10, 10)
piPulse_vals = (n*np.pi - phase_fit)/(2*np.pi*freq_fit)
piHalfPulse_vals = (n*np.pi + np.pi/2 - phase_fit)/(2*np.pi*freq_fit)
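        # These candidates follow from the fitted model
        # cos(2*pi*f*x + phi), with x the sweep value (e.g. pulse
        # amplitude): extrema (pi-pulse candidates) occur where the
        # argument equals n*pi, i.e. x = (n*pi - phi)/(2*pi*f), and
        # equal-superposition points (pi/2-pulse candidates) where it
        # equals n*pi + pi/2. E.g. for f = 1 and phi = 0 the first
        # positive candidates are x = 0.5 (pi) and x = 0.25 (pi/2).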
# find piHalfPulse
try:
piHalfPulse = \
np.min(piHalfPulse_vals[piHalfPulse_vals >= sweep_points[0]])
            n_piHalf_pulse = n[piHalfPulse_vals == piHalfPulse][0]
except ValueError:
piHalfPulse = np.asarray([])
if piHalfPulse.size == 0 or piHalfPulse > max(sweep_points):
i = 0
            while (i < piHalfPulse_vals.size and
                   piHalfPulse_vals[i] < min(sweep_points)):
                i += 1
piHalfPulse = piHalfPulse_vals[i]
n_piHalf_pulse = n[i]
# find piPulse
try:
if piHalfPulse.size != 0:
piPulse = \
np.min(piPulse_vals[piPulse_vals >= piHalfPulse])
else:
piPulse = np.min(piPulse_vals[piPulse_vals >= 0.001])
            n_pi_pulse = n[piPulse_vals == piPulse][0]
except ValueError:
piPulse = np.asarray([])
if piPulse.size == 0:
i = 0
            while (i < piPulse_vals.size and
                   piPulse_vals[i] < min(sweep_points)):
                i += 1
piPulse = piPulse_vals[i]
n_pi_pulse = n[i]
try:
freq_idx = fit_res.var_names.index('frequency')
phase_idx = fit_res.var_names.index('phase')
if fit_res.covar is not None:
cov_freq_phase = fit_res.covar[freq_idx, phase_idx]
else:
cov_freq_phase = 0
except ValueError:
cov_freq_phase = 0
try:
piPulse_std = self.calculate_pulse_stderr(
f=freq_fit,
phi=phase_fit,
f_err=freq_std,
phi_err=phase_std,
period_const=n_pi_pulse*np.pi,
cov=cov_freq_phase)
piHalfPulse_std = self.calculate_pulse_stderr(
f=freq_fit,
phi=phase_fit,
f_err=freq_std,
phi_err=phase_std,
period_const=n_piHalf_pulse*np.pi + np.pi/2,
cov=cov_freq_phase)
except Exception:
log.warning(f'Some stderrs from fit are None, setting stderr '
f'of pi and pi/2 pulses to 0!')
piPulse_std = 0
piHalfPulse_std = 0
rabi_amplitudes = {'piPulse': piPulse,
'piPulse_stderr': piPulse_std,
'piHalfPulse': piHalfPulse,
'piHalfPulse_stderr': piHalfPulse_std}
return rabi_amplitudes
@staticmethod
def calculate_pulse_stderr(f, phi, f_err, phi_err,
period_const, cov=0):
jacobian = np.array([-1 / (2 * np.pi * f),
- (period_const - phi) / (2 * np.pi * f**2)])
cov_matrix = np.array([[phi_err**2, cov], [cov, f_err**2]])
return np.sqrt(jacobian @ cov_matrix @ jacobian.T)
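    # The propagation above follows from the pulse amplitude formula
    # A = (period_const - phi) / (2*pi*f): the Jacobian is
    #     J = [dA/dphi, dA/df]
    #       = [-1/(2*pi*f), -(period_const - phi)/(2*pi*f**2)],
    # and with S the covariance matrix of (phi, f), the standard error is
    # sqrt(J @ S @ J.T).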
def prepare_plots(self):
super().prepare_plots()
if self.do_fitting:
for k, fit_dict in self.fit_dicts.items():
if k.startswith('amplitude_fit'):
# This is only for RabiFrequencySweepAnalysis.
# It is handled by prepare_amplitude_fit_plots of that class
continue
# k is of the form cos_fit_qbn_i if TwoD else cos_fit_qbn
# replace k with qbn_i or qbn
k = k.replace('cos_fit_', '')
# split into qbn and i. (k + '_') is needed because if k = qbn
# doing k.split('_') will only have one output and assignment to
# two variables will fail.
qbn, i = (k + '_').split('_')[:2]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points']
first_sweep_param = self.get_first_sweep_param(
qbn, dimension=1)
if len(i) and first_sweep_param is not None:
# TwoD
label, unit, vals = first_sweep_param
title_suffix = (f'{i}: {label} = ' + ' '.join(
SI_val_to_msg_str(vals[int(i)], unit,
                                          return_type=lambda x: f'{x:0.4f}')))
daa = self.metadata.get('drive_amp_adaptation', {}).get(
qbn, None)
if daa is not None:
sweep_points = sweep_points * daa[int(i)]
else:
# OneD
title_suffix = ''
fit_res = fit_dict['fit_res']
base_plot_name = 'Rabi_' + k
dtf = self.proc_data_dict['data_to_fit'][qbn]
self.prepare_projected_data_plot(
fig_name=base_plot_name,
data=dtf[int(i)] if i != '' else dtf,
sweep_points=sweep_points,
plot_name_suffix=qbn+'fit',
qb_name=qbn, TwoD=False,
title_suffix=title_suffix
)
self.plot_dicts['fit_' + k] = {
'fig_id': base_plot_name,
'plotfn': self.plot_fit,
'fit_res': fit_res,
'setlabel': 'cosine fit',
'color': 'r',
'do_legend': True,
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
rabi_amplitudes = self.proc_data_dict['analysis_params_dict']
self.plot_dicts['piamp_marker_' + k] = {
'fig_id': base_plot_name,
'plotfn': self.plot_line,
'xvals': np.array([rabi_amplitudes[k]['piPulse']]),
'yvals': np.array([fit_res.model.func(
rabi_amplitudes[k]['piPulse'],
**fit_res.best_values)]),
                    'setlabel': r'$\pi$-Pulse amp',
'color': 'r',
'marker': 'o',
'line_kws': {'markersize': 10},
'linestyle': '',
'do_legend': True,
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
self.plot_dicts['piamp_hline_' + k] = {
'fig_id': base_plot_name,
'plotfn': self.plot_hlines,
'y': [fit_res.model.func(
rabi_amplitudes[k]['piPulse'],
**fit_res.best_values)],
'xmin': sweep_points[0],
'xmax': sweep_points[-1],
'colors': 'gray'}
self.plot_dicts['pihalfamp_marker_' + k] = {
'fig_id': base_plot_name,
'plotfn': self.plot_line,
'xvals': np.array([rabi_amplitudes[k]['piHalfPulse']]),
'yvals': np.array([fit_res.model.func(
rabi_amplitudes[k]['piHalfPulse'],
**fit_res.best_values)]),
                    'setlabel': r'$\pi /2$-Pulse amp',
'color': 'm',
'marker': 'o',
'line_kws': {'markersize': 10},
'linestyle': '',
'do_legend': True,
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
self.plot_dicts['pihalfamp_hline_' + k] = {
'fig_id': base_plot_name,
'plotfn': self.plot_hlines,
'y': [fit_res.model.func(
rabi_amplitudes[k]['piHalfPulse'],
**fit_res.best_values)],
'xmin': sweep_points[0],
'xmax': sweep_points[-1],
'colors': 'gray'}
trans_name = self.get_transition_name(qbn)
old_pipulse_val = self.raw_data_dict[
f'{trans_name}_amp180_'+qbn]
                if old_pipulse_val != old_pipulse_val:  # NaN check
old_pipulse_val = 0
old_pihalfpulse_val = self.raw_data_dict[
f'{trans_name}_amp90scale_'+qbn]
                if old_pihalfpulse_val != old_pihalfpulse_val:  # NaN check
old_pihalfpulse_val = 0
old_pihalfpulse_val *= old_pipulse_val
                textstr = (r' $\pi-Amp$ = {:.3f} V'.format(
                    rabi_amplitudes[k]['piPulse']) +
                           r' $\pm$ {:.3f} V '.format(
                    rabi_amplitudes[k]['piPulse_stderr']) +
                           '\n' + r'$\pi/2-Amp$ = {:.3f} V '.format(
                    rabi_amplitudes[k]['piHalfPulse']) +
                           r' $\pm$ {:.3f} V '.format(
                    rabi_amplitudes[k]['piHalfPulse_stderr']) +
                           '\n ' + r'$\pi-Amp_{old}$ = ' + '{:.3f} V '.format(
                    old_pipulse_val) +
                           '\n' + r'$\pi/2-Amp_{old}$ = ' + '{:.3f} V '.format(
                    old_pihalfpulse_val))
self.plot_dicts['text_msg_' + k] = {
'fig_id': base_plot_name,
'ypos': -0.2,
'xpos': 0,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
class RabiFrequencySweepAnalysis(RabiAnalysis):
def extract_data(self):
super().extract_data()
# Set some default values specific to RabiFrequencySweepAnalysis if the
# respective options have not been set by the user or in the metadata.
# (We do not do this in the init since we have to wait until
# metadata has been extracted.)
if self.get_param_value('TwoD', default_value=None) is None:
self.options_dict['TwoD'] = True
# Extract additional parameters from the HDF file.
params_dict = {}
for qbn in self.qb_names:
params_dict[f'drive_ch_{qbn}'] = \
f'Instrument settings.{qbn}.ge_I_channel'
params_dict[f'ge_freq_{qbn}'] = \
f'Instrument settings.{qbn}.ge_freq'
self.raw_data_dict.update(
self.get_data_from_timestamp_list(params_dict))
def analyze_fit_results(self):
super().analyze_fit_results()
amplitudes = {qbn: np.array([[
self.proc_data_dict[
'analysis_params_dict'][f'{qbn}_{i}']['piPulse'],
self.proc_data_dict[
'analysis_params_dict'][f'{qbn}_{i}']['piPulse_stderr']]
for i in range(self.sp.length(1))]) for qbn in self.qb_names}
self.proc_data_dict['analysis_params_dict']['amplitudes'] = amplitudes
fit_dict_keys = self.prepare_fitting_pulse_amps()
self.run_fitting(keys_to_fit=fit_dict_keys)
        lo_freqs = self.get_param_value('allowed_lo_freqs')
        mid_freq = np.mean(lo_freqs)
self.proc_data_dict['analysis_params_dict']['rabi_model_lo'] = {}
func_repr = lambda a, b, c: \
            f'{a} * (x / 1e9) ** 2 + {b} * x / 1e9 + {c}'
for qbn in self.qb_names:
drive_ch = self.raw_data_dict[f'drive_ch_{qbn}']
pd = self.get_data_from_timestamp_list({
f'ch_amp': f'Instrument settings.Pulsar.{drive_ch}_amp'})
fit_res_L = self.fit_dicts[f'amplitude_fit_left_{qbn}']['fit_res']
fit_res_R = self.fit_dicts[f'amplitude_fit_right_{qbn}']['fit_res']
rabi_model_lo = \
f'lambda x : np.minimum({pd["ch_amp"]}, ' \
f'({func_repr(**fit_res_R.best_values)}) * (x >= {mid_freq})' \
f'+ ({func_repr(**fit_res_L.best_values)}) * (x < {mid_freq}))'
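            # The model is stored as a source string so it can be written
            # to the HDF file; a minimal usage sketch (the frequency value
            # is a placeholder, and numpy must be available as np when
            # evaluating):
            #     model = eval(rabi_model_lo)
            #     pi_amp = model(5.9e9)
            # i.e. the quadratic branch fitted below/above mid_freq is
            # evaluated and clipped at the channel amplitude.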
self.proc_data_dict['analysis_params_dict']['rabi_model_lo'][
qbn] = rabi_model_lo
def prepare_fitting_pulse_amps(self):
exclude_freq_indices = self.get_param_value('exclude_freq_indices', {})
# TODO: generalize the code for len(allowed_lo_freqs) > 2
        lo_freqs = self.get_param_value('allowed_lo_freqs')
        if lo_freqs is None:
            raise ValueError('allowed_lo_freqs not found.')
fit_dict_keys = []
self.proc_data_dict['analysis_params_dict']['optimal_vals'] = {}
for i, qbn in enumerate(self.qb_names):
excl_idxs = exclude_freq_indices.get(qbn, [])
param = [p for p in self.mospm[qbn] if 'freq' in p][0]
freqs = self.sp.get_sweep_params_property('values', 1, param)
ampls = deepcopy(self.proc_data_dict['analysis_params_dict'][
'amplitudes'][qbn])
if len(excl_idxs):
mask = np.array([i in excl_idxs for i in np.arange(len(freqs))])
ampls = ampls[np.logical_not(mask)]
freqs = freqs[np.logical_not(mask)]
if 'cal_data' not in self.proc_data_dict['analysis_params_dict']:
self.proc_data_dict['analysis_params_dict']['cal_data'] = {}
self.proc_data_dict['analysis_params_dict']['cal_data'][qbn] = \
[freqs, ampls[:, 0]]
optimal_idx = np.argmin(np.abs(
freqs - self.raw_data_dict[f'ge_freq_{qbn}']))
self.proc_data_dict['analysis_params_dict']['optimal_vals'][qbn] = \
(freqs[optimal_idx], ampls[optimal_idx, 0], ampls[optimal_idx, 1])
            mid_freq = np.mean(lo_freqs)
fit_func = lambda x, a, b, c: a * x ** 2 + b * x + c
# fit left range
model = lmfit.Model(fit_func)
guess_pars = model.make_params(a=1, b=1, c=0)
self.fit_dicts[f'amplitude_fit_left_{qbn}'] = {
'fit_fn': fit_func,
'fit_xvals': {'x': freqs[freqs < mid_freq]/1e9},
'fit_yvals': {'data': ampls[freqs < mid_freq, 0]},
'fit_yvals_stderr': ampls[freqs < mid_freq, 1],
'guess_pars': guess_pars}
# fit right range
model = lmfit.Model(fit_func)
guess_pars = model.make_params(a=1, b=1, c=0)
self.fit_dicts[f'amplitude_fit_right_{qbn}'] = {
'fit_fn': fit_func,
'fit_xvals': {'x': freqs[freqs >= mid_freq]/1e9},
'fit_yvals': {'data': ampls[freqs >= mid_freq, 0]},
'fit_yvals_stderr': ampls[freqs >= mid_freq, 1],
'guess_pars': guess_pars}
fit_dict_keys += [f'amplitude_fit_left_{qbn}',
f'amplitude_fit_right_{qbn}']
return fit_dict_keys
def prepare_plots(self):
if self.get_param_value('plot_all_traces', True):
super().prepare_plots()
if self.do_fitting:
for qbn in self.qb_names:
base_plot_name = f'Rabi_amplitudes_{qbn}'
title = f'{self.raw_data_dict["timestamp"]} ' \
f'{self.raw_data_dict["measurementstring"]}\n{qbn}'
plotsize = self.get_default_plot_params(set=False)['figure.figsize']
plotsize = (plotsize[0], plotsize[0]/1.25)
param = [p for p in self.mospm[qbn] if 'freq' in p][0]
xlabel = self.sp.get_sweep_params_property('label', 1, param)
xunit = self.sp.get_sweep_params_property('unit', 1, param)
                lo_freqs = self.get_param_value('allowed_lo_freqs')
# plot upper sideband
fit_dict = self.fit_dicts[f'amplitude_fit_left_{qbn}']
fit_res = fit_dict['fit_res']
xmin = min(fit_dict['fit_xvals']['x'])
self.plot_dicts[f'{base_plot_name}_left_data'] = {
'plotfn': self.plot_line,
'fig_id': base_plot_name,
'plotsize': plotsize,
'xvals': fit_dict['fit_xvals']['x'],
'xlabel': xlabel,
'xunit': xunit,
'yvals': fit_dict['fit_yvals']['data'],
'ylabel': '$\\pi$-pulse amplitude, $A$',
'yunit': 'V',
                    'setlabel': f'USB, LO at {np.min(lo_freqs)/1e9:.3f} GHz',
'title': title,
'linestyle': 'none',
'do_legend': False,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left',
'yerr': fit_dict['fit_yvals_stderr'],
'color': 'C0'
}
self.plot_dicts[f'{base_plot_name}_left_fit'] = {
'fig_id': base_plot_name,
'plotfn': self.plot_fit,
'fit_res': fit_res,
'setlabel': 'USB quadratic fit',
'color': 'C0',
'do_legend': True,
# 'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
# plot lower sideband
fit_dict = self.fit_dicts[f'amplitude_fit_right_{qbn}']
fit_res = fit_dict['fit_res']
xmax = max(fit_dict['fit_xvals']['x'])
self.plot_dicts[f'{base_plot_name}_right_data'] = {
'plotfn': self.plot_line,
'fig_id': base_plot_name,
'xvals': fit_dict['fit_xvals']['x'],
'xlabel': xlabel,
'xunit': xunit,
'yvals': fit_dict['fit_yvals']['data'],
'ylabel': '$\\pi$-pulse amplitude, $A$',
'yunit': 'V',
                    'setlabel': f'LSB, LO at {np.max(lo_freqs)/1e9:.3f} GHz',
'title': title,
'linestyle': 'none',
'do_legend': False,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left',
'yerr': fit_dict['fit_yvals_stderr'],
'color': 'C1'
}
self.plot_dicts[f'{base_plot_name}_right_fit'] = {
'fig_id': base_plot_name,
'plotfn': self.plot_fit,
'fit_res': fit_res,
'setlabel': 'LSB quadratic fit',
'color': 'C1',
'do_legend': True,
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
# max ch amp line
drive_ch = self.raw_data_dict[f'drive_ch_{qbn}']
pd = self.get_data_from_timestamp_list({
f'ch_amp': f'Instrument settings.Pulsar.{drive_ch}_amp'})
self.plot_dicts[f'ch_amp_line_{qbn}'] = {
'fig_id': base_plot_name,
'plotfn': self.plot_hlines,
'y': pd['ch_amp'],
                    'xmin': xmin,
                    'xmax': xmax,
'colors': 'k'}
class T1Analysis(MultiQubit_TimeDomain_Analysis):
def extract_data(self):
super().extract_data()
params_dict = {}
for qbn in self.qb_names:
trans_name = self.get_transition_name(qbn)
s = 'Instrument settings.'+qbn
params_dict[f'{trans_name}_T1_'+qbn] = \
s + ('.T1' if trans_name == 'ge' else f'.T1_{trans_name}')
self.raw_data_dict.update(
self.get_data_from_timestamp_list(params_dict))
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.qb_names:
data = self.proc_data_dict['data_to_fit'][qbn]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
exp_decay_mod = lmfit.Model(fit_mods.ExpDecayFunc)
guess_pars = fit_mods.exp_dec_guess(
model=exp_decay_mod, data=data, t=sweep_points)
guess_pars['amplitude'].vary = True
guess_pars['tau'].vary = True
if self.options_dict.get('vary_offset', False):
guess_pars['offset'].vary = True
else:
guess_pars['offset'].value = 0
guess_pars['offset'].vary = False
self.set_user_guess_pars(guess_pars)
key = 'exp_decay_' + qbn
self.fit_dicts[key] = {
'fit_fn': exp_decay_mod.func,
'fit_xvals': {'t': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
fit_res = self.fit_dicts['exp_decay_' + qbn]['fit_res']
for par in fit_res.params:
if fit_res.params[par].stderr is None:
log.warning(f'Stderr for {par} is None. Setting it to 0.')
fit_res.params[par].stderr = 0
self.proc_data_dict['analysis_params_dict'][qbn] = OrderedDict()
self.proc_data_dict['analysis_params_dict'][qbn]['T1'] = \
fit_res.best_values['tau']
self.proc_data_dict['analysis_params_dict'][qbn]['T1_stderr'] = \
fit_res.params['tau'].stderr
self.save_processed_data(key='analysis_params_dict')
def prepare_plots(self):
super().prepare_plots()
if self.do_fitting:
for qbn in self.qb_names:
# rename base plot
base_plot_name = 'T1_' + qbn
self.prepare_projected_data_plot(
fig_name=base_plot_name,
data=self.proc_data_dict['data_to_fit'][qbn],
plot_name_suffix=qbn+'fit',
qb_name=qbn)
self.plot_dicts['fit_' + qbn] = {
'fig_id': base_plot_name,
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['exp_decay_' + qbn]['fit_res'],
'setlabel': 'exp decay fit',
'do_legend': True,
'color': 'r',
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
trans_name = self.get_transition_name(qbn)
old_T1_val = self.raw_data_dict[f'{trans_name}_T1_'+qbn]
                if old_T1_val != old_T1_val:  # NaN check
old_T1_val = 0
T1_dict = self.proc_data_dict['analysis_params_dict']
                textstr = r'$T_1$ = {:.2f} $\mu$s'.format(
                    T1_dict[qbn]['T1']*1e6) \
                          + r' $\pm$ {:.2f} $\mu$s'.format(
                    T1_dict[qbn]['T1_stderr']*1e6) \
                          + '\n' + r'old $T_1$ = {:.2f} $\mu$s'.format(
                              old_T1_val*1e6)
self.plot_dicts['text_msg_' + qbn] = {
'fig_id': base_plot_name,
'ypos': -0.2,
'xpos': 0,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
class RamseyAnalysis(MultiQubit_TimeDomain_Analysis):
"""
Analysis for a Ramsey measurement.
Parameters recognized in the options_dict:
- artificial_detuning_dict (dict; default: None): has the form
{qbn: artificial detuning value}
- artificial_detuning (float or dict; default: None): accepted parameter
for legacy reasons. Can be the same as artificial_detuning_dict or just
a single value which will be used for all qubits.
- fit_gaussian_decay (bool; default: True): whether to fit with a Gaussian
envelope for the oscillations in addition to the exponential decay
envelope.
"""
def extract_data(self):
super().extract_data()
params_dict = {}
for qbn in self.qb_names:
trans_name = self.get_transition_name(qbn)
s = 'Instrument settings.'+qbn
params_dict[f'{trans_name}_freq_'+qbn] = s+f'.{trans_name}_freq'
self.raw_data_dict.update(
self.get_data_from_timestamp_list(params_dict))
def prepare_fitting(self):
if self.get_param_value('fit_gaussian_decay', default_value=True):
self.fit_keys = ['exp_decay_', 'gauss_decay_']
else:
self.fit_keys = ['exp_decay_']
self.fit_dicts = OrderedDict()
def add_fit_dict(qbn, data, fit_keys):
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
for i, key in enumerate(fit_keys):
exp_damped_decay_mod = lmfit.Model(fit_mods.ExpDampOscFunc)
guess_pars = fit_mods.exp_damp_osc_guess(
model=exp_damped_decay_mod, data=data, t=sweep_points,
n_guess=i+1)
guess_pars['amplitude'].vary = False
guess_pars['amplitude'].value = 0.5
guess_pars['frequency'].vary = True
guess_pars['tau'].vary = True
guess_pars['phase'].vary = True
guess_pars['n'].vary = False
guess_pars['oscillation_offset'].vary = \
'f' in self.data_to_fit[qbn]
# guess_pars['exponential_offset'].value = 0.5
guess_pars['exponential_offset'].vary = True
self.set_user_guess_pars(guess_pars)
self.fit_dicts[key] = {
                    'fit_fn': exp_damped_decay_mod.func,
'fit_xvals': {'t': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
for qbn in self.qb_names:
all_data = self.proc_data_dict['data_to_fit'][qbn]
if self.get_param_value('TwoD'):
for i, data in enumerate(all_data):
fit_keys = [f'{fk}{qbn}_{i}' for fk in self.fit_keys]
add_fit_dict(qbn, data, fit_keys)
else:
fit_keys = [f'{fk}{qbn}' for fk in self.fit_keys]
add_fit_dict(qbn, all_data, fit_keys)
def analyze_fit_results(self):
self.artificial_detuning_dict = self.get_param_value(
'artificial_detuning_dict')
if self.artificial_detuning_dict is None:
artificial_detuning = self.get_param_value('artificial_detuning')
if 'preprocessed_task_list' in self.metadata:
pptl = self.metadata['preprocessed_task_list']
self.artificial_detuning_dict = OrderedDict([
(t['qb'], t['artificial_detuning']) for t in pptl
])
elif artificial_detuning is not None:
# legacy case
if isinstance(artificial_detuning, dict):
self.artificial_detuning_dict = artificial_detuning
else:
self.artificial_detuning_dict = OrderedDict(
[(qbn, artificial_detuning) for qbn in self.qb_names])
if self.artificial_detuning_dict is None:
raise ValueError('"artificial_detuning" not found.')
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for k, fit_dict in self.fit_dicts.items():
            # k is of the form fit_type_qbn_i if TwoD else fit_type_qbn
split_key = k.split('_')
fit_type = '_'.join(split_key[:2])
qbn = split_key[2]
if len(split_key[2:]) == 1:
outer_key = qbn
else:
                # TwoD: outer_key = qbn_i
outer_key = '_'.join(split_key[2:])
if outer_key not in self.proc_data_dict['analysis_params_dict']:
self.proc_data_dict['analysis_params_dict'][outer_key] = \
OrderedDict()
self.proc_data_dict['analysis_params_dict'][outer_key][fit_type] = \
OrderedDict()
fit_res = fit_dict['fit_res']
for par in fit_res.params:
if fit_res.params[par].stderr is None:
log.warning(f'Stderr for {par} is None. Setting it to 0.')
fit_res.params[par].stderr = 0
trans_name = self.get_transition_name(qbn)
old_qb_freq = self.raw_data_dict[f'{trans_name}_freq_'+qbn]
            if old_qb_freq != old_qb_freq:  # NaN check
old_qb_freq = 0
self.proc_data_dict['analysis_params_dict'][outer_key][fit_type][
'old_qb_freq'] = old_qb_freq
self.proc_data_dict['analysis_params_dict'][outer_key][fit_type][
'new_qb_freq'] = old_qb_freq + \
self.artificial_detuning_dict[qbn] - \
fit_res.best_values['frequency']
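            # Worked example of the update above (made-up numbers): with
            # old_qb_freq = 5.0 GHz, an artificial detuning of +2 MHz and
            # a fitted oscillation frequency of 2.5 MHz, the new estimate
            # is 5.0 GHz + 2 MHz - 2.5 MHz = 4.9995 GHz.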
self.proc_data_dict['analysis_params_dict'][outer_key][fit_type][
'new_qb_freq_stderr'] = fit_res.params['frequency'].stderr
self.proc_data_dict['analysis_params_dict'][outer_key][fit_type][
'T2_star'] = fit_res.best_values['tau']
self.proc_data_dict['analysis_params_dict'][outer_key][fit_type][
'T2_star_stderr'] = fit_res.params['tau'].stderr
self.proc_data_dict['analysis_params_dict'][outer_key][fit_type][
'artificial_detuning'] = self.artificial_detuning_dict[qbn]
hdf_group_name_suffix = self.options_dict.get(
'hdf_group_name_suffix', '')
self.save_processed_data(key='analysis_params_dict' +
hdf_group_name_suffix)
def prepare_plots(self):
super().prepare_plots()
if self.do_fitting:
apd = self.proc_data_dict['analysis_params_dict']
for outer_key, ramsey_pars_dict in apd.items():
if outer_key in ['qubit_frequencies', 'reparking_params']:
# This is only for ReparkingRamseyAnalysis.
# It is handled by prepare_fitting_qubit_freqs of that class
continue
# outer_key is of the form qbn_i if TwoD else qbn.
# split into qbn and i. (outer_key + '_') is needed because if
# outer_key = qbn doing outer_key.split('_') will only have one
# output and assignment to two variables will fail.
qbn, ii = (outer_key + '_').split('_')[:2]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points']
first_sweep_param = self.get_first_sweep_param(
qbn, dimension=1)
if len(ii) and first_sweep_param is not None:
# TwoD
label, unit, vals = first_sweep_param
title_suffix = (f'{ii}: {label} = ' + ' '.join(
SI_val_to_msg_str(vals[int(ii)], unit,
return_type=lambda x: f'{x:0.1f}')))
daa = self.metadata.get('drive_amp_adaptation', {}).get(
qbn, None)
if daa is not None:
sweep_points = sweep_points * daa[int(ii)]
else:
# OneD
title_suffix = ''
base_plot_name = 'Ramsey_' + outer_key
dtf = self.proc_data_dict['data_to_fit'][qbn]
self.prepare_projected_data_plot(
fig_name=base_plot_name,
data=dtf[int(ii)] if ii != '' else dtf,
sweep_points=sweep_points,
plot_name_suffix=qbn+'fit',
qb_name=qbn, TwoD=False,
title_suffix=title_suffix)
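                # the fit keys carry a trailing '_' (e.g. 'exp_decay_');
                # strip it to match the keys of ramsey_pars_dict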
exp_dec_k = self.fit_keys[0][:-1]
old_qb_freq = ramsey_pars_dict[exp_dec_k]['old_qb_freq']
textstr = ''
T2_star_str = ''
for i, fit_type in enumerate(ramsey_pars_dict):
fit_res = self.fit_dicts[f'{fit_type}_{outer_key}']['fit_res']
self.plot_dicts[f'fit_{outer_key}_{fit_type}'] = {
'fig_id': base_plot_name,
'plotfn': self.plot_fit,
'fit_res': fit_res,
'setlabel': 'exp decay fit' if i == 0 else
'gauss decay fit',
'do_legend': True,
'color': 'r' if i == 0 else 'C4',
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
if i != 0:
textstr += '\n'
textstr += \
('$f_{{qubit \_ new \_ {{{key}}} }}$ = '.format(
key=('exp' if i == 0 else 'gauss')) +
'{:.6f} GHz '.format(
ramsey_pars_dict[fit_type]['new_qb_freq']*1e-9) +
'$\pm$ {:.3f} MHz '.format(
ramsey_pars_dict[fit_type][
'new_qb_freq_stderr']*1e-6))
T2_star_str += \
('\n$T_{{2,{{{key}}} }}^\star$ = '.format(
key=('exp' if i == 0 else 'gauss')) +
'{:.2f} $\mu$s'.format(
fit_res.params['tau'].value*1e6) +
'$\pm$ {:.2f} $\mu$s'.format(
fit_res.params['tau'].stderr*1e6))
textstr += '\n$f_{qubit \_ old}$ = '+'{:.6f} GHz '.format(
old_qb_freq*1e-9)
art_det = ramsey_pars_dict[exp_dec_k][
'artificial_detuning']*1e-6
delta_f = (ramsey_pars_dict[exp_dec_k]['new_qb_freq'] -
old_qb_freq)*1e-6
textstr += ('\n$\Delta f$ = {:.4f} MHz '.format(delta_f) +
'$\pm$ {:.3f} kHz'.format(
self.fit_dicts[f'{exp_dec_k}_{outer_key}']['fit_res'].params[
'frequency'].stderr*1e-3) +
'\n$f_{Ramsey}$ = '+'{:.4f} MHz $\pm$ {:.3f} kHz'.format(
self.fit_dicts[f'{exp_dec_k}_{outer_key}']['fit_res'].params[
'frequency'].value*1e-6,
self.fit_dicts[f'{exp_dec_k}_{outer_key}']['fit_res'].params[
'frequency'].stderr*1e-3))
textstr += T2_star_str
textstr += '\nartificial detuning = {:.2f} MHz'.format(art_det)
color = 'k'
if np.abs(delta_f) > np.abs(art_det):
# We don't want this: if the qubit detuning is larger than
# the artificial detuning, the sign of the qubit detuning
# cannot be determined from a single Ramsey measurement.
# Save a warning image and highlight in red
# the Delta f and artificial detuning rows in textstr
                    self._warning_message += (f'\nQubit {qbn} frequency change '
                                              f'({np.abs(delta_f):.5f} MHz) is larger'
                                              f' than the artificial detuning of '
                                              f'{np.abs(art_det):.5f} MHz. In this '
                                              f'case, the sign of the qubit detuning '
                                              f'cannot be determined from a single '
                                              f'Ramsey measurement.')
self._raise_warning_image = True
textstr = textstr.split('\n')
color = ['black']*len(textstr)
idx = [i for i, s in enumerate(textstr) if 'Delta f' in s][0]
color[idx] = 'red'
idx = [i for i, s in enumerate(textstr) if
'artificial detuning' in s][0]
color[idx] = 'red'
self.plot_dicts['text_msg_' + outer_key] = {
'fig_id': base_plot_name,
'ypos': -0.2,
'xpos': -0.025,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'color': color,
'plotfn': self.plot_text,
'text_string': textstr}
self.plot_dicts['half_hline_' + outer_key] = {
'fig_id': base_plot_name,
'plotfn': self.plot_hlines,
'y': 0.5,
'xmin': sweep_points[0],
'xmax': sweep_points[-1],
'colors': 'gray'}
class ReparkingRamseyAnalysis(RamseyAnalysis):
def extract_data(self):
super().extract_data()
# Set some default values specific to ReparkingRamseyAnalysis if the
# respective options have not been set by the user or in the metadata.
# (We do not do this in the init since we have to wait until
# metadata has been extracted.)
if self.get_param_value('TwoD', default_value=None) is None:
self.options_dict['TwoD'] = True
def analyze_fit_results(self):
super().analyze_fit_results()
freqs = OrderedDict()
if self.get_param_value('freq_from_gaussian_fit', False):
self.fit_type = self.fit_keys[1][:-1]
else:
self.fit_type = self.fit_keys[0][:-1]
apd = self.proc_data_dict['analysis_params_dict']
for qbn in self.qb_names:
freqs[qbn] = \
{'val': np.array([d[self.fit_type]['new_qb_freq']
for k, d in apd.items() if qbn in k]),
'stderr': np.array([d[self.fit_type]['new_qb_freq_stderr']
for k, d in apd.items() if qbn in k])}
self.proc_data_dict['analysis_params_dict']['qubit_frequencies'] = freqs
fit_dict_keys = self.prepare_fitting_qubit_freqs()
self.run_fitting(keys_to_fit=fit_dict_keys)
self.proc_data_dict['analysis_params_dict']['reparking_params'] = {}
for qbn in self.qb_names:
fit_dict = self.fit_dicts[f'frequency_fit_{qbn}']
fit_res = fit_dict['fit_res']
new_ss_freq = fit_res.best_values['f0']
new_ss_volt = fit_res.best_values['V0']
par_name = \
[p for p in self.proc_data_dict['sweep_points_2D_dict'][qbn]
if 'offset' not in p][0]
voltages = self.sp.get_sweep_params_property('values', 1, par_name)
if new_ss_volt < min(voltages) or new_ss_volt > max(voltages):
# if the fitted voltage is outside the sweep points range take
# the max or min of range depending on where the fitted point is
idx = np.argmin(voltages) if new_ss_volt < min(voltages) else \
np.argmax(voltages)
new_ss_volt = min(voltages) if new_ss_volt < min(voltages) else \
max(voltages)
freqs = self.proc_data_dict['analysis_params_dict'][
'qubit_frequencies'][qbn]['val']
new_ss_freq = freqs[idx]
log.warning(f"New sweet spot voltage suggested by fitting "
f"is {fit_res.best_values['V0']:.6f} and exceeds "
f"the voltage range [{min(voltages):.6f}, "
f"{max(voltages):.6f}] that is swept. New sweet "
f"spot voltage set to {new_ss_volt:.6f}.")
self.proc_data_dict['analysis_params_dict'][
'reparking_params'][qbn] = {
'new_ss_vals': {'ss_freq': new_ss_freq,
'ss_volt': new_ss_volt},
'fitted_vals': {'ss_freq': fit_res.best_values['f0'],
'ss_volt': fit_res.best_values['V0']}}
self.save_processed_data(key='analysis_params_dict')
def prepare_fitting_qubit_freqs(self):
fit_dict_keys = []
ss_type = self.get_param_value('sweet_spot_type')
for qbn in self.qb_names:
freqs = self.proc_data_dict['analysis_params_dict'][
'qubit_frequencies'][qbn]
par_name = \
[p for p in self.proc_data_dict['sweep_points_2D_dict'][qbn]
if 'offset' not in p][0]
voltages, _, label = self.sp.get_sweep_params_description(par_name,
1)
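            # Model the qubit frequency vs voltage near a sweet spot as a
            # parabola f(V) = f0 - fv*(V - V0)**2: fv > 0 gives a frequency
            # maximum (upper sweet spot), fv < 0 a minimum (lower sweet spot).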
fit_func = lambda V, V0, f0, fv: f0 - fv * (V - V0)**2
model = lmfit.Model(fit_func)
if ss_type is None:
# define secant from outermost points to check
# convexity and decide for USS or LSS
secant_gradient = ((freqs['val'][-1] - freqs['val'][0])
/ (voltages[-1] - voltages[0]))
secant = lambda x: secant_gradient * x + freqs['val'][-1] \
- secant_gradient * voltages[-1]
# compute convexity as trapezoid integral of difference to
# secant
delta_secant = np.array(freqs['val'] - secant(voltages))
convexity = np.sum((delta_secant[:-1] + delta_secant[1:]) / 2
* (voltages[1:] - voltages[:-1]))
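                # a positive integral means the data lie above the secant,
                # i.e. the curve has a maximum -> upper sweet spot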
self.fit_uss = convexity >= 0
else:
self.fit_uss = ss_type == 'upper'
# set initial values of fitting parameters depending on USS or LSS
if self.fit_uss: # USS
guess_pars_dict = {'V0': voltages[np.argmax(freqs['val'])],
'f0': np.max(np.array(freqs['val'])),
'fv': 2.5e9}
else: # LSS
guess_pars_dict = {'V0': voltages[np.argmin(freqs['val'])],
'f0': np.min(np.array(freqs['val'])),
'fv': -2.5e9}
guess_pars = model.make_params(**guess_pars_dict)
self.fit_dicts[f'frequency_fit_{qbn}'] = {
'fit_fn': fit_func,
'fit_xvals': {'V': voltages},
'fit_yvals': {'data': freqs['val']},
'fit_yvals_stderr': freqs['stderr'],
'guess_pars': guess_pars}
fit_dict_keys += [f'frequency_fit_{qbn}']
return fit_dict_keys
def prepare_plots(self):
if self.get_param_value('plot_all_traces', True):
super().prepare_plots()
if self.do_fitting:
current_voltages = self.get_param_value('current_voltages', {})
for qbn in self.qb_names:
base_plot_name = f'reparking_{qbn}'
title = f'{self.raw_data_dict["timestamp"]} ' \
f'{self.raw_data_dict["measurementstring"]}\n{qbn}'
plotsize = self.get_default_plot_params(set=False)['figure.figsize']
plotsize = (plotsize[0], plotsize[0]/1.25)
par_name = \
[p for p in self.proc_data_dict['sweep_points_2D_dict'][qbn]
if 'offset' not in p][0]
voltages, xunit, xlabel = self.sp.get_sweep_params_description(
par_name, 1)
fit_dict = self.fit_dicts[f'frequency_fit_{qbn}']
fit_res = fit_dict['fit_res']
self.plot_dicts[base_plot_name] = {
'plotfn': self.plot_line,
'fig_id': base_plot_name,
'plotsize': plotsize,
'xvals': fit_dict['fit_xvals']['V'],
'xlabel': xlabel,
'xunit': xunit,
'yvals': fit_dict['fit_yvals']['data'],
'ylabel': 'Qubit frequency, $f$',
'yunit': 'Hz',
'setlabel': 'Data',
'title': title,
'linestyle': 'none',
'do_legend': False,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left',
'yerr': fit_dict['fit_yvals_stderr'],
'color': 'C0'
}
self.plot_dicts[f'{base_plot_name}_fit'] = {
'fig_id': base_plot_name,
'plotfn': self.plot_fit,
'fit_res': fit_res,
'setlabel': 'Fit',
'color': 'C0',
'do_legend': True,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
# old qb freq is the same for all keys in
# self.proc_data_dict['analysis_params_dict'] so take qbn_0
old_qb_freq = self.proc_data_dict['analysis_params_dict'][
f'{qbn}_0'][self.fit_type]['old_qb_freq']
# new ss values
ss_vals = self.proc_data_dict['analysis_params_dict'][
'reparking_params'][qbn]['new_ss_vals']
textstr = \
"SS frequency: " \
f"{ss_vals['ss_freq']/1e9:.6f} GHz " \
f"\nSS DC voltage: " \
f"{ss_vals['ss_volt']:.6f} V " \
f"\nPrevious SS frequency: {old_qb_freq/1e9:.6f} GHz "
if qbn in current_voltages:
old_voltage = current_voltages[qbn]
textstr += f"\nPrevious SS DC voltage: {old_voltage:.6f} V"
self.plot_dicts[f'{base_plot_name}_text'] = {
'fig_id': base_plot_name,
'ypos': -0.2,
'xpos': -0.1,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
self.plot_dicts[f'{base_plot_name}_marker'] = {
'fig_id': base_plot_name,
'plotfn': self.plot_line,
'xvals': [ss_vals['ss_volt']],
'yvals': [ss_vals['ss_freq']],
'color': 'r',
'marker': 'o',
'line_kws': {'markersize': 10},
'linestyle': ''}
class QScaleAnalysis(MultiQubit_TimeDomain_Analysis):
def extract_data(self):
super().extract_data()
params_dict = {}
for qbn in self.qb_names:
trans_name = self.get_transition_name(qbn)
s = 'Instrument settings.'+qbn
params_dict[f'{trans_name}_qscale_'+qbn] = \
s+f'.{trans_name}_motzoi'
self.raw_data_dict.update(
self.get_data_from_timestamp_list(params_dict))
def process_data(self):
super().process_data()
self.proc_data_dict['qscale_data'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['qscale_data'][qbn] = OrderedDict()
sweep_points = deepcopy(self.proc_data_dict['sweep_points_dict'][
qbn]['msmt_sweep_points'])
            # Check whether the sweep points are repeated 3 times, as they
            # have to be for the qscale analysis: take the first 3 entries
            # and check whether they are all identical. Needed for backwards
            # compatibility with QudevTransmon.measure_qscale(), which does
            # not (yet) use the SweepPoints object.
unique_sp = np.unique(sweep_points[:3])
if unique_sp.size > 1:
sweep_points = np.repeat(sweep_points, 3)
# replace in proc_data_dict; otherwise plotting in base class fails
self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points'] = sweep_points
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'] = np.concatenate([
sweep_points, self.proc_data_dict['sweep_points_dict'][qbn][
'cal_points_sweep_points']])
data = self.proc_data_dict['data_to_fit'][qbn]
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
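            # Each qscale value is measured with three interleaved pulse
            # pairs (X90-X180, X90-Y180, X90-mY180), so every third entry
            # of the sweep points and of the data belongs to the same pair.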
self.proc_data_dict['qscale_data'][qbn]['sweep_points_xx'] = \
sweep_points[0::3]
self.proc_data_dict['qscale_data'][qbn]['sweep_points_xy'] = \
sweep_points[1::3]
self.proc_data_dict['qscale_data'][qbn]['sweep_points_xmy'] = \
sweep_points[2::3]
self.proc_data_dict['qscale_data'][qbn]['data_xx'] = \
data[0::3]
self.proc_data_dict['qscale_data'][qbn]['data_xy'] = \
data[1::3]
self.proc_data_dict['qscale_data'][qbn]['data_xmy'] = \
data[2::3]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.qb_names:
for msmt_label in ['_xx', '_xy', '_xmy']:
sweep_points = self.proc_data_dict['qscale_data'][qbn][
'sweep_points' + msmt_label]
data = self.proc_data_dict['qscale_data'][qbn][
'data' + msmt_label]
                # As a workaround for a weird bug that crashes the analysis
                # every second time, we do not use lmfit.models.ConstantModel
                # and lmfit.models.LinearModel, but create custom models.
if msmt_label == '_xx':
model = lmfit.Model(lambda x, c: c)
guess_pars = model.make_params(c=np.mean(data))
else:
model = lmfit.Model(lambda x, slope, intercept:
slope * x + intercept)
slope = (data[-1] - data[0]) / \
(sweep_points[-1] - sweep_points[0])
intercept = data[-1] - slope * sweep_points[-1]
guess_pars = model.make_params(slope=slope,
intercept=intercept)
self.set_user_guess_pars(guess_pars)
key = 'fit' + msmt_label + '_' + qbn
self.fit_dicts[key] = {
'fit_fn': model.func,
'fit_xvals': {'x': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
# The best qscale parameter is the point where all 3 curves intersect.
threshold = 0.02
for qbn in self.qb_names:
self.proc_data_dict['analysis_params_dict'][qbn] = OrderedDict()
fitparams0 = self.fit_dicts['fit_xx'+'_'+qbn]['fit_res'].params
fitparams1 = self.fit_dicts['fit_xy'+'_'+qbn]['fit_res'].params
fitparams2 = self.fit_dicts['fit_xmy'+'_'+qbn]['fit_res'].params
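            # The optimal qscale is where the X90-Y180 and X90-mY180 lines
            # intersect: solving slope1*q + intercept1 = slope2*q + intercept2
            # for q gives q = (intercept1 - intercept2) / (slope2 - slope1).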
intercept_diff_mean = fitparams1['intercept'].value - \
fitparams2['intercept'].value
slope_diff_mean = fitparams2['slope'].value - \
fitparams1['slope'].value
optimal_qscale = intercept_diff_mean/slope_diff_mean
# Warning if Xpi/2Xpi line is not within +/-threshold of 0.5
if (fitparams0['c'].value > (0.5 + threshold)) or \
(fitparams0['c'].value < (0.5 - threshold)):
                log.warning('The trace from the X90-X180 pulses is '
                            'NOT within +/-{} of the expected value '
                            'of 0.5.'.format(threshold))
# Warning if optimal_qscale is not within +/-threshold of 0.5
y_optimal_qscale = optimal_qscale * fitparams2['slope'].value + \
fitparams2['intercept'].value
if (y_optimal_qscale > (0.5 + threshold)) or \
(y_optimal_qscale < (0.5 - threshold)):
                log.warning('The optimal qscale found gives a population '
                            'that is NOT within +/-{} of the expected '
                            'value of 0.5.'.format(threshold))
# Calculate standard deviation
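            # via error propagation for the ratio q = a/b:
            # var(q) = var(a)/b**2 + var(b)*(a/b**2)**2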
intercept_diff_std_squared = \
fitparams1['intercept'].stderr**2 + \
fitparams2['intercept'].stderr**2
slope_diff_std_squared = \
fitparams2['slope'].stderr**2 + fitparams1['slope'].stderr**2
optimal_qscale_stderr = np.sqrt(
intercept_diff_std_squared*(1/slope_diff_mean**2) +
slope_diff_std_squared*(intercept_diff_mean /
(slope_diff_mean**2))**2)
self.proc_data_dict['analysis_params_dict'][qbn]['qscale'] = \
optimal_qscale
self.proc_data_dict['analysis_params_dict'][qbn][
'qscale_stderr'] = optimal_qscale_stderr
def prepare_plots(self):
super().prepare_plots()
color_dict = {'_xx': '#365C91',
'_xy': '#683050',
'_xmy': '#3C7541'}
label_dict = {'_xx': r'$X_{\pi/2}X_{\pi}$',
'_xy': r'$X_{\pi/2}Y_{\pi}$',
'_xmy': r'$X_{\pi/2}Y_{-\pi}$'}
for qbn in self.qb_names:
base_plot_name = 'Qscale_' + qbn
for msmt_label in ['_xx', '_xy', '_xmy']:
sweep_points = self.proc_data_dict['qscale_data'][qbn][
'sweep_points' + msmt_label]
data = self.proc_data_dict['qscale_data'][qbn][
'data' + msmt_label]
if msmt_label == '_xx':
plot_name = base_plot_name
else:
plot_name = 'data' + msmt_label + '_' + qbn
xlabel, xunit = self.get_xaxis_label_unit(qbn)
self.plot_dicts[plot_name] = {
'plotfn': self.plot_line,
'xvals': sweep_points,
'xlabel': xlabel,
'xunit': xunit,
'yvals': data,
'ylabel': self.get_yaxis_label(qb_name=qbn),
'yunit': '',
'setlabel': 'Data\n' + label_dict[msmt_label],
'title': (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring'] +
'\n' + qbn),
'linestyle': 'none',
'color': color_dict[msmt_label],
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
if msmt_label != '_xx':
self.plot_dicts[plot_name]['fig_id'] = base_plot_name
if self.do_fitting:
# plot fit
xfine = np.linspace(sweep_points[0], sweep_points[-1], 1000)
fit_key = 'fit' + msmt_label + '_' + qbn
fit_res = self.fit_dicts[fit_key]['fit_res']
yvals = fit_res.model.func(xfine, **fit_res.best_values)
if not hasattr(yvals, '__iter__'):
yvals = np.array(len(xfine)*[yvals])
self.plot_dicts[fit_key] = {
'fig_id': base_plot_name,
'plotfn': self.plot_line,
'xvals': xfine,
'yvals': yvals,
'marker': '',
'setlabel': 'Fit\n' + label_dict[msmt_label],
'do_legend': True,
'color': color_dict[msmt_label],
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
trans_name = self.get_transition_name(qbn)
old_qscale_val = self.raw_data_dict[
f'{trans_name}_qscale_'+qbn]
if old_qscale_val != old_qscale_val:
old_qscale_val = 0
textstr = 'Qscale = {:.4f} $\pm$ {:.4f}'.format(
self.proc_data_dict['analysis_params_dict'][qbn][
'qscale'],
self.proc_data_dict['analysis_params_dict'][qbn][
'qscale_stderr']) + \
'\nold Qscale= {:.4f}'.format(old_qscale_val)
self.plot_dicts['text_msg_' + qbn] = {
'fig_id': base_plot_name,
'ypos': -0.225,
'xpos': 0.5,
'horizontalalignment': 'center',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
# plot cal points
if self.num_cal_points != 0:
for i, cal_pts_idxs in enumerate(
self.cal_states_dict.values()):
plot_dict_name = list(self.cal_states_dict)[i] + \
'_' + qbn
self.plot_dicts[plot_dict_name] = {
'fig_id': base_plot_name,
'plotfn': self.plot_line,
                        'xvals': self.proc_data_dict['sweep_points_dict'][
                            qbn]['cal_points_sweep_points'][cal_pts_idxs],
'yvals': self.proc_data_dict[
'data_to_fit'][qbn][cal_pts_idxs],
'setlabel': list(self.cal_states_dict)[i],
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left',
'linestyle': 'none',
'line_kws': {'color': self.get_cal_state_color(
list(self.cal_states_dict)[i])}}
self.plot_dicts[plot_dict_name + '_line'] = {
'fig_id': base_plot_name,
'plotfn': self.plot_hlines,
'y': np.mean(
self.proc_data_dict[
'data_to_fit'][qbn][cal_pts_idxs]),
'xmin': self.proc_data_dict['sweep_points_dict'][
qbn]['sweep_points'][0],
'xmax': self.proc_data_dict['sweep_points_dict'][
qbn]['sweep_points'][-1],
'colors': 'gray'}
class EchoAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, *args, **kwargs):
"""
This class is different to the other single qubit calib analysis classes
(Rabi, Ramsey, QScale, T1).
The analysis for an Echo measurement is identical to the T1 analysis
if no artificial_detuing was used, and identical to the Ramsey analysis
if an artificial_detuning was used. Hence, this class contains the
attribute self.echo_analysis which is an instance of either T1 or Ramsey
analysis.
"""
auto = kwargs.pop('auto', True)
super().__init__(*args, auto=False, **kwargs)
if self.options_dict.get('artificial_detuning', None) is not None:
self.echo_analysis = RamseyAnalysis(*args, auto=False, **kwargs)
else:
if 'options_dict' in kwargs:
kwargs['options_dict'].update({'vary_offset': True})
else:
kwargs['options_dict'] = {'vary_offset': True}
self.echo_analysis = T1Analysis(*args, auto=False, **kwargs)
if auto:
try:
self.echo_analysis.extract_data()
self.echo_analysis.process_data()
self.echo_analysis.prepare_fitting()
self.echo_analysis.run_fitting()
self.echo_analysis.save_fit_results()
self.analyze_fit_results()
self.prepare_plots()
except Exception as e:
if self.raise_exceptions:
raise e
else:
log.error("Unhandled error during analysis!")
log.error(traceback.format_exc())
def analyze_fit_results(self):
self.echo_analysis.analyze_fit_results()
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['analysis_params_dict'][qbn] = OrderedDict()
params_dict = self.echo_analysis.proc_data_dict[
'analysis_params_dict'][qbn]
if 'T1' in params_dict:
self.proc_data_dict['analysis_params_dict'][qbn][
'T2_echo'] = params_dict['T1']
self.proc_data_dict['analysis_params_dict'][qbn][
'T2_echo_stderr'] = params_dict['T1_stderr']
else:
self.proc_data_dict['analysis_params_dict'][qbn][
'T2_echo'] = params_dict['exp_decay']['T2_star']
self.proc_data_dict['analysis_params_dict'][qbn][
'T2_echo_stderr'] = params_dict['exp_decay'][
'T2_star_stderr']
def prepare_plots(self):
self.echo_analysis.prepare_plots()
for qbn in self.qb_names:
# rename base plot
figure_name = 'Echo_' + qbn
echo_plot_key_t1 = [key for key in self.echo_analysis.plot_dicts if
'T1_'+qbn in key]
echo_plot_key_ram = [key for key in self.echo_analysis.plot_dicts if
'Ramsey_'+qbn in key]
if len(echo_plot_key_t1) != 0:
echo_plot_name = echo_plot_key_t1[0]
elif len(echo_plot_key_ram) != 0:
echo_plot_name = echo_plot_key_ram[0]
else:
raise ValueError('Neither T1 nor Ramsey plots were found.')
self.echo_analysis.plot_dicts[echo_plot_name][
'legend_pos'] = 'upper right'
self.echo_analysis.plot_dicts[echo_plot_name][
'legend_bbox_to_anchor'] = (1, -0.15)
for plot_label in self.echo_analysis.plot_dicts:
if qbn in plot_label:
if 'raw' not in plot_label and 'projected' not in plot_label:
self.echo_analysis.plot_dicts[plot_label]['fig_id'] = \
figure_name
old_T2e_val = a_tools.get_instr_setting_value_from_file(
file_path=self.echo_analysis.raw_data_dict['folder'],
instr_name=qbn, param_name='T2{}'.format(
'_ef' if 'f' in self.echo_analysis.data_to_fit[qbn]
else ''))
T2_dict = self.proc_data_dict['analysis_params_dict']
textstr = '$T_2$ echo = {:.2f} $\mu$s'.format(
T2_dict[qbn]['T2_echo']*1e6) \
+ ' $\pm$ {:.2f} $\mu$s'.format(
T2_dict[qbn]['T2_echo_stderr']*1e6) \
+ '\nold $T_2$ echo = {:.2f} $\mu$s'.format(
old_T2e_val*1e6)
self.echo_analysis.plot_dicts['text_msg_' + qbn][
'text_string'] = textstr
self.echo_analysis.plot(key_list='auto')
self.echo_analysis.save_figures(close_figs=True)
class RamseyAddPulseAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, *args, **kwargs):
auto = kwargs.pop('auto', True)
super().__init__(*args, auto=False, **kwargs)
options_dict = kwargs.pop('options_dict', OrderedDict())
options_dict_no = deepcopy(options_dict)
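        # The two Ramsey traces are measured interleaved: even raw indices
        # hold the trace with the additional pi-pulse, odd indices the trace
        # without it; the last 4 entries are calibration points appended to
        # both traces.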
options_dict_no.update(dict(
data_filter=lambda raw: np.concatenate([
raw[:-4][1::2], raw[-4:]]),
hdf_group_name_suffix='_no_pulse'))
self.ramsey_analysis = RamseyAnalysis(
*args, auto=False, options_dict=options_dict_no,
**kwargs)
options_dict_with = deepcopy(options_dict)
options_dict_with.update(dict(
data_filter=lambda raw: np.concatenate([
raw[:-4][0::2], raw[-4:]]),
hdf_group_name_suffix='_with_pulse'))
self.ramsey_add_pulse_analysis = RamseyAnalysis(
*args, auto=False, options_dict=options_dict_with,
**kwargs)
if auto:
self.ramsey_analysis.extract_data()
self.ramsey_analysis.process_data()
self.ramsey_analysis.prepare_fitting()
self.ramsey_analysis.run_fitting()
self.ramsey_analysis.save_fit_results()
self.ramsey_add_pulse_analysis.extract_data()
self.ramsey_add_pulse_analysis.process_data()
self.ramsey_add_pulse_analysis.prepare_fitting()
self.ramsey_add_pulse_analysis.run_fitting()
self.ramsey_add_pulse_analysis.save_fit_results()
self.raw_data_dict = self.ramsey_analysis.raw_data_dict
self.analyze_fit_results()
self.prepare_plots()
self.plot()
self.save_figures(close_figs=True)
def analyze_fit_results(self):
self.cross_kerr = 0.0
self.ramsey_analysis.analyze_fit_results()
self.ramsey_add_pulse_analysis.analyze_fit_results()
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['analysis_params_dict'][qbn] = OrderedDict()
self.params_dict_ramsey = self.ramsey_analysis.proc_data_dict[
'analysis_params_dict'][qbn]
self.params_dict_add_pulse = \
self.ramsey_add_pulse_analysis.proc_data_dict[
'analysis_params_dict'][qbn]
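            # The residual ZZ coupling is the shift of the fitted qubit
            # frequency caused by the pi-pulse on the other qubit; the fit
            # stderrs of the two measurements are added in quadrature below.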
self.cross_kerr = self.params_dict_ramsey[
'exp_decay']['new_qb_freq'] \
- self.params_dict_add_pulse[
'exp_decay']['new_qb_freq']
self.cross_kerr_error = np.sqrt(
(self.params_dict_ramsey[
'exp_decay']['new_qb_freq_stderr'])**2 +
(self.params_dict_add_pulse[
'exp_decay']['new_qb_freq_stderr'])**2)
def prepare_plots(self):
self.ramsey_analysis.prepare_plots()
self.ramsey_add_pulse_analysis.prepare_plots()
self.ramsey_analysis.plot(key_list='auto')
self.ramsey_analysis.save_figures(close_figs=True, savebase='Ramsey_no')
self.ramsey_add_pulse_analysis.plot(key_list='auto')
self.ramsey_add_pulse_analysis.save_figures(close_figs=True,
savebase='Ramsey_with')
self.options_dict['plot_proj_data'] = False
self.metadata = {'plot_proj_data': False, 'plot_raw_data': False}
super().prepare_plots()
try:
xunit = self.metadata["sweep_unit"]
xlabel = self.metadata["sweep_name"]
except KeyError:
xlabel = self.raw_data_dict['sweep_parameter_names'][0]
xunit = self.raw_data_dict['sweep_parameter_units'][0]
if np.ndim(xunit) > 0:
xunit = xunit[0]
title = (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring'])
for qbn in self.qb_names:
data_no = self.ramsey_analysis.proc_data_dict['data_to_fit'][
qbn][:-self.ramsey_analysis.num_cal_points]
data_with = self.ramsey_add_pulse_analysis.proc_data_dict[
'data_to_fit'][
qbn][:-self.ramsey_analysis.num_cal_points]
delays = self.ramsey_analysis.proc_data_dict['sweep_points_dict'][
qbn]['sweep_points'][
:-self.ramsey_analysis.num_cal_points]
figure_name = 'CrossZZ_' + qbn
self.plot_dicts[figure_name+'with'] = {
'fig_id': figure_name,
'plotfn': self.plot_line,
'xvals': delays,
'yvals': data_with,
'xlabel': xlabel,
'xunit': xunit,
'ylabel': '|e> state population',
'setlabel': 'with $\\pi$-pulse',
'title': title,
'color': 'r',
'marker': 'o',
'line_kws': {'markersize': 5},
'linestyle': 'none',
'do_legend': True,
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
if self.do_fitting:
fit_res_with = self.ramsey_add_pulse_analysis.fit_dicts[
'exp_decay_' + qbn]['fit_res']
self.plot_dicts['fit_with_'+qbn] = {
'fig_id': figure_name,
'plotfn': self.plot_fit,
'xlabel': 'Ramsey delay',
'xunit': 's',
'fit_res': fit_res_with,
'setlabel': 'with $\\pi$-pulse - fit',
'title': title,
'do_legend': True,
'color': 'r',
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
self.plot_dicts[figure_name+'no'] = {
'fig_id': figure_name,
'plotfn': self.plot_line,
'xvals': delays,
'yvals': data_no,
'setlabel': 'no $\\pi$-pulse',
'title': title,
'color': 'g',
'marker': 'o',
'line_kws': {'markersize': 5},
'linestyle': 'none',
'do_legend': True,
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
if self.do_fitting:
fit_res_no = self.ramsey_analysis.fit_dicts[
'exp_decay_' + qbn]['fit_res']
self.plot_dicts['fit_no_'+qbn] = {
'fig_id': figure_name,
'plotfn': self.plot_fit,
'xlabel': 'Ramsey delay',
'xunit': 's',
'fit_res': fit_res_no,
'setlabel': 'no $\\pi$-pulse - fit',
'title': title,
'do_legend': True,
'color': 'g',
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
            textstr = r'$\alpha ZZ$ = {:.2f} $\pm$ {:.2f}'.format(
                self.cross_kerr*1e-3, self.cross_kerr_error*1e-3) + ' kHz'
self.plot_dicts['text_msg_' + qbn] = {'fig_id': figure_name,
'text_string': textstr,
'ypos': -0.2,
'xpos': -0.075,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text}
class InPhaseAmpCalibAnalysis(MultiQubit_TimeDomain_Analysis):
def extract_data(self):
super().extract_data()
params_dict = {}
for qbn in self.qb_names:
trans_name = self.get_transition_name(qbn)
s = 'Instrument settings.'+qbn
params_dict[f'{trans_name}_amp180_'+qbn] = \
s+f'.{trans_name}_amp180'
self.raw_data_dict.update(
self.get_data_from_timestamp_list(params_dict))
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.qb_names:
data = self.proc_data_dict['data_to_fit'][qbn]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
model = lmfit.models.LinearModel()
guess_pars = model.guess(data=data, x=sweep_points)
guess_pars['intercept'].value = 0.5
guess_pars['intercept'].vary = False
key = 'fit_' + qbn
self.fit_dicts[key] = {
'fit_fn': model.func,
'fit_xvals': {'x': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
trans_name = self.get_transition_name(qbn)
old_amp180 = self.raw_data_dict[
f'{trans_name}_amp180_'+qbn]
if old_amp180 != old_amp180:
old_amp180 = 0
self.proc_data_dict['analysis_params_dict'][qbn] = OrderedDict()
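            # The slope of the linear fit measures the fractional over-/
            # under-rotation per pulse, so the corrected pi-pulse amplitude
            # is old_amp180*(1 - slope).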
self.proc_data_dict['analysis_params_dict'][qbn][
'corrected_amp'] = old_amp180 - self.fit_dicts[
'fit_' + qbn]['fit_res'].best_values['slope']*old_amp180
self.proc_data_dict['analysis_params_dict'][qbn][
'corrected_amp_stderr'] = self.fit_dicts[
'fit_' + qbn]['fit_res'].params['slope'].stderr*old_amp180
def prepare_plots(self):
super().prepare_plots()
if self.do_fitting:
for qbn in self.qb_names:
# rename base plot
if self.fit_dicts['fit_' + qbn][
'fit_res'].best_values['slope'] >= 0:
base_plot_name = 'OverRotation_' + qbn
else:
base_plot_name = 'UnderRotation_' + qbn
self.prepare_projected_data_plot(
fig_name=base_plot_name,
data=self.proc_data_dict['data_to_fit'][qbn],
plot_name_suffix=qbn+'fit',
qb_name=qbn)
self.plot_dicts['fit_' + qbn] = {
'fig_id': base_plot_name,
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['fit_' + qbn]['fit_res'],
'setlabel': 'linear fit',
'do_legend': True,
'color': 'r',
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
trans_name = self.get_transition_name(qbn)
old_amp180 = self.raw_data_dict[
f'{trans_name}_amp180_'+qbn]
if old_amp180 != old_amp180:
old_amp180 = 0
correction_dict = self.proc_data_dict['analysis_params_dict']
fit_res = self.fit_dicts['fit_' + qbn]['fit_res']
textstr = '$\pi$-Amp = {:.4f} mV'.format(
correction_dict[qbn]['corrected_amp']*1e3) \
+ ' $\pm$ {:.1e} mV'.format(
correction_dict[qbn]['corrected_amp_stderr']*1e3) \
+ '\nold $\pi$-Amp = {:.4f} mV'.format(
old_amp180*1e3) \
+ '\namp. correction = {:.4f} mV'.format(
fit_res.best_values['slope']*old_amp180*1e3) \
+ '\nintercept = {:.2f}'.format(
fit_res.best_values['intercept'])
self.plot_dicts['text_msg_' + qbn] = {
'fig_id': base_plot_name,
'ypos': -0.2,
'xpos': 0,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
self.plot_dicts['half_hline_' + qbn] = {
'fig_id': base_plot_name,
'plotfn': self.plot_hlines,
'y': 0.5,
'xmin': self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][0],
'xmax': self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][-1],
'colors': 'gray'}
class MultiCZgate_Calib_Analysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, *args, **kwargs):
options_dict = kwargs.pop('options_dict', {})
options_dict.update({'TwoD': True})
kwargs.update({'options_dict': options_dict})
self.phase_key = 'phase_diffs'
self.legend_label_func = lambda qbn, row: ''
super().__init__(*args, **kwargs)
def extract_data(self):
super().extract_data()
# Find leakage and ramsey qubit names
self.leakage_qbnames = self.get_param_value('leakage_qbnames',
default_value=[])
self.ramsey_qbnames = self.get_param_value('ramsey_qbnames',
default_value=[])
self.gates_list = self.get_param_value('gates_list', default_value=[])
if not len(self.gates_list):
# self.gates_list must exist as a list of tuples where the first
# entry in each tuple is a leakage qubit name, and the second is
# a ramsey qubit name.
self.gates_list = [(qbl, qbr) for qbl, qbr in
zip(self.leakage_qbnames, self.ramsey_qbnames)]
        # prepare the list of qubits that must be considered simultaneously
        # for preselection. Default: preselect on all qubits of the gate
        # being in the ground state
default_preselection_qbs = defaultdict(list)
for qbn in self.qb_names:
for gate_qbs in self.gates_list:
if qbn in gate_qbs:
default_preselection_qbs[qbn].extend(gate_qbs)
preselection_qbs = self.get_param_value("preselection_qbs",
default_preselection_qbs)
self.options_dict.update({"preselection_qbs": preselection_qbs})
def process_data(self):
super().process_data()
        # TODO: Steph 15.09.2020
        # This is a hack. It should be done in MultiQubit_TimeDomain_Analysis,
        # but that would break every analysis inheriting from it; we just
        # needed it to work for this analysis :)
self.data_to_fit = self.get_param_value('data_to_fit', {})
for qbn in self.data_to_fit:
# make values of data_to_fit be lists
if isinstance(self.data_to_fit[qbn], str):
self.data_to_fit[qbn] = [self.data_to_fit[qbn]]
# Overwrite data_to_fit in proc_data_dict
self.proc_data_dict['data_to_fit'] = OrderedDict()
for qbn, prob_data in self.proc_data_dict[
'projected_data_dict'].items():
if qbn in self.data_to_fit:
self.proc_data_dict['data_to_fit'][qbn] = {
prob_label: prob_data[prob_label] for prob_label in
self.data_to_fit[qbn]}
# Make sure data has the right shape (len(hard_sp), len(soft_sp))
for qbn, prob_data in self.proc_data_dict['data_to_fit'].items():
for prob_label, data in prob_data.items():
if data.shape[1] != self.proc_data_dict[
'sweep_points_dict'][qbn]['sweep_points'].size:
self.proc_data_dict['data_to_fit'][qbn][prob_label] = data.T
# reshape data for ease of use
qbn = self.qb_names[0]
phase_sp_param_name = [p for p in self.mospm[qbn] if 'phase' in p][0]
phases = self.sp.get_sweep_params_property('values', 0,
phase_sp_param_name)
self.dim_scale_factor = len(phases) // len(np.unique(phases))
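        # dim_scale_factor counts how often the unique phase values are
        # repeated in the hard sweep (typically 2: once with and once
        # without the gate); the reshape below turns each repetition into
        # its own row.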
self.proc_data_dict['data_to_fit_reshaped'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['data_to_fit_reshaped'][qbn] = {
prob_label: np.reshape(
self.proc_data_dict['data_to_fit'][qbn][prob_label][
:, :-self.num_cal_points],
(self.dim_scale_factor * \
self.proc_data_dict['data_to_fit'][qbn][prob_label][
:, :-self.num_cal_points].shape[0],
self.proc_data_dict['data_to_fit'][qbn][prob_label][
:, :-self.num_cal_points].shape[1]//self.dim_scale_factor))
for prob_label in self.proc_data_dict['data_to_fit'][qbn]}
# convert phases to radians
for qbn in self.qb_names:
sweep_dict = self.proc_data_dict['sweep_points_dict'][qbn]
sweep_dict['sweep_points'] *= np.pi/180
def plot_traces(self, prob_label, data_2d, qbn):
plotsize = self.get_default_plot_params(set=False)[
'figure.figsize']
plotsize = (plotsize[0], plotsize[0]/1.25)
if data_2d.shape[1] != self.proc_data_dict[
'sweep_points_dict'][qbn]['sweep_points'].size:
data_2d = data_2d.T
data_2d_reshaped = np.reshape(
data_2d[:, :-self.num_cal_points],
(self.dim_scale_factor*data_2d[:, :-self.num_cal_points].shape[0],
data_2d[:, :-self.num_cal_points].shape[1]//self.dim_scale_factor))
data_2d_cal_reshaped = [[data_2d[:, -self.num_cal_points:]]] * \
(self.dim_scale_factor *
data_2d[:, :-self.num_cal_points].shape[0])
ref_states_plot_dicts = {}
for row in range(data_2d_reshaped.shape[0]):
phases = np.unique(self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points'])
data = data_2d_reshaped[row, :]
legend_bbox_to_anchor = (1, -0.15)
legend_pos = 'upper right'
legend_ncol = 2
if qbn in self.ramsey_qbnames and self.get_latex_prob_label(
prob_label) in [self.get_latex_prob_label(pl)
for pl in self.data_to_fit[qbn]]:
figure_name = '{}_{}_{}'.format(self.phase_key, qbn, prob_label)
elif qbn in self.leakage_qbnames and self.get_latex_prob_label(
prob_label) in [self.get_latex_prob_label(pl)
for pl in self.data_to_fit[qbn]]:
figure_name = 'Leakage_{}_{}'.format(qbn, prob_label)
else:
figure_name = 'projected_plot_' + qbn + '_' + \
prob_label
# plot cal points
if self.num_cal_points > 0:
data_w_cal = data_2d_cal_reshaped[row][0][0]
for i, cal_pts_idxs in enumerate(
self.cal_states_dict.values()):
s = '{}_{}_{}'.format(row, qbn, prob_label)
ref_state_plot_name = list(
self.cal_states_dict)[i] + '_' + s
ref_states_plot_dicts[ref_state_plot_name] = {
'fig_id': figure_name,
'plotfn': self.plot_line,
'plotsize': plotsize,
'xvals': self.proc_data_dict[
'sweep_points_dict'][qbn][
'cal_points_sweep_points'][
cal_pts_idxs],
'yvals': data_w_cal[cal_pts_idxs],
'setlabel': list(
self.cal_states_dict)[i] if
row == 0 else '',
'do_legend': row == 0,
'legend_bbox_to_anchor':
legend_bbox_to_anchor,
'legend_pos': legend_pos,
'legend_ncol': legend_ncol,
'linestyle': 'none',
'line_kws': {'color':
self.get_cal_state_color(
list(self.cal_states_dict)[i])}}
xlabel, xunit = self.get_xaxis_label_unit(qbn)
self.plot_dicts['data_{}_{}_{}'.format(
row, qbn, prob_label)] = {
'plotfn': self.plot_line,
'fig_id': figure_name,
'plotsize': plotsize,
'xvals': phases,
'xlabel': xlabel,
'xunit': xunit,
'yvals': data,
'ylabel': self.get_yaxis_label(prob_label),
'yunit': '',
'yscale': self.get_param_value("yscale", "linear"),
'setlabel': 'Data - ' + self.legend_label_func(qbn, row)
if row in [0, 1] else '',
'title': self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring'] + '-' + qbn,
'linestyle': 'none',
'color': 'C0' if row % 2 == 0 else 'C2',
'do_legend': row in [0, 1],
'legend_ncol': legend_ncol,
'legend_bbox_to_anchor': legend_bbox_to_anchor,
'legend_pos': legend_pos}
if self.do_fitting and 'projected' not in figure_name:
if qbn in self.leakage_qbnames and self.get_param_value(
'classified_ro', False):
continue
k = 'fit_{}{}_{}_{}'.format(
'on' if row % 2 == 0 else 'off', row, prob_label, qbn)
if f'Cos_{k}' in self.fit_dicts:
fit_res = self.fit_dicts[f'Cos_{k}']['fit_res']
self.plot_dicts[k + '_' + prob_label] = {
'fig_id': figure_name,
'plotfn': self.plot_fit,
'fit_res': fit_res,
'setlabel': 'Fit - ' + self.legend_label_func(qbn, row)
if row in [0, 1] else '',
'color': 'C0' if row % 2 == 0 else 'C2',
'do_legend': row in [0, 1],
'legend_ncol': legend_ncol,
'legend_bbox_to_anchor':
legend_bbox_to_anchor,
'legend_pos': legend_pos}
elif f'Linear_{k}' in self.fit_dicts:
fit_res = self.fit_dicts[f'Linear_{k}']['fit_res']
xvals = fit_res.userkws[
fit_res.model.independent_vars[0]]
xfine = np.linspace(min(xvals), max(xvals), 100)
yvals = fit_res.model.func(
xfine, **fit_res.best_values)
if not hasattr(yvals, '__iter__'):
yvals = np.array(len(xfine)*[yvals])
self.plot_dicts[k] = {
'fig_id': figure_name,
'plotfn': self.plot_line,
'xvals': xfine,
'yvals': yvals,
'marker': '',
'setlabel': 'Fit - ' + self.legend_label_func(
qbn, row) if row in [0, 1] else '',
'do_legend': row in [0, 1],
'legend_ncol': legend_ncol,
'color': 'C0' if row % 2 == 0 else 'C2',
'legend_bbox_to_anchor':
legend_bbox_to_anchor,
'legend_pos': legend_pos}
        # ref state plots need to be added at the end, otherwise the
        # legend for |g> and |e> is added twice (because of the
        # condition do_legend = (row in [0, 1]) in the plot dicts above)
if self.num_cal_points > 0:
self.plot_dicts.update(ref_states_plot_dicts)
return figure_name
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
self.leakage_values = np.array([])
labels = ['on', 'off']
for i, qbn in enumerate(self.qb_names):
for prob_label in self.data_to_fit[qbn]:
for row in range(self.proc_data_dict['data_to_fit_reshaped'][
qbn][prob_label].shape[0]):
phases = np.unique(self.proc_data_dict['sweep_points_dict'][
qbn]['msmt_sweep_points'])
data = self.proc_data_dict['data_to_fit_reshaped'][qbn][
prob_label][row, :]
key = 'fit_{}{}_{}_{}'.format(labels[row % 2], row,
prob_label, qbn)
if qbn in self.leakage_qbnames and prob_label == 'pf':
if self.get_param_value('classified_ro', False):
self.leakage_values = np.append(self.leakage_values,
np.mean(data))
else:
# fit leakage qb results to a constant
model = lmfit.models.ConstantModel()
guess_pars = model.guess(data=data, x=phases)
self.fit_dicts[f'Linear_{key}'] = {
'fit_fn': model.func,
'fit_xvals': {'x': phases},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
elif prob_label == 'pe' or prob_label == 'pg':
# fit ramsey qb results to a cosine
model = lmfit.Model(fit_mods.CosFunc)
guess_pars = fit_mods.Cos_guess(
model=model,
t=phases,
data=data, freq_guess=1/(2*np.pi))
guess_pars['frequency'].value = 1/(2*np.pi)
guess_pars['frequency'].vary = False
self.fit_dicts[f'Cos_{key}'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': phases},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
# Cos fits
keys = [k for k in list(self.fit_dicts.keys()) if
(k.startswith('Cos') and k.endswith(qbn))]
if len(keys) > 0:
fit_res_objs = [self.fit_dicts[k]['fit_res'] for k in keys]
# cosine amplitudes
amps = np.array([fr.best_values['amplitude'] for fr
in fit_res_objs])
amps_errs = np.array([fr.params['amplitude'].stderr
for fr in fit_res_objs], dtype=np.float64)
amps_errs = np.nan_to_num(amps_errs)
if qbn in self.ramsey_qbnames:
# phase_diffs
phases = np.array([fr.best_values['phase'] for fr in
fit_res_objs])
phases_errs = np.array([fr.params['phase'].stderr for fr in
fit_res_objs], dtype=np.float64)
phases_errs = np.nan_to_num(phases_errs)
self.proc_data_dict['analysis_params_dict'][
f'phases_{qbn}'] = {
'val': phases, 'stderr': phases_errs}
# compute phase diffs
if getattr(self, 'delta_tau', 0) is not None:
                        # delta_tau can be None for Cryoscope with
                        # estimation_window == None and an odd number of
                        # truncation lengths
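                        # even rows are the fits with the gate/pulse on,
                        # odd rows without it; their difference is the
                        # conditional phase, wrapped into [0, 2*pi)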
phase_diffs = phases[0::2] - phases[1::2]
phase_diffs %= (2*np.pi)
phase_diffs_stderrs = np.sqrt(
np.array(phases_errs[0::2]**2 +
phases_errs[1::2]**2, dtype=np.float64))
self.proc_data_dict['analysis_params_dict'][
f'{self.phase_key}_{qbn}'] = {
'val': phase_diffs, 'stderr': phase_diffs_stderrs}
# contrast = (cos_amp_g + cos_amp_e)/ 2
contrast = (amps[1::2] + amps[0::2])/2
contrast_stderr = 0.5*np.sqrt(
np.array(amps_errs[0::2]**2 + amps_errs[1::2]**2,
dtype=np.float64))
self.proc_data_dict['analysis_params_dict'][
f'mean_contrast_{qbn}'] = {
'val': contrast, 'stderr': contrast_stderr}
# contrast_loss = (cos_amp_g - cos_amp_e)/ cos_amp_g
population_loss = (amps[1::2] - amps[0::2])/amps[1::2]
x = amps[1::2] - amps[0::2]
x_err = np.array(amps_errs[0::2]**2 + amps_errs[1::2]**2,
dtype=np.float64)
y = amps[1::2]
y_err = amps_errs[1::2]
try:
population_loss_stderrs = np.sqrt(np.array(
((y * x_err) ** 2 + (x * y_err) ** 2) / (y ** 4),
dtype=np.float64))
                    except Exception:
population_loss_stderrs = float("nan")
self.proc_data_dict['analysis_params_dict'][
f'population_loss_{qbn}'] = \
{'val': population_loss,
'stderr': population_loss_stderrs}
else:
self.proc_data_dict['analysis_params_dict'][
f'amps_{qbn}'] = {
'val': amps[1::2], 'stderr': amps_errs[1::2]}
# Linear fits
keys = [k for k in list(self.fit_dicts.keys()) if
(k.startswith('Linear') and k.endswith(qbn))]
if len(keys) > 0:
fit_res_objs = [self.fit_dicts[k]['fit_res'] for k in keys]
# get leakage
lines = np.array([fr.best_values['c'] for fr
in fit_res_objs])
lines_errs = np.array([fr.params['c'].stderr for
fr in fit_res_objs], dtype=np.float64)
lines_errs = np.nan_to_num(lines_errs)
leakage = lines[0::2]
leakage_errs = np.array(lines_errs[0::2], dtype=np.float64)
leakage_increase = lines[0::2] - lines[1::2]
                leakage_increase_errs = np.array(np.sqrt(lines_errs[0::2]**2 +
                                                         lines_errs[1::2]**2),
                                                 dtype=np.float64)
self.proc_data_dict['analysis_params_dict'][
f'leakage_{qbn}'] = \
{'val': leakage, 'stderr': leakage_errs}
self.proc_data_dict['analysis_params_dict'][
f'leakage_increase_{qbn}'] = {'val': leakage_increase,
'stderr': leakage_increase_errs}
# special case: if classified detector was used, we get leakage
# for free
if qbn in self.leakage_qbnames and self.get_param_value(
'classified_ro', False):
leakage = self.leakage_values[0::2]
leakage_errs = np.zeros(len(leakage))
leakage_increase = self.leakage_values[0::2] - \
self.leakage_values[1::2]
leakage_increase_errs = np.zeros(len(leakage))
self.proc_data_dict['analysis_params_dict'][
f'leakage_{qbn}'] = \
{'val': leakage, 'stderr': leakage_errs}
self.proc_data_dict['analysis_params_dict'][
f'leakage_increase_{qbn}'] = {'val': leakage_increase,
'stderr': leakage_increase_errs}
self.save_processed_data(key='analysis_params_dict')
def prepare_plots(self):
len_ssp = len(self.proc_data_dict['analysis_params_dict'][
f'{self.phase_key}_{self.ramsey_qbnames[0]}']['val'])
if self.options_dict.get('plot_all_traces', True):
for j, qbn in enumerate(self.qb_names):
if self.options_dict.get('plot_all_probs', True):
for prob_label, data_2d in self.proc_data_dict[
'projected_data_dict'][qbn].items():
figure_name = self.plot_traces(prob_label, data_2d, qbn)
else:
                    for prob_label, data_2d in self.proc_data_dict[
                            'data_to_fit'][qbn].items():
figure_name = self.plot_traces(prob_label, data_2d, qbn)
if self.do_fitting and len_ssp == 1:
self.options_dict.update({'TwoD': False,
'plot_proj_data': False})
super().prepare_plots()
if qbn in self.ramsey_qbnames:
# add the cphase + leakage textboxes to the
# cphase_qbr_pe figure
figure_name = f'{self.phase_key}_{qbn}_pe'
textstr = '{} = \n{:.2f}'.format(
self.phase_key,
self.proc_data_dict['analysis_params_dict'][
f'{self.phase_key}_{qbn}']['val'][0]*180/np.pi) + \
r'$^{\circ}$' + \
'$\\pm${:.2f}'.format(
self.proc_data_dict[
'analysis_params_dict'][
f'{self.phase_key}_{qbn}'][
'stderr'][0] * 180 / np.pi) + \
r'$^{\circ}$'
textstr += '\nMean contrast = \n' + \
'{:.3f} $\\pm$ {:.3f}'.format(
self.proc_data_dict[
'analysis_params_dict'][
f'mean_contrast_{qbn}']['val'][0],
self.proc_data_dict[
'analysis_params_dict'][
f'mean_contrast_{qbn}'][
'stderr'][0])
textstr += '\nContrast loss = \n' + \
'{:.3f} $\\pm$ {:.3f}'.format(
self.proc_data_dict[
'analysis_params_dict'][
f'population_loss_{qbn}']['val'][0],
self.proc_data_dict[
'analysis_params_dict'][
f'population_loss_{qbn}'][
'stderr'][0])
pdap = self.proc_data_dict.get(
'percent_data_after_presel', False)
if pdap:
textstr += "\nPreselection = \n {" + ', '.join(
f"{qbn}: {v}" for qbn, v in pdap.items()) + '}'
self.plot_dicts['cphase_text_msg_' + qbn] = {
'fig_id': figure_name,
'ypos': -0.2,
'xpos': -0.1,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'box_props': None,
'plotfn': self.plot_text,
'text_string': textstr}
qbl = [gl[0] for gl in self.gates_list
if qbn == gl[1]]
if len(qbl):
qbl = qbl[0]
textstr = 'Leakage =\n{:.5f} $\\pm$ {:.5f}'.format(
self.proc_data_dict['analysis_params_dict'][
f'leakage_{qbl}']['val'][0],
self.proc_data_dict['analysis_params_dict'][
f'leakage_{qbl}']['stderr'][0])
textstr += '\n\n$\\Delta$Leakage = \n' \
'{:.5f} $\\pm$ {:.5f}'.format(
self.proc_data_dict['analysis_params_dict'][
f'leakage_increase_{qbl}']['val'][0],
self.proc_data_dict['analysis_params_dict'][
f'leakage_increase_{qbl}']['stderr'][0])
self.plot_dicts['cphase_text_msg_' + qbl] = {
'fig_id': figure_name,
'ypos': -0.2,
'xpos': 0.175,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'box_props': None,
'plotfn': self.plot_text,
'text_string': textstr}
else:
if f'amps_{qbn}' in self.proc_data_dict[
'analysis_params_dict']:
figure_name = f'Leakage_{qbn}_pg'
textstr = 'Amplitude CZ int. OFF = \n' + \
'{:.3f} $\\pm$ {:.3f}'.format(
self.proc_data_dict[
'analysis_params_dict'][
f'amps_{qbn}']['val'][0],
self.proc_data_dict[
'analysis_params_dict'][
f'amps_{qbn}']['stderr'][0])
self.plot_dicts['swap_text_msg_' + qbn] = {
'fig_id': figure_name,
'ypos': -0.2,
'xpos': -0.1,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'box_props': None,
'plotfn': self.plot_text,
'text_string': textstr}
# plot analysis results
if self.do_fitting and len_ssp > 1:
for qbn in self.qb_names:
ss_pars = self.proc_data_dict['sweep_points_2D_dict'][qbn]
for idx, ss_pname in enumerate(ss_pars):
xvals = self.sp.get_sweep_params_property('values', 1,
ss_pname)
xvals_to_use = deepcopy(xvals)
xlabel = self.sp.get_sweep_params_property('label', 1,
ss_pname)
xunit = self.sp.get_sweep_params_property('unit', 1,
ss_pname)
for param_name, results_dict in self.proc_data_dict[
'analysis_params_dict'].items():
if qbn in param_name:
reps = 1
if len(results_dict['val']) >= len(xvals):
                            reps = len(results_dict['val']) // len(xvals)
else:
                            # Cryoscope case
if hasattr(self, 'xvals_reduction_func'):
xvals_to_use = self.xvals_reduction_func(
xvals)
else:
                                log.warning('Length mismatch between xvals '
                                            'and analysis param for '
                                            f'{param_name}, and no '
                                            'xvals_reduction_func has been '
                                            'defined. Unclear how to '
                                            'reduce xvals.')
plot_name = f'{param_name}_vs_{xlabel}'
if 'phase' in param_name:
yvals = results_dict['val']*180/np.pi - (180 if
len(self.leakage_qbnames) > 0 else 0)
yerr = results_dict['stderr']*180/np.pi
ylabel = param_name + ('-$180^{\\circ}$' if
len(self.leakage_qbnames) > 0 else '')
self.plot_dicts[plot_name+'_hline'] = {
'fig_id': plot_name,
'plotfn': self.plot_hlines,
'y': 0,
'xmin': np.min(xvals_to_use),
'xmax': np.max(xvals_to_use),
'colors': 'gray'}
else:
yvals = results_dict['val']
yerr = results_dict['stderr']
ylabel = param_name
if 'phase' in param_name:
yunit = 'deg'
elif 'freq' in param_name:
yunit = 'Hz'
else:
yunit = ''
self.plot_dicts[plot_name] = {
'plotfn': self.plot_line,
'xvals': np.repeat(xvals_to_use, reps),
'xlabel': xlabel,
'xunit': xunit,
'yvals': yvals,
'yerr': yerr if param_name != 'leakage'
else None,
'ylabel': ylabel,
'yunit': yunit,
'title': self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring']
+ '-' + qbn,
'linestyle': 'none',
'do_legend': False}
class CPhaseLeakageAnalysis(MultiCZgate_Calib_Analysis):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def extract_data(self):
super().extract_data()
# Find leakage and ramsey qubit names
# first try the legacy code
leakage_qbname = self.get_param_value('leakage_qbname')
ramsey_qbname = self.get_param_value('ramsey_qbname')
if leakage_qbname is not None and ramsey_qbname is not None:
self.gates_list += [(leakage_qbname, ramsey_qbname)]
self.leakage_qbnames = [leakage_qbname]
self.ramsey_qbnames = [ramsey_qbname]
else:
# new measurement framework
task_list = self.get_param_value('task_list', default_value=[])
for task in task_list:
self.gates_list += [(task['qbl'], task['qbr'])]
self.leakage_qbnames += [task['qbl']]
self.ramsey_qbnames += [task['qbr']]
if len(self.leakage_qbnames) == 0 and len(self.ramsey_qbnames) == 0:
raise ValueError('Please provide either leakage_qbnames or '
'ramsey_qbnames.')
elif len(self.ramsey_qbnames) == 0:
self.ramsey_qbnames = [qbn for qbn in self.qb_names if
qbn not in self.leakage_qbnames]
elif len(self.leakage_qbnames) == 0:
self.leakage_qbnames = [qbn for qbn in self.qb_names if
qbn not in self.ramsey_qbnames]
if len(self.leakage_qbnames) == 0:
self.leakage_qbnames = None
def process_data(self):
super().process_data()
self.phase_key = 'cphase'
        if self.leakage_qbnames:
def legend_label_func(qbn, row, gates_list=self.gates_list):
leakage_qbnames = [qb_tup[0] for qb_tup in gates_list]
if qbn in leakage_qbnames:
return f'{qbn} in $|g\\rangle$' if row % 2 != 0 else \
f'{qbn} in $|e\\rangle$'
else:
qbln = [qb_tup for qb_tup in gates_list
if qbn == qb_tup[1]][0][0]
return f'{qbln} in $|g\\rangle$' if row % 2 != 0 else \
f'{qbln} in $|e\\rangle$'
else:
legend_label_func = lambda qbn, row: \
'qbc in $|g\\rangle$' if row % 2 != 0 else \
'qbc in $|e\\rangle$'
self.legend_label_func = legend_label_func
class DynamicPhaseAnalysis(MultiCZgate_Calib_Analysis):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def process_data(self):
super().process_data()
if len(self.ramsey_qbnames) == 0:
self.ramsey_qbnames = self.qb_names
self.phase_key = 'dynamic_phase'
self.legend_label_func = lambda qbn, row: 'no FP' \
if row % 2 != 0 else 'with FP'
class CryoscopeAnalysis(DynamicPhaseAnalysis):
def __init__(self, qb_names, *args, **kwargs):
options_dict = kwargs.get('options_dict', {})
unwrap_phases = options_dict.pop('unwrap_phases', True)
options_dict['unwrap_phases'] = unwrap_phases
kwargs['options_dict'] = options_dict
params_dict = {}
for qbn in qb_names:
s = f'Instrument settings.{qbn}'
params_dict[f'ge_freq_{qbn}'] = s+f'.ge_freq'
kwargs['params_dict'] = params_dict
kwargs['numeric_params'] = list(params_dict)
super().__init__(qb_names, *args, **kwargs)
def process_data(self):
super().process_data()
self.phase_key = 'delta_phase'
def analyze_fit_results(self):
global_delta_tau = self.get_param_value('estimation_window')
task_list = self.get_param_value('task_list')
for qbn in self.qb_names:
delta_tau = deepcopy(global_delta_tau)
if delta_tau is None:
if task_list is None:
log.warning(f'estimation_window is None and task_list '
f'for {qbn} was not found. Assuming no '
f'estimation_window was used.')
else:
task = [t for t in task_list if t['qb'] == qbn]
if not len(task):
raise ValueError(f'{qbn} not found in task_list.')
delta_tau = task[0].get('estimation_window', None)
self.delta_tau = delta_tau
if self.get_param_value('analyze_fit_results_super', True):
super().analyze_fit_results()
self.proc_data_dict['tvals'] = OrderedDict()
for qbn in self.qb_names:
if delta_tau is None:
trunc_lengths = self.sp.get_sweep_params_property(
'values', 1, f'{qbn}_truncation_length')
delta_tau = np.diff(trunc_lengths)
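                # keep only strictly positive (increasing) truncation-length
                # steps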
m = delta_tau > 0
delta_tau = delta_tau[m]
phases = self.proc_data_dict['analysis_params_dict'][
f'phases_{qbn}']
delta_phases_vals = -np.diff(phases['val'])[m]
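                # wrap the phase differences into [-pi, pi)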
delta_phases_vals = (delta_phases_vals + np.pi) % (
2 * np.pi) - np.pi
delta_phases_errs = (np.sqrt(
np.array(phases['stderr'][1:] ** 2 +
phases['stderr'][:-1] ** 2, dtype=np.float64)))[m]
self.xvals_reduction_func = lambda xvals: \
((xvals[1:] + xvals[:-1]) / 2)[m]
self.proc_data_dict['analysis_params_dict'][
f'{self.phase_key}_{qbn}'] = {
'val': delta_phases_vals, 'stderr': delta_phases_errs}
# remove the entries in analysis_params_dict that are not
# relevant for Cryoscope (pop_loss), since
# these will cause a problem with plotting in this case.
self.proc_data_dict['analysis_params_dict'].pop(
f'population_loss_{qbn}', None)
else:
delta_phases = self.proc_data_dict['analysis_params_dict'][
f'{self.phase_key}_{qbn}']
delta_phases_vals = delta_phases['val']
delta_phases_errs = delta_phases['stderr']
if self.get_param_value('unwrap_phases', False):
if hasattr(delta_tau, '__iter__'):
                    # unwrap in frequency such that we don't jump by more
                    # than half the Nyquist band at any step
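                    # each frequency estimate is shifted by an integer
                    # multiple of 1/dt (the aliasing period of the
                    # estimation window dt) so that it stays closest to the
                    # previous estimate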
df = []
prev_df = 0
for dp, dt in zip(delta_phases_vals, delta_tau):
df.append(dp / (2 * np.pi * dt))
df[-1] += np.round((prev_df - df[-1]) * dt) / dt
prev_df = df[-1]
delta_phases_vals = np.array(df)*(2*np.pi*delta_tau)
else:
delta_phases_vals = np.unwrap((delta_phases_vals + np.pi) %
(2*np.pi) - np.pi)
self.proc_data_dict['analysis_params_dict'][
f'{self.phase_key}_{qbn}']['val'] = delta_phases_vals
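            # convert phase increments to frequency detunings:
            # delta_f = delta_phi / (2*pi*delta_tau)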
delta_freqs = delta_phases_vals/2/np.pi/delta_tau
delta_freqs_errs = delta_phases_errs/2/np.pi/delta_tau
self.proc_data_dict['analysis_params_dict'][f'delta_freq_{qbn}'] = \
{'val': delta_freqs, 'stderr': delta_freqs_errs}
qb_freqs = self.raw_data_dict[f'ge_freq_{qbn}'] + delta_freqs
self.proc_data_dict['analysis_params_dict'][f'freq_{qbn}'] = \
{'val': qb_freqs, 'stderr': delta_freqs_errs}
if hasattr(self, 'xvals_reduction_func') and \
self.xvals_reduction_func is not None:
self.proc_data_dict['tvals'][f'{qbn}'] = \
self.xvals_reduction_func(
self.proc_data_dict['sweep_points_2D_dict'][qbn][
f'{qbn}_truncation_length'])
else:
self.proc_data_dict['tvals'][f'{qbn}'] = \
self.proc_data_dict['sweep_points_2D_dict'][qbn][
f'{qbn}_truncation_length']
self.save_processed_data(key='analysis_params_dict')
self.save_processed_data(key='tvals')
def get_generated_and_measured_pulse(self, qbn=None):
"""
Args:
qbn: specifies for which qubit to calculate the quantities for.
Defaults to the first qubit in qb_names.
Returns: A tuple (tvals_gen, volts_gen, tvals_meas, freqs_meas,
freq_errs_meas, volt_freq_conv)
tvals_gen: time values for the generated fluxpulse
volts_gen: voltages of the generated fluxpulse
tvals_meas: time-values for the measured qubit frequencies
freqs_meas: measured qubit frequencies
freq_errs_meas: errors of measured qubit frequencies
volt_freq_conv: dictionary of fit params for frequency-voltage
conversion
"""
if qbn is None:
qbn = self.qb_names[0]
tvals_meas = self.proc_data_dict['tvals'][qbn]
freqs_meas = self.proc_data_dict['analysis_params_dict'][
f'freq_{qbn}']['val']
freq_errs_meas = self.proc_data_dict['analysis_params_dict'][
f'freq_{qbn}']['stderr']
tvals_gen, volts_gen, volt_freq_conv = self.get_generated_pulse(qbn)
return tvals_gen, volts_gen, tvals_meas, freqs_meas, freq_errs_meas, \
volt_freq_conv
def get_generated_pulse(self, qbn=None, tvals_gen=None, pulse_params=None):
"""
Args:
qbn: specifies for which qubit to calculate the quantities for.
Defaults to the first qubit in qb_names.
Returns: A tuple (tvals_gen, volts_gen, tvals_meas, freqs_meas,
freq_errs_meas, volt_freq_conv)
tvals_gen: time values for the generated fluxpulse
volts_gen: voltages of the generated fluxpulse
volt_freq_conv: dictionary of fit params for frequency-voltage
conversion
"""
if qbn is None:
qbn = self.qb_names[0]
# Flux pulse parameters
# Needs to be changed when support for other pulses is added.
op_dict = {
'pulse_type': f'Instrument settings.{qbn}.flux_pulse_type',
'channel': f'Instrument settings.{qbn}.flux_pulse_channel',
'aux_channels_dict': f'Instrument settings.{qbn}.'
f'flux_pulse_aux_channels_dict',
'amplitude': f'Instrument settings.{qbn}.flux_pulse_amplitude',
'frequency': f'Instrument settings.{qbn}.flux_pulse_frequency',
'phase': f'Instrument settings.{qbn}.flux_pulse_phase',
'pulse_length': f'Instrument settings.{qbn}.'
f'flux_pulse_pulse_length',
'truncation_length': f'Instrument settings.{qbn}.'
f'flux_pulse_truncation_length',
'buffer_length_start': f'Instrument settings.{qbn}.'
f'flux_pulse_buffer_length_start',
'buffer_length_end': f'Instrument settings.{qbn}.'
f'flux_pulse_buffer_length_end',
'extra_buffer_aux_pulse': f'Instrument settings.{qbn}.'
f'flux_pulse_extra_buffer_aux_pulse',
'pulse_delay': f'Instrument settings.{qbn}.'
f'flux_pulse_pulse_delay',
'basis_rotation': f'Instrument settings.{qbn}.'
f'flux_pulse_basis_rotation',
'gaussian_filter_sigma': f'Instrument settings.{qbn}.'
f'flux_pulse_gaussian_filter_sigma',
}
params_dict = {
'volt_freq_conv': f'Instrument settings.{qbn}.'
f'fit_ge_freq_from_flux_pulse_amp',
'flux_channel': f'Instrument settings.{qbn}.'
f'flux_pulse_channel',
'instr_pulsar': f'Instrument settings.{qbn}.'
f'instr_pulsar',
**op_dict
}
dd = self.get_data_from_timestamp_list(params_dict)
if pulse_params is not None:
dd.update(pulse_params)
dd['element_name'] = 'element'
pulse = seg_mod.UnresolvedPulse(dd).pulse_obj
pulse.algorithm_time(0)
if tvals_gen is None:
clk = self.clock(channel=dd['channel'], pulsar=dd['instr_pulsar'])
tvals_gen = np.arange(0, pulse.length, 1 / clk)
volts_gen = pulse.chan_wf(dd['flux_channel'], tvals_gen)
volt_freq_conv = dd['volt_freq_conv']
return tvals_gen, volts_gen, volt_freq_conv
class CZDynamicPhaseAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def process_data(self):
super().process_data()
# convert phases to radians
for qbn in self.qb_names:
sweep_dict = self.proc_data_dict['sweep_points_dict'][qbn]
sweep_dict['sweep_points'] *= np.pi/180
# get data with flux pulse and w/o flux pulse
self.data_with_fp = OrderedDict()
self.data_no_fp = OrderedDict()
for qbn in self.qb_names:
all_data = self.proc_data_dict['data_to_fit'][qbn]
if self.num_cal_points != 0:
all_data = all_data[:-self.num_cal_points]
self.data_with_fp[qbn] = all_data[0: len(all_data)//2]
self.data_no_fp[qbn] = all_data[len(all_data)//2:]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.qb_names:
sweep_points = np.unique(
self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points'])
for i, data in enumerate([self.data_with_fp[qbn],
self.data_no_fp[qbn]]):
cos_mod = lmfit.Model(fit_mods.CosFunc)
guess_pars = fit_mods.Cos_guess(
model=cos_mod,
t=sweep_points,
data=data, freq_guess=1/(2*np.pi))
guess_pars['frequency'].value = 1/(2*np.pi)
guess_pars['frequency'].vary = False
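# With the sweep points converted to radians in process_data, fixing
# the frequency to 1/(2*pi) makes the cosine have period 2*pi, so the
# fit reduces to cos(phase + phi0) and only the phase offset,
# amplitude and vertical offset are varied.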
key = 'cos_fit_{}_{}'.format(qbn, 'wfp' if i == 0 else 'nofp')
self.fit_dicts[key] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['analysis_params_dict'][qbn] = OrderedDict()
self.proc_data_dict['analysis_params_dict'][qbn][
'dynamic_phase'] = {
'val': (self.fit_dicts[f'cos_fit_{qbn}_wfp'][
'fit_res'].best_values['phase'] -
self.fit_dicts[f'cos_fit_{qbn}_nofp'][
'fit_res'].best_values['phase']),
'stderr': np.sqrt(
self.fit_dicts[f'cos_fit_{qbn}_wfp'][
'fit_res'].params['phase'].stderr**2 +
self.fit_dicts[f'cos_fit_{qbn}_nofp'][
'fit_res'].params['phase'].stderr**2)
}
self.save_processed_data(key='analysis_params_dict')
def prepare_plots(self):
super().prepare_plots()
for qbn in self.qb_names:
for i, data in enumerate([self.data_with_fp[qbn],
self.data_no_fp[qbn]]):
fit_key = f'cos_fit_{qbn}_wfp' if i == 0 else \
f'cos_fit_{qbn}_nofp'
plot_name_suffix = 'fit_' + ('wfp' if i == 0 else 'nofp')
cal_pts_data = self.proc_data_dict['data_to_fit'][qbn][
-self.num_cal_points:] if self.num_cal_points != 0 \
else np.array([])
base_plot_name = 'Dynamic_phase_' + qbn
self.prepare_projected_data_plot(
fig_name=base_plot_name,
data=np.concatenate((data,cal_pts_data)),
sweep_points=np.unique(
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points']),
data_label='with flux pulse' if i == 0 else 'no flux pulse',
plot_name_suffix=qbn + plot_name_suffix,
qb_name=qbn,
do_legend_cal_states=(i == 0))
if self.do_fitting:
fit_res = self.fit_dicts[fit_key]['fit_res']
self.plot_dicts[plot_name_suffix + '_' + qbn] = {
'fig_id': base_plot_name,
'plotfn': self.plot_fit,
'fit_res': fit_res ,
'setlabel': 'cosine fit',
'color': 'r',
'do_legend': i == 0}
textstr = 'Dynamic phase {}:\n\t{:.2f}'.format(
qbn,
self.proc_data_dict['analysis_params_dict'][qbn][
'dynamic_phase']['val']*180/np.pi) + \
r'$^{\circ}$' + \
'$\\pm${:.2f}'.format(
self.proc_data_dict['analysis_params_dict'][qbn][
'dynamic_phase']['stderr']*180/np.pi) + \
r'$^{\circ}$'
fpl = self.get_param_value('flux_pulse_length')
if fpl is not None:
textstr += '\n length: {:.2f} ns'.format(fpl*1e9)
fpa = self.get_param_value('flux_pulse_amp')
if fpa is not None:
textstr += '\n amp: {:.4f} V'.format(fpa)
self.plot_dicts['text_msg_' + qbn] = {
'fig_id': base_plot_name,
'ypos': -0.15,
'xpos': -0.05,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
for plot_name in list(self.plot_dicts)[::-1]:
if self.plot_dicts[plot_name].get('do_legend', False):
break
self.plot_dicts[plot_name].update(
{'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'})
class MultiQutrit_Timetrace_Analysis(ba.BaseDataAnalysis):
"""
Analysis class for timetraces, in particular used to compute
optimal SNR integration weights.
"""
def __init__(self, qb_names=None, auto=True, **kwargs):
"""
Initializes the timetrace analysis class.
Args:
qb_names (list): name of the qubits to analyze (can be a subset
of the measured qubits)
auto (bool): Start analysis automatically
**kwargs:
t_start: timestamp of the first timetrace
t_stop: timestamp of the last timetrace to analyze
options_dict (dict): relevant parameters:
acq_weights_basis (list, dict):
list of basis vectors used to compute the optimal weights,
e.g. ["ge", "gf"]: the first basis vector is the "e"
timetrace minus the "g" timetrace and the second is f - g.
The first letter in each basis state is the "reference
state", i.e. the one whose timetrace is subtracted. Can also
be passed as a dictionary with qubit names as keys and lists
of basis states as values, in case different bases should be
used for different qubits.
orthonormalize (bool): Whether or not to orthonormalize the
weight basis
tmax (float): time boundary for the plot (not the weights)
in seconds.
scale_weights (bool): scales the weights near unity to avoid
loss of precision on FPGA if weights are too small
"""
if qb_names is not None:
self.params_dict = {}
for qbn in qb_names:
s = 'Instrument settings.' + qbn
self.params_dict[f'ro_mod_freq_{qbn}'] = s + '.ro_mod_freq'
self.numeric_params = list(self.params_dict)
self.qb_names = qb_names
super().__init__(**kwargs)
if auto:
self.run_analysis()
def extract_data(self):
super().extract_data()
if self.qb_names is None:
# get all qubits from cal_points of first timetrace
cp = CalibrationPoints.from_string(
self.get_param_value('cal_points', None, 0))
self.qb_names = deepcopy(cp.qb_names)
self.channel_map = self.get_param_value('channel_map', None,
index=0)
if self.channel_map is None:
# assume same channel map for all timetraces (pick 0th)
value_names = self.raw_data_dict[0]['value_names']
if 'w' in value_names[0]:
self.channel_map = a_tools.get_qb_channel_map_from_hdf(
self.qb_names, value_names=value_names,
file_path=self.raw_data_dict['folder'])
else:
self.channel_map = {}
for qbn in self.qb_names:
self.channel_map[qbn] = value_names
if len(self.channel_map) == 0:
raise ValueError('No qubit RO channels have been found.')
def process_data(self):
super().process_data()
pdd = self.proc_data_dict
pdd['analysis_params_dict'] = dict()
ana_params = pdd['analysis_params_dict']
ana_params['timetraces'] = defaultdict(dict)
ana_params['optimal_weights'] = defaultdict(dict)
ana_params['optimal_weights_basis_labels'] = defaultdict(dict)
for qbn in self.qb_names:
# retrieve time traces
for i, rdd in enumerate(self.raw_data_dict):
ttrace_per_ro_ch = [rdd["measured_data"][ch]
for ch in self.channel_map[qbn]]
if len(ttrace_per_ro_ch) != 2:
raise NotImplementedError(
'This analysis does not support optimal weight '
f'measurement based on {len(ttrace_per_ro_ch)} ro channels.'
f' Try again with 2 RO channels.')
cp = CalibrationPoints.from_string(
self.get_param_value('cal_points', None, i))
# get state of qubit. There can only be one cal point per sequence
# when using the UHF for time traces, so take the 0th state
qb_state = cp.states[0][cp.qb_names.index(qbn)]
# store all timetraces in same pdd for convenience
ana_params['timetraces'][qbn].update(
{qb_state: ttrace_per_ro_ch[0] + 1j *ttrace_per_ro_ch[1]})
timetraces = ana_params['timetraces'][qbn] # for convenience
basis_labels = self.get_param_value('acq_weights_basis', None, 0)
if basis_labels is None:
# guess basis labels from # states measured
basis_labels = ["ge", "ef"] \
if len(ana_params['timetraces'][qbn]) > 2 else ['ge']
if isinstance(basis_labels, dict):
# if different basis for qubits, then select the according one
basis_labels = basis_labels[qbn]
# check that states from the basis are included in mmnt
for bs in basis_labels:
for qb_s in bs:
assert qb_s in timetraces,\
f'State: {qb_s} on {qbn} was not provided in the given ' \
f'timestamps but was requested as part of the basis' \
f' {basis_labels}. Please choose another weight basis.'
basis = np.array([timetraces[b[1]] - timetraces[b[0]]
for b in basis_labels])
# orthonormalize if required
if self.get_param_value("orthonormalize", False):
# We need to consider the integration weights as a vector of
# real numbers to ensure the Gram-Schmidt transformation of the
# weights leads to a linear transformation of the integrated
# readout results (relates to how integration is done on UHF,
# see One Note: Surface 17/ATC75 M136 S17HW02 Cooldown 5/
# 210330 Notes on orthonormalizing readout weights
basis_real = np.hstack((basis.real, basis.imag), )
basis_real = math.gram_schmidt(basis_real.T).T
basis = basis_real[:,:basis_real.shape[1]//2] + \
1j*basis_real[:,basis_real.shape[1]//2:]
basis_labels = [bs + "_ortho" if bs != basis_labels[0] else bs
for bs in basis_labels]
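# Sketch of the real-embedding trick used above (assuming a complex
# weight vector w of length N as a numpy array):
#     w_real = np.hstack((w.real, w.imag))   # real vector, length 2N
#     # ... Gram-Schmidt on the stacked real vectors ...
#     w = w_real[:N] + 1j * w_real[N:]       # back to complex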
# scale if required
if self.get_param_value('scale_weights', True):
k = np.amax([(np.max(np.abs(b.real)),
np.max(np.abs(b.imag))) for b in basis])
basis /= k
ana_params['optimal_weights'][qbn] = basis
ana_params['optimal_weights_basis_labels'][qbn] = basis_labels
self.save_processed_data()
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
ana_params = self.proc_data_dict['analysis_params_dict']
for qbn in self.qb_names:
mod_freq = float(
rdd[0].get(f'ro_mod_freq_{qbn}',
self.get_hdf_param_value(f"Instrument settings/{qbn}",
'ro_mod_freq')))
tbase = rdd[0]['hard_sweep_points']
basis_labels = pdd["analysis_params_dict"][
'optimal_weights_basis_labels'][qbn]
title = 'Optimal SNR weights ' + qbn + \
"".join(['\n' + rddi["timestamp"] for rddi in rdd]) \
+ f'\nWeight Basis: {basis_labels}'
plot_name = f"weights_{qbn}"
xlabel = "Time, $t$"
modulation = np.exp(2j * np.pi * mod_freq * tbase)
for ax_id, (state, ttrace) in \
enumerate(ana_params["timetraces"][qbn].items()):
for func, label in zip((np.real, np.imag), ('I', "Q")):
# plot timetraces for each state, I and Q channels
self.plot_dicts[f"{plot_name}_{state}_{label}"] = {
'fig_id': plot_name,
'ax_id': ax_id,
'plotfn': self.plot_line,
'xvals': tbase,
"marker": "",
'yvals': func(ttrace*modulation),
'ylabel': 'Voltage, $V$',
'yunit': 'V',
"sharex": True,
"setdesc": label + f"_{state}",
"setlabel": "",
"do_legend":True,
"legend_pos": "upper right",
'numplotsx': 1,
'numplotsy': len(rdd) + 1, # #states + 1 for weights
'plotsize': (10,
(len(rdd) + 1) * 3), # 3 inches per plot
'title': title if ax_id == 0 else ""}
ax_id = len(ana_params["timetraces"][qbn]) # id plots for weights
for i, weights in enumerate(ana_params['optimal_weights'][qbn]):
for func, label in zip((np.real, np.imag), ('I', "Q")):
self.plot_dicts[f"{plot_name}_weights_{label}_{i}"] = {
'fig_id': plot_name,
'ax_id': ax_id,
'plotfn': self.plot_line,
'xvals': tbase,
'xlabel': xlabel,
"setlabel": "",
"marker": "",
'xunit': 's',
'yvals': func(weights * modulation),
'ylabel': 'Voltage, $V$ (arb.u.)',
"sharex": True,
"xrange": (0, self.get_param_value('tmax', 1200e-9, 0)),
"setdesc": label + f"_{i+1}",
"do_legend": True,
"legend_pos": "upper right",
}
class MultiQutrit_Singleshot_Readout_Analysis(MultiQubit_TimeDomain_Analysis):
"""
Analysis class for parallel SSRO qutrit/qubit calibration. It is a
child class of MultiQubit_TimeDomain_Analysis, as it uses the same
functions to
- preprocess the data to remove active reset/preselection
- extract the channel map
- reorder the data per qubit
Note that in the future, it might be useful to transfer these
functionalities to the base analysis.
"""
def __init__(self,
options_dict: dict = None, auto=True, **kw):
'''
options dict options:
'nr_bins' : number of bins to use for the histograms
'post_select' :
'post_select_threshold' :
'nr_samples' : number of different samples (e.g. ground and excited = 2)
'sample_0' : index of first sample (ground-state)
'sample_1' : index of second sample (first excited-state)
'max_datapoints' : maximum number of datapoints for the cumulative fit
'hist_scale' : scale for the y-axis of the 1D histograms: "linear" or "log"
'verbose' : see BaseDataAnalysis
'presentation_mode' : see BaseDataAnalysis
'classif_method': how to classify the data.
'ncc' : Nearest Cluster Center
'gmm': gaussian mixture model (the default, see DEFAULT_CLASSIF)
'threshold': finds optimal vertical and horizontal thresholds.
'classif_kw': kw to pass to the classifier
see BaseDataAnalysis for more.
'''
super().__init__(options_dict=options_dict, auto=False,
**kw)
self.params_dict = {
'measurementstring': 'measurementstring',
'measured_data': 'measured_data',
'value_names': 'value_names',
'value_units': 'value_units'}
self.numeric_params = []
self.DEFAULT_CLASSIF = "gmm"
self.classif_method = self.options_dict.get("classif_method",
self.DEFAULT_CLASSIF)
self.create_job(options_dict=options_dict, auto=auto, **kw)
if auto:
self.run_analysis()
def extract_data(self):
super().extract_data()
self.preselection = \
self.get_param_value("preparation_params",
{}).get("preparation_type", "wait") == "preselection"
default_states_info = defaultdict(dict)
default_states_info.update({"g": {"label": r"$|g\rangle$"},
"e": {"label": r"$|e\rangle$"},
"f": {"label": r"$|f\rangle$"}
})
self.states_info = \
self.get_param_value("states_info",
{qbn: deepcopy(default_states_info)
for qbn in self.qb_names})
def process_data(self):
"""
Create the histograms based on the raw data
"""
######################################################
# Separating data into shots for each level #
######################################################
super().process_data()
del self.proc_data_dict['data_to_fit'] # not used in this analysis
n_states = len(self.cp.states)
# prepare data in convenient format, i.e. arrays per qubit and per state
# e.g. {'qb1': {'g': np.array of shape (n_shots, n_ro_ch}, ...}, ...}
shots_per_qb = dict() # store shots per qb and per state
presel_shots_per_qb = dict() # store preselection ro
means = defaultdict(OrderedDict) # store mean per qb for each ro_ch
pdd = self.proc_data_dict # for convenience of notation
for qbn in self.qb_names:
# shape is (n_shots, n_ro_ch) i.e. one column for each ro_ch
shots_per_qb[qbn] = \
np.asarray(list(
pdd['meas_results_per_qb'][qbn].values())).T
# make 2D array in case only one channel (1D array)
if len(shots_per_qb[qbn].shape) == 1:
shots_per_qb[qbn] = np.expand_dims(shots_per_qb[qbn],
axis=-1)
for i, qb_state in enumerate(self.cp.get_states(qbn)[qbn]):
means[qbn][qb_state] = np.mean(shots_per_qb[qbn][i::n_states],
axis=0)
if self.preselection:
# preselection shots were removed so look at raw data
# and look at only the first out of every two readouts
presel_shots_per_qb[qbn] = \
np.asarray(list(
pdd['meas_results_per_qb_raw'][qbn].values())).T[::2]
# make 2D array in case only one channel (1D array)
if len(presel_shots_per_qb[qbn].shape) == 1:
presel_shots_per_qb[qbn] = \
np.expand_dims(presel_shots_per_qb[qbn], axis=-1)
# create placeholders for analysis data
pdd['analysis_params'] = dict()
pdd['data'] = defaultdict(dict)
pdd['analysis_params']['state_prob_mtx'] = defaultdict(dict)
pdd['analysis_params']['classifier_params'] = defaultdict(dict)
pdd['analysis_params']['means'] = defaultdict(dict)
pdd['analysis_params']['snr'] = defaultdict(dict)
pdd['analysis_params']["n_shots"] = len(shots_per_qb[qbn])
pdd['analysis_params']['slopes'] = defaultdict(dict)
self.clf_ = defaultdict(dict)
# create placeholders for analysis with preselection
if self.preselection:
pdd['data_masked'] = defaultdict(dict)
pdd['analysis_params']['state_prob_mtx_masked'] = defaultdict(dict)
pdd['analysis_params']['n_shots_masked'] = defaultdict(dict)
n_shots = len(shots_per_qb[qbn]) // n_states
for qbn, qb_shots in shots_per_qb.items():
# create mapping to integer following ordering in cal_points.
# Notes:
# 1) the state_integer should correspond to the order of pdd[qbn]['means']
# so that, when passing the init_means to the GMM model, each gaussian
# component predicts the state_integer associated with that state
# 2) the mapping cannot be preestablished because the GMM predicts labels
# in range(n_components). For instance, if a qubit has states "g", "f"
# then the model will predict 0's and 1's, so the typical g=0, e=1, f=2
# mapping would fail. The number of different states can be different
# for each qubit and therefore the mapping should also be done per qubit.
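# Example of the resulting mapping (illustrative): a qubit prepared
# only in "g" and "f" gets {"g": 0, "f": 1}; the usual g=0, e=1,
# f=2 convention would mislabel "f" here.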
state_integer = 0
for state in means[qbn].keys():
self.states_info[qbn][state]["int"] = state_integer
state_integer += 1
# note that if some states are repeated, they are assigned the same label
qb_states_integer_repr = \
[self.states_info[qbn][s]["int"]
for s in self.cp.get_states(qbn)[qbn]]
prep_states = np.tile(qb_states_integer_repr, n_shots)
pdd['analysis_params']['means'][qbn] = deepcopy(means[qbn])
pdd['data'][qbn] = dict(X=deepcopy(qb_shots),
prep_states=prep_states)
assert np.ndim(qb_shots) == 2, "Data must be a two D array. " \
"Received shape {}, ndim {}"\
.format(qb_shots.shape, np.ndim(qb_shots))
pred_states, clf_params, clf = \
self._classify(qb_shots, prep_states,
method=self.classif_method, qb_name=qbn,
**self.options_dict.get("classif_kw", dict()))
# order "unique" states to have in usual order "gef" etc.
state_labels_ordered = self._order_state_labels(
list(means[qbn].keys()))
# translate to corresponding integers
state_labels_ordered_int = [self.states_info[qbn][s]['int'] for s in
state_labels_ordered]
fm = self.fidelity_matrix(prep_states, pred_states,
labels=state_labels_ordered_int)
# save fidelity matrix and classifier
pdd['analysis_params']['state_prob_mtx'][qbn] = fm
pdd['analysis_params']['classifier_params'][qbn] = clf_params
if 'means_' in clf_params:
pdd['analysis_params']['snr'][qbn] = \
self._extract_snr(clf, state_labels_ordered)
pdd['analysis_params']['slopes'][qbn] = self._extract_slopes(
clf, state_labels_ordered)
self.clf_[qbn] = clf
if self.preselection:
# redo the analysis: first classify the preselection readout,
# then mask the shots accordingly
pred_presel = self.clf_[qbn].predict(presel_shots_per_qb[qbn])
presel_filter = \
pred_presel == self.states_info[qbn]['g']['int']
if np.sum(presel_filter) == 0:
log.warning(f"{qbn}: No data left after preselection! "
f"Skipping preselection data & figures.")
continue
qb_shots_masked = qb_shots[presel_filter]
prep_states = prep_states[presel_filter]
pred_states = self.clf_[qbn].predict(qb_shots_masked)
fm = self.fidelity_matrix(prep_states, pred_states,
labels=state_labels_ordered_int)
pdd['data_masked'][qbn] = dict(X=deepcopy(qb_shots_masked),
prep_states=deepcopy(prep_states))
pdd['analysis_params']['state_prob_mtx_masked'][qbn] = fm
pdd['analysis_params']['n_shots_masked'][qbn] = \
qb_shots_masked.shape[0]
self.save_processed_data()
@staticmethod
def _extract_snr(gmm=None, state_labels=None, clf_params=None,):
"""
Extracts SNR between pairs of states. SNR is defined as dist(m1,
m2)/mean(std1, std2), where dist = L2 norm, m1, m2 are the means of the
pair of states and std1, std2 are the "standard deviation" (obtained
from the confidence ellipse of the covariance if 2D).
:param gmm: Gaussian mixture model
:param clf_params: Classifier parameters. Not implemented but could
reconstruct gmm from clf params. Would be more analysis friendly.
:param state_labels (list): state labels for the SNR dict. If not provided,
tuples indicating the indices of the state pairs are used.
:return: snr (dict): e.g. {"ge": 2.4} or {"ge": 3, "ef": 2, "gf": 4}
"""
snr = {}
if clf_params is not None:
raise NotImplementedError("Look in a_tools.predict_probas to "
"recreate GMM from clf_params")
means = MultiQutrit_Singleshot_Readout_Analysis._get_means(gmm)
covs = MultiQutrit_Singleshot_Readout_Analysis._get_covariances(gmm)
n_states = len(means)
if n_states >= 2:
state_pairs = list(itertools.combinations(np.arange(n_states), 2))
for sp in state_pairs:
m0, m1 = means[sp[0]], means[sp[1]]
if len(m0) == 1:
# pad second element to treat as 2d
m0, m1 = np.concatenate([m0, [0]]), np.concatenate([m1, [0]])
dist = np.linalg.norm(m0 - m1)
std0_candidates = math.find_intersect_line_ellipse(
math.slope(m0 - m1),
*math.get_ellipse_radii_and_rotation(covs[sp[0]]))
# pick the intersection point lying towards the other mean
idx = np.argmin([np.linalg.norm(std0_candidates[0] - m1),
np.linalg.norm(std0_candidates[1] -
m1)]).flatten()[0]
std0 = np.linalg.norm(std0_candidates[idx])
std1_candidates = math.find_intersect_line_ellipse(
math.slope(m0 - m1),
*math.get_ellipse_radii_and_rotation(covs[sp[1]]))
idx = np.argmin([np.linalg.norm(std1_candidates[0] - m0),
np.linalg.norm(std1_candidates[1] -
m0)]).flatten()[0]
std1 = np.linalg.norm(std1_candidates[idx])
label = state_labels[sp[0]] + state_labels[sp[1]] \
if state_labels is not None else sp
snr.update({label: dist/np.mean([std0, std1])})
return snr
@staticmethod
def _extract_slopes(gmm=None, state_labels=None, clf_params=None, means=None):
"""
Extracts slopes of line connecting two means of different states.
:param gmm: Gaussian mixture model from which means are extracted
:param clf_params: Classifier parameters from which means are extracted.
:param state_labels (list): state labels for the slopes dict. If not
provided, tuples indicating the indices of the state pairs are used.
:param means (array): means of the states; used if neither gmm nor
clf_params is provided.
:return: slopes (dict): e.g. {"ge": 0.1} or {"ge": 0.1, "ef": 2,
"gf": 0.4}
"""
slopes = {}
if clf_params is not None:
if not 'means_' in clf_params:
raise ValueError(f"could not find 'means_' in clf_params:"
f" {clf_params}. Please pass in means directly "
f"provide a classifier that fits means.")
means = clf_params.get('means_')
if gmm is not None:
means = MultiQutrit_Singleshot_Readout_Analysis._get_means(gmm)
if means is None:
raise ValueError('Please provide one of kwarg gmm, clf_params or '
'means to extract the means of the different '
'distributions')
n_states = len(means)
if n_states >= 2:
state_pairs = list(itertools.combinations(np.arange(n_states), 2))
for sp in state_pairs:
m0, m1 = means[sp[0]], means[sp[1]]
if len(m0) == 1:
# pad second element to treat as 2d
m0, m1 = np.concatenate([m0, [0]]), np.concatenate([m1, [0]])
label = state_labels[sp[0]] + state_labels[sp[1]] \
if state_labels is not None else sp
slopes.update({label: math.slope(m0 - m1)})
return slopes
def _classify(self, X, prep_state, method, qb_name, **kw):
"""
Args:
X: measured data to classify
prep_state: prepared states (true values)
type: classification method
qb_name: name of the qubit to classify
Returns:
"""
if np.ndim(X) == 1:
X = X.reshape((-1,1))
params = dict()
if method == 'ncc':
ncc = SSROQutrit.NCC(
self.proc_data_dict['analysis_params']['means'][qb_name])
pred_states = ncc.predict(X)
return pred_states, dict(), ncc
elif method == 'gmm':
cov_type = kw.pop("covariance_type", "tied")
# "full" allows a separate covariance matrix for each level; for
# other options see the GaussianMixture documentation.
# Repeated states are assumed to belong to the same component,
# which is why this classification method should not be used for
# multiplexed SSRO analysis.
n_qb_states = len(np.unique(self.cp.get_states(qb_name)[qb_name]))
# give same weight to each class by default
weights_init = kw.pop("weights_init",
np.ones(n_qb_states)/n_qb_states)
gm = GM(n_components=n_qb_states,
covariance_type=cov_type,
random_state=0,
weights_init=weights_init,
means_init=[mu for _, mu in
self.proc_data_dict['analysis_params']
['means'][qb_name].items()], **kw)
gm.fit(X)
pred_states = np.argmax(gm.predict_proba(X), axis=1)
params['means_'] = gm.means_
params['covariances_'] = gm.covariances_
params['covariance_type'] = gm.covariance_type
params['weights_'] = gm.weights_
params['precisions_cholesky_'] = gm.precisions_cholesky_
return pred_states, params, gm
elif method == "threshold":
tree = DTC(max_depth=kw.pop("max_depth", X.shape[1]),
random_state=0, **kw)
tree.fit(X, prep_state)
pred_states = tree.predict(X)
params["thresholds"], params["mapping"] = \
self._extract_tree_info(tree, self.cp.get_states(qb_name)[qb_name])
if len(params["thresholds"]) != X.shape[1]:
msg = "Best 2 thresholds to separate this data lie on axis {}" \
", most probably because the data is not well separated." \
"The classifier attribute clf_ can still be used for " \
"classification (which was done to obtain the state " \
"assignment probability matrix), but only the threshold" \
" yielding highest gini impurity decrease was returned." \
"\nTo circumvent this problem, you can either choose" \
" a second threshold manually (fidelity will likely be " \
"worse), make the data more separable, or use another " \
"classification method."
log.warning(msg.format(list(params['thresholds'].keys())[0]))
return pred_states, params, tree
elif method == "threshold_brute":
raise NotImplementedError()
else:
raise NotImplementedError("Classification method: {} is not "
"implemented. Available methods: {}"
.format(method, ['ncc', 'gmm',
'threshold']))
@staticmethod
def _get_covariances(gmm, cov_type=None):
return SSROQutrit._get_covariances(gmm, cov_type=cov_type)
@staticmethod
def _get_means(gmm):
return gmm.means_
@staticmethod
def fidelity_matrix(prep_states, pred_states, levels=('g', 'e', 'f'),
plot=False, labels=None, normalize=True):
return SSROQutrit.fidelity_matrix(prep_states, pred_states,
levels=levels, plot=plot,
normalize=normalize, labels=labels)
@staticmethod
def plot_fidelity_matrix(fm, target_names,
title="State Assignment Probability Matrix",
auto_shot_info=True, ax=None,
cmap=None, normalize=True, show=False):
return SSROQutrit.plot_fidelity_matrix(
fm, target_names, title=title, ax=ax,
auto_shot_info=auto_shot_info,
cmap=cmap, normalize=normalize, show=show)
@staticmethod
def _extract_tree_info(tree_clf, class_names=None):
return SSROQutrit._extract_tree_info(tree_clf,
class_names=class_names)
@staticmethod
def _to_codeword_idx(tup):
return SSROQutrit._to_codeword_idx(tup)
@staticmethod
def plot_scatter_and_marginal_hist(data, y_true=None, plot_fitting=False,
**kwargs):
return SSROQutrit.plot_scatter_and_marginal_hist(
data, y_true=y_true, plot_fitting=plot_fitting, **kwargs)
@staticmethod
def plot_clf_boundaries(X, clf, ax=None, cmap=None, spacing=None):
return SSROQutrit.plot_clf_boundaries(X, clf, ax=ax, cmap=cmap,
spacing=spacing)
@staticmethod
def plot_std(mean, cov, ax, n_std=1.0, facecolor='none', **kwargs):
return SSROQutrit.plot_std(mean, cov, ax,n_std=n_std,
facecolor=facecolor, **kwargs)
@staticmethod
def plot_1D_hist(data, y_true=None, plot_fitting=True,
**kwargs):
return SSROQutrit.plot_1D_hist(data, y_true=y_true,
plot_fitting=plot_fitting, **kwargs)
@staticmethod
def _order_state_labels(states_labels,
order="gefhabcdijklmnopqrtuvwxyz0123456789"):
"""
Orders state labels according to the provided ordering, e.g. for the
default order, ("f", "e", "g") would become ("g", "e", "f").
Args:
states_labels (list, tuple): list of state labels
order (str): custom string order
Returns:
The state labels sorted according to order (as a numpy array).
"""
try:
indices = [order.index(s) for s in states_labels]
order_for_states = np.argsort(indices).astype(np.int32)
return np.array(states_labels)[order_for_states]
except Exception as e:
log.error(f"Could not find order in state_labels:"
f"{states_labels}. Probably because one or several "
f"states are not part of '{order}'. Error: {e}."
f" Returning same as input order")
return states_labels
def plot(self, **kwargs):
if not self.get_param_value("plot", True):
return # no plotting if "plot" is False
cmap = plt.get_cmap('tab10')
show = self.options_dict.get("show", False)
pdd = self.proc_data_dict
for qbn in self.qb_names:
n_qb_states = len(np.unique(self.cp.get_states(qbn)[qbn]))
tab_x = a_tools.truncate_colormap(cmap, 0,
n_qb_states/10)
kwargs = {
"states": list(pdd["analysis_params"]['means'][qbn].keys()),
"xlabel": "Integration Unit 1, $u_1$",
"ylabel": "Integration Unit 2, $u_2$",
"scale": self.options_dict.get("hist_scale", "log"),
"cmap":tab_x}
data_keys = [k for k in list(pdd.keys()) if
k.startswith("data") and qbn in pdd[k]]
for dk in data_keys:
data = pdd[dk][qbn]
title = self.raw_data_dict['timestamp'] + f" {qbn} " + dk + \
"\n{} classifier".format(self.classif_method)
kwargs.update(dict(title=title))
# plot data and histograms
n_shots_to_plot = self.get_param_value('n_shots_to_plot', None)
if n_shots_to_plot is not None:
n_shots_to_plot *= n_qb_states
if data['X'].shape[1] == 1:
if self.classif_method == "gmm":
kwargs['means'] = self._get_means(self.clf_[qbn])
kwargs['std'] = np.sqrt(self._get_covariances(self.clf_[qbn]))
else:
# no Gaussian distribution can be plotted
kwargs['plot_fitting'] = False
kwargs['colors'] = cmap(np.unique(data['prep_states']))
fig, main_ax = self.plot_1D_hist(data['X'][:n_shots_to_plot],
data["prep_states"][:n_shots_to_plot],
**kwargs)
else:
fig, axes = self.plot_scatter_and_marginal_hist(
data['X'][:n_shots_to_plot],
data["prep_states"][:n_shots_to_plot],
**kwargs)
# plot clf_boundaries
main_ax = fig.get_axes()[0]
self.plot_clf_boundaries(data['X'], self.clf_[qbn], ax=main_ax,
cmap=tab_x)
# plot means and std dev
data_means = pdd['analysis_params']['means'][qbn]
try:
clf_means = self._get_means(self.clf_[qbn])
except Exception as e: # not a gmm model--> no clf_means.
clf_means = []
try:
covs = self._get_covariances(self.clf_[qbn])
except Exception as e: # not a gmm model--> no cov.
covs = []
for i, data_mean in enumerate(data_means.values()):
main_ax.scatter(data_mean[0], data_mean[1], color='w', s=80)
if len(clf_means):
main_ax.scatter(clf_means[i][0], clf_means[i][1],
color='k', s=80)
if len(covs) != 0:
self.plot_std(clf_means[i] if len(clf_means)
else data_mean,
covs[i],
n_std=1, ax=main_ax,
edgecolor='k', linestyle='--',
linewidth=1)
# plot thresholds and mapping
plt_fn = {0: main_ax.axvline, 1: main_ax.axhline}
thresholds = pdd['analysis_params'][
'classifier_params'][qbn].get("thresholds", dict())
mapping = pdd['analysis_params'][
'classifier_params'][qbn].get("mapping", dict())
for k, thres in thresholds.items():
plt_fn[k](thres, linewidth=2,
label="threshold i.u. {}: {:.5f}".format(k, thres),
color='k', linestyle="--")
main_ax.legend(loc=[0.2,-0.62])
ax_frac = {0: (0.07, 0.1), # locations for codewords
1: (0.83, 0.1),
2: (0.07, 0.9),
3: (0.83, 0.9)}
for cw, state in mapping.items():
main_ax.annotate("0b{:02b}".format(cw) + f":{state}",
ax_frac[cw], xycoords='axes fraction')
self.figs[f'{qbn}_{self.classif_method}_classifier_{dk}'] = fig
if show:
plt.show()
# state assignment prob matrix
title = self.raw_data_dict['timestamp'] + "\n{} State Assignment" \
" Probability Matrix\nTotal # shots:{}"\
.format(self.classif_method,
self.proc_data_dict['analysis_params']['n_shots'])
fig = self.plot_fidelity_matrix(
self.proc_data_dict['analysis_params']['state_prob_mtx'][qbn],
self._order_state_labels(kwargs['states']),
title=title,
show=show,
auto_shot_info=False)
self.figs[f'{qbn}_state_prob_matrix_{self.classif_method}'] = fig
if self.preselection and \
len(pdd['analysis_params']['state_prob_mtx_masked'][qbn]) != 0:
title = self.raw_data_dict['timestamp'] + \
"\n{} State Assignment Probability Matrix Masked"\
"\nTotal # shots:{}".format(
self.classif_method,
self.proc_data_dict['analysis_params']['n_shots_masked'][qbn])
fig = self.plot_fidelity_matrix(
pdd['analysis_params']['state_prob_mtx_masked'][qbn],
self._order_state_labels(kwargs['states']),
title=title, show=show, auto_shot_info=False)
fig_key = f'{qbn}_state_prob_matrix_masked_{self.classif_method}'
self.figs[fig_key] = fig
class MultiQutritActiveResetAnalysis(MultiQubit_TimeDomain_Analysis):
"""
Analyzes the performance of (two- or three-level) active reset
(Measured via pycqed.measurement.calibration.single_qubit_gates.ActiveReset,
see the corresponding doc string for details about the sequence).
Extracts the reset rate (how fast is the reset) and the residual excited
state population.
Helps to choose the number of reset repetitions for experiments making use
of active reset, by considering the tradeoff between the time required
for reset and the residual excited state population.
"""
def __init__(self, options_dict: dict = None, auto=True, **kw):
'''
options dict options:
plot_raw_shots (bool): whether or not to plot histograms/scatter
plots of raw shots. False by default. Slows down the analysis as
it creates one plot per readout.
see BaseDataAnalysis for more.
'''
if options_dict is None:
options_dict = {}
options_dict.update({"TwoD": True})
super().__init__(options_dict=options_dict, auto=False,
**kw)
self.create_job(options_dict=options_dict, auto=auto, **kw)
if auto:
self.run_analysis()
def extract_data(self):
super().extract_data()
if self.qb_names is None:
# try to get qb_names from cal_points
try:
cp = CalibrationPoints.from_string(
self.get_param_value('cal_points', None))
self.qb_names = deepcopy(cp.qb_names)
except:
# try to get them from metadata
self.qb_names = self.get_param_value('ro_qubits', None)
if self.qb_names is None:
raise ValueError('Could not find qb_names. Please'
'provide qb_names to the analysis'
'or ensure they are in calibration points'
'or the metadata under "qb_names" '
'or "ro_qubits"')
def process_data(self):
super().process_data()
# reshape data per prepared state before reset for each pg, pe, (pf),
# for the projected data dict and possibly the readout-corrected version
pdd = 'projected_data_dict'
# self.proc_data_dict[pdd]["qb10"]["pe"] = self.proc_data_dict[pdd]["qb10"]["pe"].T
# self.proc_data_dict[pdd]["qb10"]["pg"] = (1 - self.proc_data_dict[pdd]["qb10"]["pe"])
for suffix in ["", "_corrected"]:
projdd_per_prep_state = \
deepcopy(self.proc_data_dict.get(pdd + suffix, {}))
for qbn, data_qbi in \
self.proc_data_dict.get(pdd + suffix, {}).items():
prep_states = self.sp.get_values("initialize")
for j, (state, data) in enumerate(data_qbi.items()):
n_ro = data.shape[0] # infer number of readouts per sequence
projdd_per_prep_state[qbn][state] = dict()
for i, prep_state in enumerate(prep_states):
projdd_per_prep_state[qbn][state].update(
{f"prep_{prep_state}":
data[i*n_ro//len(prep_states):
(i+1)*n_ro//len(prep_states),
:]})
if len(projdd_per_prep_state):
self.proc_data_dict[pdd + '_per_prep_state' + suffix] = \
projdd_per_prep_state
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
if "ro_separation" in self.get_param_value("preparation_params"):
ro_sep = \
self.get_param_value("preparation_params")["ro_separation"]
else:
return
base_data_key = 'projected_data_dict_per_prep_state'
data_keys = [base_data_key]
if self.proc_data_dict.get(base_data_key + '_corrected', False):
data_keys += [base_data_key + '_corrected']
for dk, suffix in zip(data_keys, ('', '_corrected')):
for qbn in self.qb_names:
probs = self.proc_data_dict[dk][qbn]
for prep_state, g_pop in probs.get('pg', {}).items():
if "g" in prep_state:
continue # no need to fit reset on ground state
for seq_nr, g_pop_per_seq in enumerate(g_pop.T):
excited_pop = 1 - g_pop_per_seq
if self.num_cal_points != 0:
# do not fit data with cal points
excited_pop = excited_pop[:-self.num_cal_points]
if len(excited_pop) < 3:
log.warning('Not enough reset pulses to fit a reset '
'rate, increase the number of reset '
'pulses to 3 or more ')
continue
time = np.arange(len(excited_pop)) * ro_sep
# linear rate approx
rate_guess = (excited_pop[0] - excited_pop[-1]) / time[-1]
decay = lambda time, a, rate, offset: \
a * np.exp(-2 * np.pi * rate * time) + offset
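# Illustration of the guess above (illustrative numbers): with
# ro_sep = 1e-6 s and excited_pop = [0.4, 0.2, 0.1],
# time = [0, 1e-6, 2e-6] and rate_guess = (0.4 - 0.1) / 2e-6 = 1.5e5.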
decay_model = lmfit.Model(decay)
decay_model.set_param_hint('a', value=excited_pop[0])
decay_model.set_param_hint('rate', value=rate_guess)
decay_model.set_param_hint('n', value=1, vary=False)
decay_model.set_param_hint('offset', value=0)
params = decay_model.make_params()
key = f'fit_rate_{qbn}_{prep_state}_seq_{seq_nr}{suffix}'
self.fit_dicts[key] = {
'fit_fn': decay_model.func,
'fit_xvals': {'time': time},
'fit_yvals': {'data': excited_pop},
'guess_pars': params}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
apd = self.proc_data_dict['analysis_params_dict']
base_data_key = 'projected_data_dict_per_prep_state'
data_keys = [base_data_key]
if self.proc_data_dict.get(base_data_key + '_corrected', False):
data_keys += [base_data_key + '_corrected']
for dk, suffix in zip(data_keys, ('', '_corrected')):
for qbn in self.qb_names:
probs = self.proc_data_dict[dk][qbn]
for prep_state, g_pop in probs.get('pg', {}).items():
if "g" in prep_state:
continue # no fit for reset on ground state
for seq_nr in range(len(g_pop.T)):
key = f'fit_rate_{qbn}_{prep_state}_seq_{seq_nr}{suffix}'
for param, param_key in zip(('rate', 'offset'),
("reset_rate",
"residual_population")):
pk = param_key + suffix
res = self.fit_res[key]
param_val = res.params[param].value
param_stderr = res.params[param].stderr
if not pk in apd:
apd[pk] = defaultdict(dict)
if not prep_state in apd[pk][qbn]:
apd[pk][qbn][prep_state] = \
defaultdict(dict)
apd[pk][qbn][prep_state]["val"] = \
apd[pk][qbn][prep_state].get("val", []) + \
[param_val]
apd[pk][qbn][prep_state]["stderr"] = \
apd[pk][qbn][prep_state].get("stderr", []) + \
[param_stderr]
self.save_processed_data(key="analysis_params_dict")
def prepare_plots(self):
# prepare raw population plots
legend_bbox_to_anchor = (1, -0.20)
legend_pos = 'upper right'
legend_ncol = 2 #len(self.sp.get_values("initialize"))
# overwrite baseAnalysis plots
self.plot_dicts = OrderedDict()
basekey = 'projected_data_dict_per_prep_state'
suffixes = ('', '_corrected')
keys = {basekey + suffix: suffix for suffix in suffixes
if basekey + suffix in self.proc_data_dict}
for k in keys:
for qbn, data_qbi in self.proc_data_dict[k].items():
for i, (state, data) in enumerate(data_qbi.items()):
for j, (prep_state, data_prep_state) in \
enumerate(data.items()):
for seq_nr, pop in enumerate(data_prep_state.T):
plt_key = 'data_{}_{}_{}_{}_{}'.format(
k, qbn, state, prep_state, seq_nr)
fig_key = f"populations_{qbn}_{prep_state}{keys[k]}"
self.plot_dicts[plt_key] = {
'plotfn': self.plot_line,
'fig_id':fig_key,
'xvals': np.arange(len(pop)),
'xlabel': "Reset cycle, $n$",
'xunit': "",
'yvals': pop,
'yerr': self._std_error(
pop, self.get_param_value('n_shots')),
'ylabel': 'Population, $P$',
'yunit': '',
'yscale': self.get_param_value("yscale", "log"),
'setlabel': self._get_pop_label(state, k,
not self._has_reset_pulses(seq_nr),
),
'title': self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring']
+ " " + prep_state,
'titlepad': 0.2,
'linestyle': '-',
'color': f'C{i}',
'alpha': 0.5 if seq_nr == 0 else 1,
'do_legend': True,
'legend_ncol': legend_ncol,
'legend_bbox_to_anchor': legend_bbox_to_anchor,
'legend_pos': legend_pos,
'legend_fontsize': 5}
# add feedback params info to plot
textstr = self._get_feedback_params_text_str(qbn)
self.plot_dicts[f'text_msg_{qbn}_' \
f'{prep_state}{keys[k]}'] = {
'fig_id': f"populations_{qbn}_{prep_state}{keys[k]}",
'ypos': -0.21,
'xpos': 0,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text,
"fontsize": "x-small",
'text_string': textstr}
# add thermal population line
g_state_prep_g = \
data_qbi.get("pg", {}).get('prep_g', None)
# taking first ro of first sequence as estimate for
# thermal population
if g_state_prep_g is not None and seq_nr == 0:
p_therm = 1 - g_state_prep_g.flatten()[0]
self.plot_dicts[plt_key + "_thermal"] = {
'plotfn': self.plot_line,
'fig_id': fig_key,
'xvals': np.arange(len(pop)),
'yvals': p_therm * np.ones_like(pop),
'setlabel': "$P_\mathrm{therm}$",
'linestyle': '--',
'marker': "",
'color': 'k',
'do_legend': True,
'legend_ncol': legend_ncol,
'legend_bbox_to_anchor': legend_bbox_to_anchor,
'legend_pos': legend_pos,
'legend_fontsize': 5}
# plot fit results
fit_key = \
f'fit_rate_{qbn}_{prep_state}_seq_{seq_nr}{keys[k]}'
if fit_key in self.fit_res and \
not fit_key in self.plot_dicts:
res = self.fit_res[fit_key]
rate = res.best_values['rate'] * 1e-6
residual_pop = res.best_values['offset']
superscript = "{NR}" if seq_nr == 0 \
else f"{{c {seq_nr}}}" if "corrected" \
in fit_key else f"{{{seq_nr}}}"
label = rf'fit: $\Gamma_{prep_state[-1]}^{superscript}' \
rf' = {rate:.3f}$ MHz'
if seq_nr != 0:
# add residual population unless this is the no-reset sequence
label += rf", $P_\mathrm{{exc}}^\mathrm{{res}}$" \
rf" = {residual_pop*100:.2f} %"
self.plot_dicts[fit_key] = {
'plotfn': self.plot_fit,
'fig_id':
f"rate_{qbn}{keys[k]}",
'xvals': res.userkws['time'],
'xlabel': "Reset cycle, $n$",
'fit_res': res,
'xunit': "s",
'ylabel': 'Population, $P$',
'yscale': self.get_param_value("yscale", "log"),
'setlabel': label,
'title': self.raw_data_dict['timestamp'] + ' ' +
f"Reset rates {qbn}{keys[k]}",
'color': f'C{j}',
'alpha': 1 if self._has_reset_pulses(seq_nr) else 0.5,
'do_legend': seq_nr in [0, 1],
'legend_ncol': legend_ncol,
'legend_bbox_to_anchor': legend_bbox_to_anchor,
'legend_pos': legend_pos,
'legend_fontsize': 5}
self.plot_dicts[fit_key + 'data'] = {
'plotfn': self.plot_line,
'fig_id':
f"rate_{qbn}{keys[k]}",
'xvals': res.userkws['time'],
'xlabel': "Time, $t$",
'xunit': "s",
'yvals': res.data,
'yerr': self._std_error(
res.data, self.get_param_value('n_shots')),
'ylabel': r'Excited Pop., $P_\mathrm{exc}$',
'yunit': '',
'setlabel':
"data" if
self._has_reset_pulses(seq_nr)
else "data NR",
'linestyle': 'none',
'color': f'C{j}',
'alpha': 1 if self._has_reset_pulses(seq_nr) else 0.5,
"do_legend": True,
'legend_ncol': legend_ncol,
'legend_bbox_to_anchor': legend_bbox_to_anchor,
'legend_pos': legend_pos,
'legend_fontsize': 5
}
def _has_reset_pulses(self, seq_nr):
return not self.sp.get_values('pulse_off')[seq_nr]
def plot(self, **kw):
super().plot(**kw)
# add second axis to population figures
from matplotlib.ticker import MaxNLocator
for axname, ax in self.axs.items():
if "populations" in axname:
if "ro_separation" in self.get_param_value("preparation_params"):
ro_sep = \
self.get_param_value("preparation_params")["ro_separation"]
timeax = ax.twiny()
timeax.set_xlabel(r"Time ($\mu s$)")
timeax.set_xlim(0, ax.get_xlim()[1] * ro_sep * 1e6)
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
# plot raw readouts
if self.get_param_value('plot_raw_shots'):
prep_states = self.sp.get_values("initialize")
n_seqs = self.sp.length(1)
for qbn, shots in self.proc_data_dict['single_shots_per_qb'].items():
# shots are organized as follows, from outer to inner loop:
# shot, prep-state, reset-ro, seq-nr
n_ro = len(shots) // self.get_param_value("n_shots")
n_ro_per_prep_state = n_ro // (n_seqs * len(prep_states))
for i, prep_state in enumerate(prep_states):
for j in range(n_ro_per_prep_state):
for seq_nr in range(n_seqs):
ro = i * n_ro_per_prep_state * len(prep_states) \
+ j * len(prep_states) + seq_nr
shots_single_ro = shots[ro::n_ro]
# first sequence is "no reset"
seq_label = 'NR' if seq_nr == 0 else seq_nr
fig_key = \
f"histograms_seq_{seq_label}_reset_cycle_{j}"
if fig_key not in self.figs:
self.figs[fig_key], _ = plt.subplots()
if shots.shape[1] == 2:
plot_func = \
MultiQutrit_Singleshot_Readout_Analysis.\
plot_scatter_and_marginal_hist
kwargs = dict(create_axes=not bool(i))
elif shots.shape[1] == 1:
plot_func = \
MultiQutrit_Singleshot_Readout_Analysis.\
plot_1D_hist
kwargs = {}
else:
raise NotImplementedError(
"Raw shot plotting not implemented for"
f" {shots.shape[1]} dimensions")
colors = [f'C{i}']
fig, _ = plot_func(shots_single_ro,
y_true=[i]*shots_single_ro.shape[0],
colors=colors,
legend=True,
legend_labels={i: "prep " + prep_state},
fig=self.figs[fig_key], **kwargs)
fig.suptitle(f'Reset cycle: {j}')
def _get_feedback_params_text_str(self, qbn):
str = "Reset cycle time: "
ro_sep = self.prep_params.get("ro_separation", None)
str += f"{1e6 * ro_sep:.2f}$\mu s$" if ro_sep is not None else "Unknown"
str += "\n"
str += "RO to feedback time: "
prow = self.prep_params.get("post_ro_wait", None)
str += f"{1e6 * prow:.2f}$\mu s$" if ro_sep is not None else "Unknown"
str += "\n"
thresholds = self.get_param_value('thresholds', {})
str += "Threshold(s):\n{}".format(
"\n".join([f"{i}: {t:0.5f}" for i, t in
thresholds.get(qbn, {}).items()]))
return str
@staticmethod
def _get_pop_label(state, key, no_reset=False):
superscript = "{NR}" if no_reset else "{c}" \
if "corrected" in key else "{}"
return f'$P_{state[-1]}^{superscript}$'
@staticmethod
def _std_error(p, nshots=10000):
return np.sqrt(np.abs(p)*(1-np.abs(p))/nshots)
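# Worked example (illustrative numbers): p = 0.5 and nshots = 10000
# give sqrt(0.5 * 0.5 / 10000) = 0.005, the binomial standard error.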
class FluxPulseTimingAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, qb_names, *args, **kwargs):
params_dict = {}
kwargs['params_dict'] = params_dict
kwargs['numeric_params'] = list(params_dict)
options_dict = kwargs.pop('options_dict', {})
options_dict['TwoD'] = True
kwargs['options_dict'] = options_dict
super().__init__(qb_names, *args, **kwargs)
def process_data(self):
super().process_data()
# Make sure data has the right shape (len(hard_sp), len(soft_sp))
for qbn, data in self.proc_data_dict['data_to_fit'].items():
if data.shape[1] != self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'].size:
self.proc_data_dict['data_to_fit'][qbn] = data.T
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.qb_names:
data = self.proc_data_dict['data_to_fit'][qbn][0]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
TwoErrorFuncModel = lmfit.Model(fit_mods.TwoErrorFunc)
guess_pars = fit_mods.TwoErrorFunc_guess(model=TwoErrorFuncModel,
data=data,
delays=sweep_points)
guess_pars['amp'].vary = True
guess_pars['mu_A'].vary = True
guess_pars['mu_B'].vary = True
guess_pars['sigma'].vary = True
guess_pars['offset'].vary = True
key = 'two_error_func_' + qbn
self.fit_dicts[key] = {
'fit_fn': TwoErrorFuncModel.func,
'fit_xvals': {'x': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
mu_A = self.fit_dicts['two_error_func_' + qbn]['fit_res'].best_values[
'mu_A']
mu_B = self.fit_dicts['two_error_func_' + qbn]['fit_res'].best_values[
'mu_B']
fp_length = a_tools.get_instr_setting_value_from_file(
file_path=self.raw_data_dict['folder'],
instr_name=qbn, param_name='flux_pulse_pulse_length')
self.proc_data_dict['analysis_params_dict'][qbn] = OrderedDict()
self.proc_data_dict['analysis_params_dict'][qbn]['delay'] = \
mu_A + 0.5 * (mu_B - mu_A) - fp_length / 2
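# Worked example (illustrative numbers): mu_A = 10 ns, mu_B = 60 ns
# and fp_length = 40 ns give delay = 10 + 25 - 20 = 15 ns, i.e. the
# pulse midpoint minus half the nominal pulse length.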
try:
self.proc_data_dict['analysis_params_dict'][qbn]['delay_stderr'] = \
1 / 2 * np.sqrt(
self.fit_dicts['two_error_func_' + qbn]['fit_res'].params[
'mu_A'].stderr ** 2
+ self.fit_dicts['two_error_func_' + qbn]['fit_res'].params[
'mu_B'].stderr ** 2)
except TypeError:
self.proc_data_dict['analysis_params_dict'][qbn]['delay_stderr']\
= 0
self.proc_data_dict['analysis_params_dict'][qbn]['fp_length'] = \
(mu_B - mu_A)
try:
self.proc_data_dict['analysis_params_dict'][qbn]['fp_length_stderr'] = \
np.sqrt(
self.fit_dicts['two_error_func_' + qbn]['fit_res'].params[
'mu_A'].stderr ** 2
+ self.fit_dicts['two_error_func_' + qbn]['fit_res'].params[
'mu_B'].stderr ** 2)
except TypeError:
self.proc_data_dict['analysis_params_dict'][qbn][
'fp_length_stderr'] = 0
self.save_processed_data(key='analysis_params_dict')
def prepare_plots(self):
self.options_dict.update({'TwoD': False,
'plot_proj_data': False})
super().prepare_plots()
if self.do_fitting:
for qbn in self.qb_names:
# rename base plot
base_plot_name = 'Pulse_timing_' + qbn
self.prepare_projected_data_plot(
fig_name=base_plot_name,
data=self.proc_data_dict['data_to_fit'][qbn][0],
plot_name_suffix=qbn+'fit',
qb_name=qbn)
self.plot_dicts['fit_' + qbn] = {
'fig_id': base_plot_name,
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['two_error_func_' + qbn]['fit_res'],
'setlabel': 'two error func. fit',
'do_legend': True,
'color': 'r',
'legend_ncol': 1,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
apd = self.proc_data_dict['analysis_params_dict']
textstr = 'delay = {:.2f} ns'.format(apd[qbn]['delay']*1e9) \
+ r' $\pm$ {:.2f} ns'.format(apd[qbn]['delay_stderr']
* 1e9)
textstr += '\n\nflux_pulse_length:\n fitted = {:.2f} ns'.format(
apd[qbn]['fp_length'] * 1e9) \
+ r' $\pm$ {:.2f} ns'.format(
apd[qbn]['fp_length_stderr'] * 1e9)
textstr += '\n set = {:.2f} ns'.format(
1e9 * a_tools.get_instr_setting_value_from_file(
file_path=self.raw_data_dict['folder'],
instr_name=qbn, param_name='flux_pulse_pulse_length'))
self.plot_dicts['text_msg_' + qbn] = {
'fig_id': base_plot_name,
'ypos': -0.2,
'xpos': 0,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
class FluxPulseTimingBetweenQubitsAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, qb_names, *args, **kwargs):
params_dict = {}
kwargs['params_dict'] = params_dict
kwargs['numeric_params'] = list(params_dict)
options_dict = kwargs.pop('options_dict', {})
options_dict['TwoD'] = True
kwargs['options_dict'] = options_dict
super().__init__(qb_names, *args, **kwargs)
def process_data(self):
super().process_data()
# Make sure data has the right shape (len(hard_sp), len(soft_sp))
for qbn, data in self.proc_data_dict['data_to_fit'].items():
if data.shape[1] != self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'].size:
self.proc_data_dict['data_to_fit'][qbn] = data.T
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
data = self.proc_data_dict['data_to_fit'][qbn][0]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
delays = np.zeros(len(sweep_points) * 2 - 1)
delays[0::2] = sweep_points
delays[1::2] = sweep_points[:-1] + np.diff(sweep_points) / 2
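# Example of the interleaving above (illustrative numbers):
# sweep_points = [0, 2, 4] ns yields delays = [0, 1, 2, 3, 4] ns,
# i.e. the measured delays with their midpoints interleaved.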
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
symmetry_idx, corr_data = find_symmetry_index(data)
delay = delays[symmetry_idx]
self.proc_data_dict['analysis_params_dict'][qbn] = OrderedDict()
self.proc_data_dict['analysis_params_dict'][qbn]['delays'] = delays
self.proc_data_dict['analysis_params_dict'][qbn]['delay'] = delay
self.proc_data_dict['analysis_params_dict'][qbn][
'delay_stderr'] = np.diff(delays).mean()
self.proc_data_dict['analysis_params_dict'][qbn][
'corr_data'] = np.array(corr_data)
self.save_processed_data(key='analysis_params_dict')
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.qb_names:
data = self.proc_data_dict['data_to_fit'][qbn][0]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
model = lmfit.Model(lambda t, slope, offset, delay:
slope*np.abs((t-delay)) + offset)
delay_guess = sweep_points[np.argmin(data)]
offset_guess = np.min(data)
slope_guess = (data[-1] - offset_guess) / (sweep_points[-1] -
delay_guess)
guess_pars = model.make_params(slope=slope_guess,
delay=delay_guess,
offset=offset_guess)
key = 'delay_fit_' + qbn
self.fit_dicts[key] = {
'fit_fn': model.func,
'fit_xvals': {'t': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
for qbn in self.qb_names:
self.proc_data_dict['analysis_params_dict'][qbn]['delay_fit'] = \
self.fit_dicts['delay_fit_' + qbn]['fit_res'].best_values['delay']
try:
stderr = self.fit_dicts['delay_fit_' + qbn]['fit_res'].params[
'delay'].stderr
stderr = np.nan if stderr is None else stderr
self.proc_data_dict['analysis_params_dict'][qbn][
'delay_fit_stderr'] = stderr
except TypeError:
self.proc_data_dict['analysis_params_dict'][qbn][
'delay_fit_stderr'] \
= 0
self.save_processed_data(key='analysis_params_dict')
def prepare_plots(self):
self.options_dict.update({'TwoD': False,
'plot_proj_data': False})
apd = self.proc_data_dict['analysis_params_dict']
super().prepare_plots()
rdd = self.raw_data_dict
for qbn in self.qb_names:
# rename base plot
base_plot_name = 'Pulse_timing_' + qbn
self.prepare_projected_data_plot(
fig_name=base_plot_name,
data=self.proc_data_dict['data_to_fit'][qbn][0],
plot_name_suffix=qbn + 'fit',
qb_name=qbn)
if self.do_fitting:
self.plot_dicts['fit_' + base_plot_name] = {
'fig_id': base_plot_name,
'plotfn': self.plot_fit,
'fit_res': self.fit_res['delay_fit_' + qbn],
'setlabel': 'fit',
'color': 'r',
'do_legend': True,
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
textstr = 'delay = {:.2f} ns'.format(
apd[qbn]['delay_fit'] * 1e9) \
+ r' $\pm$ {:.2f} ns'.format(
apd[qbn]['delay_fit_stderr'] * 1e9)
self.plot_dicts['text_msg_fit' + qbn] = {
'fig_id': base_plot_name,
'ypos': -0.2,
'xpos': 0,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
corr_data = self.proc_data_dict['analysis_params_dict'][qbn][
'corr_data']
delays = self.proc_data_dict['analysis_params_dict'][qbn]['delays']
self.plot_dicts['Autoconvolution_' + qbn] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qbn,
'fig_name': f'Autoconvolution_{qbn}',
'fig_id': f'Autoconvolution_{qbn}',
'plotfn': self.plot_line,
'xvals': delays[0::2] / 1e-9,
'yvals': corr_data[0::2],
'xlabel': r'Delay time',
'xunit': 'ns',
'ylabel': 'Autoconvolution function',
'linestyle': '-',
'color': 'k',
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
self.plot_dicts['Autoconvolution2_' + qbn] = {
'fig_id': f'Autoconvolution_{qbn}',
'plotfn': self.plot_line,
'xvals': delays[1::2] / 1e-9,
'yvals': corr_data[1::2],
'color': 'r'}
self.plot_dicts['corr_vline_' + qbn] = {
'fig_id': f'Autoconvolution_{qbn}',
'plotfn': self.plot_vlines,
'x': self.proc_data_dict['analysis_params_dict'][qbn][
'delay'] / 1e-9,
'ymin': corr_data.min(),
'ymax': corr_data.max(),
'colors': 'gray'}
textstr = 'delay = {:.2f} ns'.format(apd[qbn]['delay'] * 1e9) \
+ r' $\pm$ {:.2f} ns'.format(apd[qbn]['delay_stderr']
* 1e9)
self.plot_dicts['text_msg_' + qbn] = {
'fig_id': f'Autoconvolution_{qbn}',
'ypos': -0.2,
'xpos': 0,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
class FluxPulseScopeAnalysis(MultiQubit_TimeDomain_Analysis):
"""
Analysis class for a flux pulse scope measurement.
options_dict parameters specific to this class:
- freq_ranges_remove/delay_ranges_remove: dict with keys qubit names and
values list of length-2 lists/tuples that specify frequency/delays
ranges to completely exclude (from both the fit and the plots)
Ex: delay_ranges_remove = {'qb1': [ [5e-9, 72e-9] ]}
delay_ranges_remove = {'qb1': [ [5e-9, 20e-9], [50e-9, 72e-9] ]}
freq_ranges_remove = {'qb1': [ [5.42e9, 5.5e9] ]}
- freq_ranges_to_fit/delay_ranges_to_fit: dict with keys qubit names and
        values list of length-2 lists/tuples that specify frequency/delay
ranges that should be fitted (only these will be fitted!).
Plots will still show the full data.
        Ex: delay_ranges_to_fit = {'qb1': [ [5e-9, 72e-9] ]}
            delay_ranges_to_fit = {'qb1': [ [5e-9, 20e-9], [50e-9, 72e-9] ]}
freq_ranges_to_fit = {'qb1': [ [5.42e9, 5.5e9] ]}
- rectangles_exclude: dict with keys qubit names and
values list of length-4 lists/tuples that specify delays and frequency
ranges that should be excluded from the fit (these will not be
fitted!). Plots will still show the full data.
Ex: {'qb1': [ [-10e-9, 5e-9, 5.42e9, 5.5e9], [...] ]}
- fit_first_cal_state: dict with keys qubit names and values booleans
specifying whether to fit the delay points corresponding to the first
cal state (usually g) for that qubit
- sigma_guess: dict with keys qubit names and values floats specifying the
fit guess value for the Gaussian sigma
    - sign_of_peaks: dict with keys qubit names and values floats specifying
        the sign of the peaks used for setting the amplitude guess in the fit
- from_lower: unclear; should be cleaned up (TODO, Steph 07.10.2020)
- ghost: unclear; should be cleaned up (TODO, Steph 07.10.2020)
"""
def __init__(self, *args, **kwargs):
options_dict = kwargs.pop('options_dict', {})
options_dict['TwoD'] = True
kwargs['options_dict'] = options_dict
super().__init__(*args, **kwargs)
def extract_data(self):
super().extract_data()
# Set some default values specific to FluxPulseScopeAnalysis if the
# respective options have not been set by the user or in the metadata.
# (We do not do this in the init since we have to wait until
# metadata has been extracted.)
if self.get_param_value('rotation_type', default_value=None) is None:
self.options_dict['rotation_type'] = 'fixed_cal_points'
if self.get_param_value('TwoD', default_value=None) is None:
self.options_dict['TwoD'] = True
def process_data(self):
super().process_data()
# dictionaries with keys qubit names and values a list of tuples of
# 2 numbers specifying ranges to exclude
freq_ranges_remove = self.get_param_value('freq_ranges_remove')
delay_ranges_remove = self.get_param_value('delay_ranges_remove')
self.proc_data_dict['proc_data_to_fit'] = deepcopy(
self.proc_data_dict['data_to_fit'])
self.proc_data_dict['proc_sweep_points_2D_dict'] = deepcopy(
self.proc_data_dict['sweep_points_2D_dict'])
self.proc_data_dict['proc_sweep_points_dict'] = deepcopy(
self.proc_data_dict['sweep_points_dict'])
if freq_ranges_remove is not None:
for qbn, freq_range_list in freq_ranges_remove.items():
if freq_range_list is None:
continue
# find name of 1st sweep point in sweep dimension 1
param_name = [p for p in self.mospm[qbn]
if self.sp.find_parameter(p)][0]
for freq_range in freq_range_list:
freqs = self.proc_data_dict['proc_sweep_points_2D_dict'][
qbn][param_name]
data = self.proc_data_dict['proc_data_to_fit'][qbn]
reduction_arr = np.logical_not(
np.logical_and(freqs > freq_range[0],
freqs < freq_range[1]))
freqs_reshaped = freqs[reduction_arr]
self.proc_data_dict['proc_data_to_fit'][qbn] = \
data[reduction_arr]
self.proc_data_dict['proc_sweep_points_2D_dict'][qbn][
param_name] = freqs_reshaped
# remove delays
if delay_ranges_remove is not None:
for qbn, delay_range_list in delay_ranges_remove.items():
if delay_range_list is None:
continue
for delay_range in delay_range_list:
delays = self.proc_data_dict['proc_sweep_points_dict'][qbn][
'msmt_sweep_points']
data = self.proc_data_dict['proc_data_to_fit'][qbn]
reduction_arr = np.logical_not(
np.logical_and(delays > delay_range[0],
delays < delay_range[1]))
delays_reshaped = delays[reduction_arr]
self.proc_data_dict['proc_data_to_fit'][qbn] = \
np.concatenate([
data[:, :-self.num_cal_points][:, reduction_arr],
data[:, -self.num_cal_points:]], axis=1)
self.proc_data_dict['proc_sweep_points_dict'][qbn][
'msmt_sweep_points'] = delays_reshaped
self.proc_data_dict['proc_sweep_points_dict'][qbn][
'sweep_points'] = self.cp.extend_sweep_points(
delays_reshaped, qbn)
self.sign_of_peaks = self.get_param_value('sign_of_peaks',
default_value=None)
if self.sign_of_peaks is None:
self.sign_of_peaks = {qbn: None for qbn in self.qb_names}
for qbn in self.qb_names:
if self.sign_of_peaks.get(qbn, None) is None:
if self.rotation_type == 'fixed_cal_points'\
or self.rotation_type.endswith('PCA'):
# e state corresponds to larger values than g state
# (either due to cal points or due to set_majority_sign)
self.sign_of_peaks[qbn] = 1
else:
msmt_data = self.proc_data_dict['proc_data_to_fit'][qbn][
:, :-self.num_cal_points]
self.sign_of_peaks[qbn] = np.sign(np.mean(msmt_data) -
np.median(msmt_data))
self.sigma_guess = self.get_param_value('sigma_guess')
if self.sigma_guess is None:
self.sigma_guess = {qbn: 10e6 for qbn in self.qb_names}
self.from_lower = self.get_param_value('from_lower')
if self.from_lower is None:
self.from_lower = {qbn: False for qbn in self.qb_names}
self.ghost = self.get_param_value('ghost')
if self.ghost is None:
self.ghost = {qbn: False for qbn in self.qb_names}
def prepare_fitting_slice(self, freqs, qbn, mu_guess,
slice_idx=None, data_slice=None,
mu0_guess=None, do_double_fit=False):
if slice_idx is None:
raise ValueError('"slice_idx" cannot be None. It is used '
'for unique names in the fit_dicts.')
if data_slice is None:
data_slice = self.proc_data_dict['proc_data_to_fit'][qbn][
:, slice_idx]
GaussianModel = lmfit.Model(fit_mods.DoubleGaussian) if do_double_fit \
else lmfit.Model(fit_mods.Gaussian)
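        # Amplitude guess on the next line: a normalized Gaussian peaks at
        # ampl / (sqrt(2*pi) * sigma) ~= 0.4 * ampl / sigma, so the observed
        # peak-to-peak range is scaled by sigma / 0.4 to recover the area
        # (this assumes fit_mods.Gaussian is an area-normalized Gaussian).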
ampl_guess = (data_slice.max() - data_slice.min()) / \
0.4 * self.sign_of_peaks[qbn] * self.sigma_guess[qbn]
offset_guess = data_slice[0]
GaussianModel.set_param_hint('sigma',
value=self.sigma_guess[qbn],
vary=True)
GaussianModel.set_param_hint('mu',
value=mu_guess,
vary=True)
GaussianModel.set_param_hint('ampl',
value=ampl_guess,
vary=True)
GaussianModel.set_param_hint('offset',
value=offset_guess,
vary=True)
if do_double_fit:
GaussianModel.set_param_hint('sigma0',
value=self.sigma_guess[qbn],
vary=True)
GaussianModel.set_param_hint('mu0',
value=mu0_guess,
vary=True)
GaussianModel.set_param_hint('ampl0',
value=ampl_guess/2,
vary=True)
guess_pars = GaussianModel.make_params()
self.set_user_guess_pars(guess_pars)
key = f'gauss_fit_{qbn}_slice{slice_idx}'
self.fit_dicts[key] = {
'fit_fn': GaussianModel.func,
'fit_xvals': {'freq': freqs},
'fit_yvals': {'data': data_slice},
'guess_pars': guess_pars}
def prepare_fitting(self):
self.rectangles_exclude = self.get_param_value('rectangles_exclude')
self.delays_double_fit = self.get_param_value('delays_double_fit')
self.delay_ranges_to_fit = self.get_param_value(
'delay_ranges_to_fit', default_value={})
self.freq_ranges_to_fit = self.get_param_value(
'freq_ranges_to_fit', default_value={})
fit_first_cal_state = self.get_param_value(
'fit_first_cal_state', default_value={})
self.fit_dicts = OrderedDict()
self.delays_for_fit = OrderedDict()
self.freqs_for_fit = OrderedDict()
for qbn in self.qb_names:
# find name of 1st sweep point in sweep dimension 1
param_name = [p for p in self.mospm[qbn]
if self.sp.find_parameter(p)][0]
data = self.proc_data_dict['proc_data_to_fit'][qbn]
delays = self.proc_data_dict['proc_sweep_points_dict'][qbn][
'sweep_points']
self.delays_for_fit[qbn] = np.array([])
self.freqs_for_fit[qbn] = []
dr_fit = self.delay_ranges_to_fit.get(qbn, [(min(delays),
max(delays))])
fr_fit = self.freq_ranges_to_fit.get(qbn, [])
if not fit_first_cal_state.get(qbn, True):
first_cal_state = list(self.cal_states_dict_for_rotation[qbn])[0]
first_cal_state_idxs = self.cal_states_dict[first_cal_state]
if first_cal_state_idxs is None:
first_cal_state_idxs = []
for i, delay in enumerate(delays):
do_double_fit = False
if not fit_first_cal_state.get(qbn, True) and \
i-len(delays) in first_cal_state_idxs:
continue
if any([t[0] <= delay <= t[1] for t in dr_fit]):
data_slice = data[:, i]
freqs = self.proc_data_dict['proc_sweep_points_2D_dict'][
qbn][param_name]
if len(fr_fit):
mask = [np.logical_and(t[0] < freqs, freqs < t[1])
for t in fr_fit]
if len(mask) > 1:
mask = np.logical_or(*mask)
freqs = freqs[mask]
data_slice = data_slice[mask]
if self.rectangles_exclude is not None and \
self.rectangles_exclude.get(qbn, None) is not None:
for rectangle in self.rectangles_exclude[qbn]:
if rectangle[0] < delay < rectangle[1]:
reduction_arr = np.logical_not(
np.logical_and(freqs > rectangle[2],
freqs < rectangle[3]))
freqs = freqs[reduction_arr]
data_slice = data_slice[reduction_arr]
if self.delays_double_fit is not None and \
self.delays_double_fit.get(qbn, None) is not None:
rectangle = self.delays_double_fit[qbn]
do_double_fit = rectangle[0] < delay < rectangle[1]
reduction_arr = np.invert(np.isnan(data_slice))
freqs = freqs[reduction_arr]
data_slice = data_slice[reduction_arr]
self.freqs_for_fit[qbn].append(freqs)
self.delays_for_fit[qbn] = np.append(
self.delays_for_fit[qbn], delay)
if do_double_fit:
peak_indices = sp.signal.find_peaks(
data_slice, distance=50e6/(freqs[1] - freqs[0]))[0]
peaks = data_slice[peak_indices]
srtd_idxs = np.argsort(np.abs(peaks))
mu_guess = freqs[peak_indices[srtd_idxs[-1]]]
mu0_guess = freqs[peak_indices[srtd_idxs[-2]]]
else:
mu_guess = freqs[np.argmax(
data_slice * self.sign_of_peaks[qbn])]
mu0_guess = None
self.prepare_fitting_slice(freqs, qbn, mu_guess, i,
data_slice=data_slice,
mu0_guess=mu0_guess,
do_double_fit=do_double_fit)
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
delays = self.proc_data_dict['proc_sweep_points_dict'][qbn][
'sweep_points']
fit_keys = [k for k in self.fit_dicts if qbn in k.split('_')]
fitted_freqs = np.zeros(len(fit_keys))
fitted_freqs_errs = np.zeros(len(fit_keys))
deep = False
for i, fk in enumerate(fit_keys):
fit_res = self.fit_dicts[fk]['fit_res']
mu_param = 'mu'
if 'mu0' in fit_res.best_values:
mu_param = 'mu' if fit_res.best_values['mu'] > \
fit_res.best_values['mu0'] else 'mu0'
fitted_freqs[i] = fit_res.best_values[mu_param]
fitted_freqs_errs[i] = fit_res.params[mu_param].stderr
if self.from_lower[qbn]:
if self.ghost[qbn]:
if (fitted_freqs[i - 1] - fit_res.best_values['mu']) / \
fitted_freqs[i - 1] > 0.05 and i > len(delays)-4:
deep = False
condition1 = ((fitted_freqs[i-1] -
fit_res.best_values['mu']) /
fitted_freqs[i-1]) < -0.015
condition2 = (i > 1 and i < (len(fitted_freqs) -
len(delays)))
if condition1 and condition2:
if deep:
mu_guess = fitted_freqs[i-1]
self.prepare_fitting_slice(
self.freqs_for_fit[qbn][i], qbn, mu_guess, i)
self.run_fitting(keys_to_fit=[fk])
fitted_freqs[i] = self.fit_dicts[fk][
'fit_res'].best_values['mu']
fitted_freqs_errs[i] = self.fit_dicts[fk][
'fit_res'].params['mu'].stderr
deep = True
else:
if self.ghost[qbn]:
if (fitted_freqs[i - 1] - fit_res.best_values['mu']) / \
fitted_freqs[i - 1] > -0.05 and \
i > len(delays) - 4:
deep = False
if (fitted_freqs[i - 1] - fit_res.best_values['mu']) / \
fitted_freqs[i - 1] > 0.015 and i > 1:
if deep:
mu_guess = fitted_freqs[i - 1]
self.prepare_fitting_slice(
self.freqs_for_fit[qbn][i], qbn, mu_guess, i)
self.run_fitting(keys_to_fit=[fk])
fitted_freqs[i] = self.fit_dicts[fk][
'fit_res'].best_values['mu']
fitted_freqs_errs[i] = self.fit_dicts[fk][
'fit_res'].params['mu'].stderr
deep = True
self.proc_data_dict['analysis_params_dict'][
f'fitted_freqs_{qbn}'] = {'val': fitted_freqs,
'stderr': fitted_freqs_errs}
self.proc_data_dict['analysis_params_dict'][f'delays_{qbn}'] = \
self.delays_for_fit[qbn]
self.save_processed_data(key='analysis_params_dict')
def prepare_plots(self):
super().prepare_plots()
if self.do_fitting:
for qbn in self.qb_names:
base_plot_name = 'FluxPulseScope_' + qbn
xlabel, xunit = self.get_xaxis_label_unit(qbn)
# find name of 1st sweep point in sweep dimension 1
param_name = [p for p in self.mospm[qbn]
if self.sp.find_parameter(p)][0]
ylabel = self.sp.get_sweep_params_property(
'label', dimension=1, param_names=param_name)
yunit = self.sp.get_sweep_params_property(
'unit', dimension=1, param_names=param_name)
xvals = self.proc_data_dict['proc_sweep_points_dict'][qbn][
'sweep_points']
self.plot_dicts[f'{base_plot_name}_main'] = {
'plotfn': self.plot_colorxy,
'fig_id': base_plot_name,
'xvals': xvals,
'yvals': self.proc_data_dict['proc_sweep_points_2D_dict'][
qbn][param_name],
'zvals': self.proc_data_dict['proc_data_to_fit'][qbn],
'xlabel': xlabel,
'xunit': xunit,
'ylabel': ylabel,
'yunit': yunit,
'title': (self.raw_data_dict['timestamp'] + ' ' +
self.measurement_strings[qbn]),
'clabel': self.get_yaxis_label(qb_name=qbn)}
self.plot_dicts[f'{base_plot_name}_fit'] = {
'fig_id': base_plot_name,
'plotfn': self.plot_line,
'xvals': self.delays_for_fit[qbn],
'yvals': self.proc_data_dict['analysis_params_dict'][
f'fitted_freqs_{qbn}']['val'],
'yerr': self.proc_data_dict['analysis_params_dict'][
f'fitted_freqs_{qbn}']['stderr'],
'color': 'r',
'linestyle': '-',
'marker': 'x'}
# plot with log scale on x-axis
self.plot_dicts[f'{base_plot_name}_main_log'] = {
'plotfn': self.plot_colorxy,
'fig_id': f'{base_plot_name}_log',
'xvals': xvals*1e6,
'yvals': self.proc_data_dict['proc_sweep_points_2D_dict'][
qbn][param_name]/1e9,
'zvals': self.proc_data_dict['proc_data_to_fit'][qbn],
'xlabel': f'{xlabel} ($\\mu${xunit})',
'ylabel': f'{ylabel} (G{yunit})',
'logxscale': True,
'xrange': [min(xvals*1e6), max(xvals*1e6)],
'no_label_units': True,
'no_label': True,
'clabel': self.get_yaxis_label(qb_name=qbn)}
self.plot_dicts[f'{base_plot_name}_fit_log'] = {
'fig_id': f'{base_plot_name}_log',
'plotfn': self.plot_line,
'xvals': self.delays_for_fit[qbn]*1e6,
'yvals': self.proc_data_dict['analysis_params_dict'][
f'fitted_freqs_{qbn}']['val']/1e9,
'yerr': self.proc_data_dict['analysis_params_dict'][
f'fitted_freqs_{qbn}']['stderr']/1e9,
'title': (self.raw_data_dict['timestamp'] + ' ' +
self.measurement_strings[qbn]),
'color': 'r',
'linestyle': '-',
'marker': 'x'}
class RunTimeAnalysis(ba.BaseDataAnalysis):
"""
    Provides an elementary analysis of run time by plotting all timers
    saved in the HDF5 file of a measurement.
"""
def __init__(self,
label: str = '',
t_start: str = None, t_stop: str = None, data_file_path: str = None,
options_dict: dict = None, extract_only: bool = False,
do_fitting: bool = True, auto=True,
params_dict=None, numeric_params=None, **kwargs):
super().__init__(t_start=t_start, t_stop=t_stop, label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only,
do_fitting=do_fitting, **kwargs)
self.timers = {}
if not hasattr(self, "job"):
self.create_job(t_start=t_start, t_stop=t_stop,
label=label, data_file_path=data_file_path,
do_fitting=do_fitting, options_dict=options_dict,
extract_only=extract_only, params_dict=params_dict,
numeric_params=numeric_params, **kwargs)
self.params_dict = {f"{tm_mod.Timer.HDF_GRP_NAME}":
f"{tm_mod.Timer.HDF_GRP_NAME}",
"repetition_rate":
"Instrument settings/TriggerDevice.pulse_period",
}
if auto:
self.run_analysis()
def extract_data(self):
super().extract_data()
timers_dicts = self.raw_data_dict.get('Timers', {})
for t, v in timers_dicts.items():
self.timers[t] = tm_mod.Timer(name=t, **v)
# Extract and build raw measurement timer
self.timers['BareMeasurement'] = self.bare_measurement_timer(
ref_time=self.get_param_value("ref_time")
)
def process_data(self):
pass
def plot(self, **kwargs):
timers = [t for t in self.timers.values() if len(t)]
plot_kws = self.get_param_value('plot_kwargs', {})
for t in timers:
try:
self.figs["timer_" + t.name] = t.plot(**plot_kws)
except Exception as e:
if self.raise_exceptions:
raise
log.error(f'Could not plot Timer: {t.name}: {e}')
if self.get_param_value('combined_timer', True):
self.figs['timer_all'] = tm_mod.multi_plot(timers,
**plot_kws)
def bare_measurement_timer(self, ref_time=None,
checkpoint='bare_measurement', **kw):
bmtime = self.bare_measurement_time(**kw)
bmtimer = tm_mod.Timer('BareMeasurement', auto_start=False)
if ref_time is None:
try:
ts = [t.find_earliest() for t in self.timers.values()]
ts = [t[-1] for t in ts if len(t)]
arg_sorted = sorted(range(len(ts)),
key=list(ts).__getitem__)
ref_time = ts[arg_sorted[0]]
except Exception as e:
                log.error('Failed to extract reference time for bare '
                          'measurement timer. Please fix the error '
                          'or pass in a reference time manually.')
raise e
# TODO add more options of how to distribute the bm time in the timer
# (not only start stop but e.g. distribute it)
bmtimer.checkpoint(f"BareMeasurement.{checkpoint}.start",
values=[ref_time], log_init=False)
bmtimer.checkpoint(f"BareMeasurement.{checkpoint}.end",
                           values=[ref_time + dt.timedelta(seconds=bmtime)],
log_init=False)
return bmtimer
def bare_measurement_time(self, nr_averages=None, repetition_rate=None,
count_nan_measurements=False):
det_metadata = self.metadata.get("Detector Metadata", None)
if nr_averages is None:
nr_averages = self.get_param_value('nr_averages', None)
if det_metadata is not None and nr_averages is None:
# multi detector function: look for child "detectors"
# assumes at least 1 child and that all children have the same
# number of averages
det = list(det_metadata.get('detectors', {}).values())[0]
nr_averages = det.get('nr_averages', det.get('nr_shots', None))
if nr_averages is None:
            raise ValueError('Could not extract nr_averages/nr_shots from '
                             'hdf file. Please specify "nr_averages" in '
                             'options_dict.')
self.nr_averages = nr_averages
n_hsp = len(self.raw_data_dict['hard_sweep_points'])
n_ssp = len(self.raw_data_dict.get('soft_sweep_points', [0]))
if repetition_rate is None:
repetition_rate = self.raw_data_dict["repetition_rate"]
if count_nan_measurements:
perc_meas = 1
else:
# When sweep points are skipped, data is missing in all columns
# Thus, we can simply check in the first column.
vals = list(self.raw_data_dict['measured_data'].values())[0]
perc_meas = 1 - np.sum(np.isnan(vals)) / np.prod(vals.shape)
return self._bare_measurement_time(n_ssp, n_hsp, repetition_rate,
nr_averages, perc_meas)
@staticmethod
def _bare_measurement_time(n_ssp, n_hsp, repetition_rate, nr_averages,
percentage_measured):
return n_ssp * n_hsp * repetition_rate * nr_averages \
* percentage_measured
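        # Note: despite its name, repetition_rate is filled from
        # TriggerDevice.pulse_period (see params_dict in __init__), so it is
        # presumably a period in seconds and the product above is a time.
        # Worked example: 2 soft x 100 hard sweep points x 10e-6 s x 1024
        # averages with percentage_measured = 1 gives
        # 2 * 100 * 10e-6 * 1024 ~= 2.05 s of bare measurement time.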
class MixerCarrierAnalysis(MultiQubit_TimeDomain_Analysis):
"""Analysis for the :py:meth:~'QuDev_transmon.calibrate_drive_mixer_carrier_model' measurement.
The class extracts the DC biases on the I and Q channel inputs of the
measured IQ mixer that minimize the LO leakage.
"""
def extract_data(self):
super().extract_data()
if self.get_param_value('TwoD', default_value=None) is None:
self.options_dict['TwoD'] = True
def process_data(self):
super().process_data()
hsp = self.raw_data_dict['hard_sweep_points']
ssp = self.raw_data_dict['soft_sweep_points']
mdata = self.raw_data_dict['measured_data']
# Conversion from V_peak -> V_RMS
V_RMS = list(mdata.values())[0]/np.sqrt(2)
# Conversion to P (dBm):
# P = V_RMS^2 / 50 Ohms
# P (dBm) = 10 * log10(P / 1 mW)
# P (dBm) = 10 * log10(V_RMS^2 / 50 Ohms / 1 mW)
# P (dBm) = 20 * log10(V_RMS) - 10 * log10(50 Ohms * 1 mW)
LO_dBm = 20*np.log10(V_RMS) - 10 * np.log10(50 * 1e-3)
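        # Sanity check of the conversion above: V_RMS = sqrt(50 Ohm * 1 mW)
        # ~= 0.224 V yields P = 0.224**2 / 50 = 1 mW, i.e. 0 dBm.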
VI = hsp
VQ = ssp
if len(hsp) * len(ssp) == len(LO_dBm.flatten()):
VI, VQ = np.meshgrid(hsp, ssp)
VI = VI.flatten()
VQ = VQ.flatten()
LO_dBm = LO_dBm.T.flatten()
self.proc_data_dict['V_I'] = VI
self.proc_data_dict['V_Q'] = VQ
self.proc_data_dict['LO_leakage'] = LO_dBm
self.proc_data_dict['data_to_fit'] = LO_dBm
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
VI = self.proc_data_dict['V_I']
VQ = self.proc_data_dict['V_Q']
data = self.proc_data_dict['data_to_fit']
mixer_lo_leakage_mod = lmfit.Model(fit_mods.mixer_lo_leakage,
independent_vars=['vi', 'vq'])
# Use two lowest values in measurements to choose
# initial model parameters.
VI_two_lowest = VI[np.argpartition(data, 2)][0:2]
VQ_two_lowest = VQ[np.argpartition(data, 2)][0:2]
minimum = - np.mean(VI_two_lowest) + 1j * np.mean(VQ_two_lowest)
li_guess = np.abs(minimum)
theta_i_guess = cmath.phase(minimum)
guess_pars = fit_mods.mixer_lo_leakage_guess(mixer_lo_leakage_mod,
li=li_guess,
theta_i=theta_i_guess)
self.fit_dicts['mixer_lo_leakage'] = {
'model': mixer_lo_leakage_mod,
'fit_xvals': {'vi': VI,
'vq': VQ},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
fit_dict = self.fit_dicts['mixer_lo_leakage']
best_values = fit_dict['fit_res'].best_values
# compute values that minimize the fitted model:
leakage = best_values['li'] * np.exp(1j* best_values['theta_i']) \
- 1j * best_values['lq'] * np.exp(1j*best_values['theta_q'])
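        # The phasor above combines the two fitted magnitude/phase pairs;
        # the compensating DC offsets are its negated real part on I and its
        # imaginary part on Q (sign conventions are assumed to follow
        # fit_mods.mixer_lo_leakage).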
adict = self.proc_data_dict['analysis_params_dict']
adict['V_I'] = -leakage.real
adict['V_Q'] = leakage.imag
self.save_processed_data(key='analysis_params_dict')
def prepare_plots(self):
V_I = self.proc_data_dict['V_I']
V_Q = self.proc_data_dict['V_Q']
timestamp = self.timestamps[0]
if self.do_fitting:
# interpolate data for plot,
# define grid with limits based on measurement
# points and make it 10 % larger in both axes
size_offset_vi = 0.05 * (np.max(V_I) - np.min(V_I))
size_offset_vq = 0.05 * (np.max(V_Q) - np.min(V_Q))
vi = np.linspace(np.min(V_I) - size_offset_vi,
np.max(V_I) + size_offset_vi, 250)
vq = np.linspace(np.min(V_Q) - size_offset_vq,
np.max(V_Q) + size_offset_vq, 250)
V_I_plot, V_Q_plot = np.meshgrid(vi, vq)
fit_dict = self.fit_dicts['mixer_lo_leakage']
fit_res = fit_dict['fit_res']
best_values = fit_res.best_values
model_func = fit_dict['model'].func
z = model_func(V_I_plot, V_Q_plot, **best_values)
base_plot_name = 'mixer_lo_leakage'
self.plot_dicts['base_contour'] = {
'fig_id': base_plot_name,
'plotfn': self.plot_contourf,
'xvals': V_I_plot,
'yvals': V_Q_plot,
'zvals': z,
'xlabel': 'Offset, $V_\\mathrm{I}$',
'ylabel': 'Offset, $V_\\mathrm{Q}$',
'xunit': 'V',
'yunit': 'V',
'setlabel': 'lo leakage magnitude',
'cmap': 'plasma',
'cmap_levels': 100,
'clabel': 'Carrier Leakage $V_\\mathrm{LO}$ (dBm)',
'title': f'{timestamp} calibrate_drive_mixer_carrier_'
f'{self.qb_names[0]}'
}
self.plot_dicts['base_measurement_points'] = {
'fig_id': base_plot_name,
'plotfn': self.plot_line,
'xvals': V_I,
'yvals': V_Q,
'color': 'white',
'marker': '.',
'linestyle': 'None',
'setlabel': ''
}
V_I_opt = self.proc_data_dict['analysis_params_dict']['V_I']
V_Q_opt = self.proc_data_dict['analysis_params_dict']['V_Q']
self.plot_dicts['base_minimum'] = {
'fig_id': base_plot_name,
'plotfn': self.plot_line,
'xvals': np.array([V_I_opt]),
'yvals': np.array([V_Q_opt]),
                'setlabel': '$V_\\mathrm{I}$' + f' ={V_I_opt*1e3:.1f}$\\,$mV\n'
                            '$V_\\mathrm{Q}$' + f' ={V_Q_opt*1e3:.1f}$\\,$mV',
'color': 'red',
'marker': 'o',
'linestyle': 'None',
'do_legend': True,
'legend_pos': 'upper right',
'legend_title': None,
'legend_frameon': True
}
for ch in ['I', 'Q']:
plot_name = f'V_{ch}_vs_LO_magn'
leakage = self.proc_data_dict['LO_leakage']
self.plot_dicts[f'raw_V_{ch}_vs_LO_magn'] = {
'fig_id': plot_name,
'plotfn': self.plot_line,
'xvals': self.proc_data_dict[f'V_{ch}'],
'yvals': leakage,
'color': 'blue',
'marker': '.',
'linestyle': 'None',
'xlabel': f'Offset, $V_\\mathrm{{{ch}}}$',
'ylabel': 'Carrier Leakage $V_\\mathrm{LO}$',
'xunit': 'V',
'yunit': 'dBm',
'title': f'{timestamp} {self.qb_names[0]}\n$V_\\mathrm{{LO}}$ '
f'projected onto offset $V_\\mathrm{{{ch}}}$'
}
if self.do_fitting:
                optimum = self.proc_data_dict['analysis_params_dict']['V_' + ch]
y_min = np.min(leakage)
y_max = np.max(leakage)
self.plot_dicts[f'optimum_V_{ch}_vs_LO_magn'] = {
'fig_id': plot_name,
'plotfn': self.plot_line,
'xvals': np.array([optimum, optimum]),
'yvals': np.array([y_min, y_max]),
'color': 'red',
'marker': 'None',
'linestyle': '--'
}
class MixerSkewnessAnalysis(MultiQubit_TimeDomain_Analysis):
"""Analysis for the :py:meth:~'QuDev_transmon.calibrate_drive_mixer_skewness_model' measurement.
The class extracts the phase and amplitude correction settings of the Q
channel input of the measured IQ mixer that maximize the suppression of the
unwanted sideband.
"""
def extract_data(self):
super().extract_data()
if self.get_param_value('TwoD', default_value=None) is None:
self.options_dict['TwoD'] = True
def process_data(self):
super().process_data()
hsp = self.raw_data_dict['hard_sweep_points']
ssp = self.raw_data_dict['soft_sweep_points']
mdata = self.raw_data_dict['measured_data']
sideband_I, sideband_Q = list(mdata.values())
if len(hsp) * len(ssp) == len(sideband_I.flatten()):
# The arrays hsp and ssp define the edges of a grid of measured
# points. We reshape the arrays such that each data point
# sideband_I/Q[i] corresponds to the sweep point alpha[i], phase[i]
alpha, phase = np.meshgrid(hsp, ssp)
alpha = alpha.flatten()
phase = phase.flatten()
sideband_I = sideband_I.T.flatten()
sideband_Q = sideband_Q.T.flatten()
else:
alpha = hsp
phase = ssp
# Conversion from V_peak -> V_RMS
# V_RMS = sqrt(V_peak_I^2 + V_peak_Q^2)/sqrt(2)
# Conversion to P (dBm):
# P = V_RMS^2 / 50 Ohms
# P (dBm) = 10 * log10(P / 1 mW)
# P (dBm) = 10 * log10(V_RMS^2 / 50 Ohms / 1 mW)
# P (dBm) = 10 * log10(V_RMS^2) - 10 * log10(50 Ohms * 1 mW)
# P (dBm) = 10 * log10(V_peak_I^2 + V_peak_Q^2)
# - 10 * log10(2 * 50 Ohms * 1 mW)
sideband_dBm_amp = 10 * np.log10(sideband_I**2 + sideband_Q**2) \
- 10 * np.log10(2 * 50 * 1e-3)
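        # Sanity check: sideband_I = sideband_Q ~= 0.224 V gives
        # V_RMS = sqrt(0.1) / sqrt(2) ~= 0.224 V and hence P = 1 mW = 0 dBm.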
self.proc_data_dict['alpha'] = alpha
self.proc_data_dict['phase'] = phase
self.proc_data_dict['sideband_I'] = sideband_I
self.proc_data_dict['sideband_Q'] = sideband_Q
self.proc_data_dict['sideband_dBm_amp'] = sideband_dBm_amp
self.proc_data_dict['data_to_fit'] = sideband_dBm_amp
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
data = self.proc_data_dict['data_to_fit']
mixer_imbalance_sideband_mod = lmfit.Model(
fit_mods.mixer_imbalance_sideband,
independent_vars=['alpha', 'phi_skew']
)
# Use two lowest values in measurements to choose
# initial model parameters.
alpha_two_lowest = self.proc_data_dict['alpha'][np.argpartition(data, 2)][0:2]
phi_two_lowest = self.proc_data_dict['phase'][np.argpartition(data, 2)][0:2]
g_guess = np.mean(alpha_two_lowest)
phi_guess = - np.mean(phi_two_lowest)
guess_pars = fit_mods.mixer_imbalance_sideband_guess(
mixer_imbalance_sideband_mod,
g=g_guess,
phi=phi_guess
)
self.fit_dicts['mixer_imbalance_sideband'] = {
'model': mixer_imbalance_sideband_mod,
'fit_xvals': {'alpha': self.proc_data_dict['alpha'],
'phi_skew': self.proc_data_dict['phase']},
'fit_yvals': {'data': self.proc_data_dict['data_to_fit']},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
fit_dict = self.fit_dicts['mixer_imbalance_sideband']
best_values = fit_dict['fit_res'].best_values
self.proc_data_dict['analysis_params_dict']['alpha'] = best_values['g']
self.proc_data_dict['analysis_params_dict']['phase'] = -best_values['phi']
self.save_processed_data(key='analysis_params_dict')
def prepare_plots(self):
pdict = self.proc_data_dict
alpha = pdict['alpha']
phase = pdict['phase']
timestamp = self.timestamps[0]
if self.do_fitting:
# define grid with limits based on measurement points
# and make it 10 % larger in both axes
size_offset_alpha = 0.05*(np.max(alpha)-np.min(alpha))
size_offset_phase = 0.05*(np.max(phase)-np.min(phase))
xi = np.linspace(np.min(alpha) - size_offset_alpha,
np.max(alpha) + size_offset_alpha, 250)
yi = np.linspace(np.min(phase) - size_offset_phase,
np.max(phase) + size_offset_phase, 250)
x, y = np.meshgrid(xi, yi)
fit_dict = self.fit_dicts['mixer_imbalance_sideband']
fit_res = fit_dict['fit_res']
best_values = fit_res.best_values
model_func = fit_dict['model'].func
z = model_func(x, y, **best_values)
base_plot_name = 'mixer_sideband_suppression'
self.plot_dicts['base_contour'] = {
'fig_id': base_plot_name,
'plotfn': self.plot_contourf,
'xvals': x,
'yvals': y,
'zvals': z,
                'xlabel': 'Ampl. Ratio, $\\alpha_\\mathrm{IQ}$',
'ylabel': 'Phase Off., $\\Delta\\phi_\\mathrm{IQ}$',
'xunit': '',
'yunit': 'deg',
'setlabel': 'sideband magnitude',
'cmap': 'plasma',
'cmap_levels': 100,
'clabel': 'Sideband Leakage $V_\\mathrm{LO-IF}$ (dBm)',
'title': f'{timestamp} calibrate_drive_mixer_skewness_'
f'{self.qb_names[0]}'
}
self.plot_dicts['base_measurement_points'] = {
'fig_id': base_plot_name,
'plotfn': self.plot_line,
'xvals': alpha,
'yvals': phase,
'color': 'white',
'marker': '.',
'linestyle': 'None',
'setlabel': '',
}
alpha_min = pdict['analysis_params_dict']['alpha']
phase_min = pdict['analysis_params_dict']['phase']
self.plot_dicts['base_minimum'] = {
'fig_id': base_plot_name,
'plotfn': self.plot_line,
'xvals': np.array([alpha_min]),
'yvals': np.array([phase_min]),
'setlabel': f'$\\alpha$ ={alpha_min:.2f}\n'
                            f'$\\phi$ ={phase_min:.2f}$^\\circ$',
'color': 'red',
'marker': 'o',
'linestyle': 'None',
'do_legend': True,
'legend_pos': 'upper right',
'legend_title': None,
'legend_frameon': True
}
raw_alpha_plot_name = 'alpha_vs_sb_magn'
self.plot_dicts['raw_alpha_vs_sb_magn'] = {
'fig_id': raw_alpha_plot_name,
'plotfn': self.plot_line,
'xvals': alpha,
'yvals': pdict['sideband_dBm_amp'],
'color': 'blue',
'marker': '.',
'linestyle': 'None',
            'xlabel': 'Ampl. Ratio, $\\alpha_\\mathrm{IQ}$',
'ylabel': 'Sideband Leakage $V_\\mathrm{LO-IF}$',
'xunit': '',
'yunit': 'dBm',
'title': f'{timestamp} {self.qb_names[0]}\n$V_\\mathrm{{LO-IF}}$ '
f'projected onto ampl. ratio $\\alpha_\\mathrm{{IQ}}$'
}
if self.do_fitting:
self.plot_dicts['optimum_in_alpha_vs_sb_magn'] = {
'fig_id': raw_alpha_plot_name,
'plotfn': self.plot_line,
'xvals': np.array([alpha_min, alpha_min]),
'yvals': np.array([np.min(pdict['sideband_dBm_amp']),
np.max(pdict['sideband_dBm_amp'])]),
'color': 'red',
'marker': 'None',
'linestyle': '--'
}
raw_phase_plot_name = 'phase_vs_sb_magn'
self.plot_dicts['raw_phase_vs_sb_magn'] = {
'fig_id': raw_phase_plot_name,
'plotfn': self.plot_line,
'xvals': phase,
'yvals': pdict['sideband_dBm_amp'],
'color': 'blue',
'marker': '.',
'linestyle': 'None',
'xlabel': 'Phase Off., $\\Delta\\phi_\\mathrm{IQ}$',
'ylabel': 'Sideband Leakage $V_\\mathrm{LO-IF}$',
'xunit': 'deg',
'yunit': 'dBm',
'title': f'{timestamp} {self.qb_names[0]}\n$V_\\mathrm{{LO-IF}}$ '
f'projected onto phase offset $\\Delta\\phi_\\mathrm{{IQ}}$'
}
if self.do_fitting:
self.plot_dicts['optimum_in_phase_vs_sb_magn'] = {
'fig_id': raw_phase_plot_name,
'plotfn': self.plot_line,
'xvals': np.array([phase_min, phase_min]),
'yvals': np.array([np.min(pdict['sideband_dBm_amp']),
np.max(pdict['sideband_dBm_amp'])]),
'color': 'red',
'marker': 'None',
'linestyle': '--'
}
|
QudevETH/PycQED_py3
|
pycqed/analysis_v2/timedomain_analysis.py
|
Python
|
mit
| 499,531
|
[
"Gaussian"
] |
654821a744bfba112b3b320884181fa411834ba5b53fd6a99fad06402d873716
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line skeleton application for Calendar API.
Usage:
$ python sample.py
You can also get help on all the command-line flags the program understands
by running:
$ python sample.py --help
"""
import argparse
import httplib2
import os
import sys
from apiclient import discovery
from oauth2client import file
from oauth2client import client
from oauth2client import tools
# Parser for command-line arguments.
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[tools.argparser])
# CLIENT_SECRETS is the name of a file containing the OAuth 2.0 information
# for this application, including client_id and client_secret. You can see
# the Client ID and Client secret on the APIs page in the Cloud Console:
# <https://cloud.google.com/console#/project/726656680991/apiui>
CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
# Set up a Flow object to be used for authentication.
# Add one or more of the following scopes. PLEASE ONLY ADD THE SCOPES YOU
# NEED. For more information on using scopes please see
# <https://developers.google.com/+/best-practices>.
FLOW = client.flow_from_clientsecrets(CLIENT_SECRETS,
scope=[
'https://www.googleapis.com/auth/calendar',
'https://www.googleapis.com/auth/calendar.readonly',
],
message=tools.message_if_missing(CLIENT_SECRETS))
def main(argv):
# Parse the command-line flags.
flags = parser.parse_args(argv[1:])
  # If the credentials don't exist or are invalid, run through the native
  # client flow. The Storage object will ensure that, if successful, the
  # good credentials will get written back to the file.
storage = file.Storage('sample.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = tools.run_flow(FLOW, storage, flags)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
  # Construct the service object for interacting with the Calendar API.
service = discovery.build('calendar', 'v3', http=http)
try:
print "Success! Now add code here."
except client.AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
# For more information on the Calendar API you can visit:
#
# https://developers.google.com/google-apps/calendar/firstapp
#
# For more information on the Calendar API Python library surface you
# can visit:
#
# https://developers.google.com/resources/api-libraries/documentation/calendar/v3/python/latest/
#
# For information on the Python Client Library visit:
#
# https://developers.google.com/api-client-library/python/start/get_started
if __name__ == '__main__':
main(sys.argv)
|
pseudovirtual/earnings-calendar
|
gcal-boilerplate/sample.py
|
Python
|
mit
| 3,485
|
[
"VisIt"
] |
a4b13835e05c2d7823f9ae9e9334d79b678f76a0797f4575ef9fbe0d183a9629
|
# Vodka is a lib that extracts OpenERP model information
# Copyright (C) 2013 Laurent Peuch <cortex@worlddomination.be>
# Copyright (C) 2013 Railnova SPRL <railnova@railnova.eu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import _ast
import ast
import subprocess
from ConfigParser import ConfigParser
from path import path
from bs4 import BeautifulSoup
def format_xml(to_write):
xmllint_is_installed = subprocess.Popen(['which', 'xmllint'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0]
if not xmllint_is_installed:
return to_write
    formatted, err = subprocess.Popen(['xmllint', '--format', '/dev/stdin'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate(to_write)
    if not err:
        # remove the leading <?xml ...> declaration added by xmllint
        to_write = "\n".join(formatted.split("\n")[1:])
return to_write
def parse_attr(attr):
to_return = []
while isinstance(attr, _ast.Attribute):
to_return.append(attr.attr)
attr = attr.value
to_return.append(get_value(attr))
return ".".join(reversed(to_return))
def get_value(elt):
if isinstance(elt, _ast.Num):
return elt.n
elif isinstance(elt, _ast.Name):
return elt.id
elif isinstance(elt, _ast.Str):
return elt.s
elif isinstance(elt, (_ast.List, _ast.Tuple)):
return map(get_value, elt.elts)
elif isinstance(elt, _ast.Dict):
return dict(zip(map(get_value, elt.keys), map(get_value, elt.values)))
elif isinstance(elt, _ast.Lambda):
return "lambda"
elif isinstance(elt, _ast.Call) and isinstance(elt.func, _ast.Name):
# TODO: handle keywords
return "%s(%s)" % (elt.func.id, map(get_value, elt.args) if elt.args else "")
elif isinstance(elt, _ast.Call):
return parse_attr(elt.func)
elif isinstance(elt, _ast.Attribute):
return parse_attr(elt)
elif isinstance(elt, _ast.BinOp) and isinstance(elt.op, _ast.Add):
return parse_attr(elt.left) + parse_attr(elt.right)
else:
raise Exception(elt)
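# Example: get_value(ast.parse("{'a': 1}", mode="eval").body) returns
# {'a': 1}, while get_value(ast.parse("x.y.z", mode="eval").body) returns
# the dotted path "x.y.z" via parse_attr.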
def parse_gettext(string_or_call):
if isinstance(string_or_call, _ast.Str):
return string_or_call.s
elif isinstance(string_or_call, _ast.Call):
if string_or_call.func.id not in ("_", "gettext", "translate"):
raise Exception("was expecting a gettext call, got '%s' instead" % string_or_call.func.id)
return string_or_call.args[0].s
elif isinstance(string_or_call, _ast.Num):
return string_or_call.n
else:
raise ValueError("parse_gettext is expecting either a _ast.Str or a _ast.Call")
class ClassFinder(ast.NodeVisitor):
def __init__(self):
self.models = {}
def is_oerp_mode(self, class_node):
for i in class_node.bases:
if isinstance(i, _ast.Name) and i.id == "osv":
return True
if isinstance(i, _ast.Attribute) and i.attr == "osv" and i.value.id == "osv":
return True
return False
def visit_ClassDef(self, class_node):
if not self.is_oerp_mode(class_node):
return
self.models[class_node.name] = {"class_name": class_node.name}
self.models[class_node.name]["lineno"] = {"class": class_node.lineno}
self.models[class_node.name]["methods"] = []
KeyAttributesFinder(self.models[class_node.name]).visit(class_node)
class KeyAttributesFinder(ast.NodeVisitor):
def __init__(self, model):
self.model = model
def visit_Assign(self, assign_node):
if assign_node.targets[0].id in ("_name", "_inherit"):
if isinstance(assign_node.value, _ast.List):
value = map(lambda x: x.s, assign_node.value.elts)
self.model[assign_node.targets[0].id] = value if len(value) > 1 else value[0]
else:
self.model[assign_node.targets[0].id] = assign_node.value.s
if assign_node.targets[0].id == "_inherit" and not self.model.has_key("_name"):
self.model["_name"] = self.model["_inherit"]
if assign_node.targets[0].id == "_columns":
self.model[assign_node.targets[0].id] = self.parse_columns(assign_node.value)
self.model["lineno"]["_columns"] = assign_node.lineno
def visit_FunctionDef(self, function_node):
self.model["methods"].append({
"name": function_node.name,
"lineno": function_node.lineno,
"args": map(lambda x: get_value(x), function_node.args.args),
"defaults": map(lambda x: get_value(x), function_node.args.defaults),
"kwarg": function_node.args.kwarg,
"vararg": function_node.args.vararg,
})
def parse_columns(self, columns):
handle_args = {
"one2many": self.handle_one2many,
"many2one": self.handle_many2one,
"many2many": self.handle_many2many,
"selection": self.handle_selection,
"function": self.handle_function,
"related": self.handle_related,
}
to_return = []
for key, value in zip(columns.keys, columns.values):
row = {"name": key.s, "lineno": key.lineno}
if isinstance(getattr(value, "func", None), _ast.Name): # for ppl that overwrite fields class
row["type"] = value.func.id
continue
elif not hasattr(value, "func"): # drop hacks from other ppl
continue
row["type"] = value.func.attr
handle_args.get(row["type"], self.handle_generic)(value.args, row)
for kwarg in value.keywords:
if row["type"] == "related" and kwarg.arg == "type":
row["related_type"] = get_value(kwarg.value)
else:
row[kwarg.arg] = get_value(kwarg.value)
to_return.append(row)
return to_return
def handle_generic(self, args, row):
for arg in args:
if isinstance(arg, (_ast.Str, _ast.Call)) and not row.get("string"):
                # try progressively more lenient decodings of the label
                try:
                    row["string"] = unicode(parse_gettext(arg))
                except UnicodeDecodeError:
                    try:
                        row["string"] = parse_gettext(arg).decode("Utf-8")
                    except (UnicodeDecodeError, UnicodeEncodeError):
                        # fall back to the raw, undecoded value
                        row["string"] = parse_gettext(arg)
else:
raise
def handle_one2many(self, args, row):
for arg in args:
if isinstance(arg, (_ast.Str, _ast.Call)) and not row.get("relation"):
row["relation"] = parse_gettext(arg)
elif isinstance(arg, (_ast.Str, _ast.Call)) and not row.get("field"):
row["field"] = parse_gettext(arg)
elif isinstance(arg, (_ast.Str, _ast.Call)) and not row.get("string"):
row["string"] = parse_gettext(arg)
else:
raise
def handle_many2one(self, args, row):
for arg in args:
if isinstance(arg, (_ast.Str, _ast.Call)) and not row.get("relation"):
row["relation"] = parse_gettext(arg)
elif isinstance(arg, (_ast.Str, _ast.Call)) and not row.get("string"):
row["string"] = parse_gettext(arg)
elif isinstance(arg, (_ast.Str, _ast.Call)):
raise
else:
raise
def handle_selection(self, args, row):
for arg in args:
if isinstance(arg, (_ast.List, _ast.Tuple)):
row["selection"] = map(lambda x: [parse_gettext(x.elts[0]), parse_gettext(x.elts[1])], arg.elts)
row["is_function"] = False
elif isinstance(arg, _ast.Name):
row["selection"] = arg.id
row["is_function"] = True
elif isinstance(arg, _ast.Attribute):
row["selection"] = parse_attr(arg)
row["is_function"] = True
elif isinstance(arg, _ast.Call) and not row.get("selection") and (not isinstance(arg.func, _ast.Name) or arg.func.id not in ("_", "gettext", "translate")):
row["selection"] = parse_attr(arg.func)
row["is_function"] = True
elif isinstance(arg, (_ast.Str, _ast.Call)) and not row.get("string"):
row["string"] = parse_gettext(arg)
else:
raise
def handle_function(self, args, row):
for arg in args:
if isinstance(arg, _ast.Name):
row["function"] = arg.id
elif isinstance(arg, _ast.Attribute):
row["function"] = parse_attr(arg)
elif isinstance(arg, _ast.Lambda):
row["function"] = "lambda"
elif isinstance(arg, (_ast.Str, _ast.Call)) and not row.get("string"):
row["string"] = parse_gettext(arg)
else:
raise
def handle_many2many(self, args, row):
for arg in args:
if isinstance(arg, (_ast.Str, _ast.Call)) and not row.get("relation"):
row["relation"] = parse_gettext(arg)
elif isinstance(arg, (_ast.Str, _ast.Call)) and not row.get("relation_table"):
row["relation_table"] = parse_gettext(arg)
elif isinstance(arg, (_ast.Str, _ast.Call)) and not row.get("field1"):
row["field1"] = parse_gettext(arg)
elif isinstance(arg, (_ast.Str, _ast.Call)) and not row.get("field2"):
row["field2"] = parse_gettext(arg)
elif isinstance(arg, (_ast.Str, _ast.Call)) and not row.get("string"):
row["string"] = parse_gettext(arg)
elif isinstance(arg, (_ast.Str, _ast.Call)):
raise
else:
raise
def handle_related(self, args, row):
for arg in args:
if isinstance(arg, (_ast.Str, _ast.Call)) and not row.get("relation"):
row["relation"] = [parse_gettext(arg)]
elif isinstance(arg, (_ast.Str, _ast.Call)):
row["relation"].append(parse_gettext(arg))
else:
raise
def get_classes_from_string(string):
class_finder = ClassFinder()
class_finder.visit(ast.parse(string))
return class_finder.models
def get_views_from_string(string):
def get_field(view, name, default=None):
# stupid bug in BS, you can't search on 'name=' since name is the
# keyword for tag_name
field_model = filter(lambda x: x.get('name') == name, view('field', recursive=False))
if field_model:
return field_model[0]
return default
soup = BeautifulSoup(string, features="xml")
xml = {"views": {}, "actions": {}}
if not soup.openerp or not soup.openerp.data:
return xml
for view in soup.openerp.data("record"):
if not view.get("id"):
continue
if view.get("model") == 'ir.ui.view':
field_model = get_field(view, "model")
if field_model is None:
continue
xml["views"][view["id"]] = {"model": field_model.text, "string": format_xml(str(view)), "type": getattr(get_field(view, "type"), "text", None)}
elif view.get("model") == "ir.actions.act_window":
field_model = get_field(view, "res_model")
if field_model is None:
continue
xml["actions"][view["id"]] = {"model": field_model.text, "string": format_xml(str(view))}
if get_field(view, "view_type"):
xml["actions"][view["id"]]["view_type"] = get_field(view, "view_type").text
if get_field(view, "view_mode"):
xml["actions"][view["id"]]["view_mode"] = get_field(view, "view_mode").text
return xml
def get_classes_from_config_file(config_path="~/.openerp_serverrc"):
addons = {}
config_parser = ConfigParser()
config_parser.readfp(open(os.path.expanduser(config_path)))
addons_folders = map(lambda x: x.strip(), config_parser.get("options", "addons_path").split(","))
for addons_folder in addons_folders:
addons_folder = path(addons_folder)
for addon in addons_folder.dirs():
addons[addon.name] = {}
if addon.joinpath("__openerp__.py").exists():
addons[addon.name]["__openerp__"] = eval(open(addon.joinpath("__openerp__.py"), "r").read())
elif addon.joinpath("__terp__.py").exists():
addons[addon.name]["__openerp__"] = eval(open(addon.joinpath("__terp__.py"), "r").read())
else:
del addons[addon.name]
continue
addons[addon.name]["models"] = {}
for python_file in addon.walk("*.py"):
if python_file.name.startswith("_"):
continue
models = get_classes_from_string(open(python_file).read())
for model in models.keys():
models[model]["file"] = python_file
addons[addon.name]["models"].update(models)
addons[addon.name]["xml"] = {"views": {}, "actions": {}}
for xml_file in addon.walk("*.xml"):
xml = get_views_from_string(open(xml_file, "r").read())
addons[addon.name]["xml"]["views"].update(xml["views"])
addons[addon.name]["xml"]["actions"].update(xml["actions"])
return addons
if __name__ == '__main__':
import json
open("db.json", "w").write(json.dumps(get_classes_from_config_file(), indent=4))
#a = get_classes_from_string(open("/home/psycojoker/railnova/railfleet-modules/railfleet_maintenance_alstom/maintenance.py").read())
#from pprint import pprint
#pprint(a)
#from ipdb import set_trace; set_trace()
|
Psycojoker/vodka
|
vodka.py
|
Python
|
gpl-3.0
| 14,429
|
[
"VisIt"
] |
d94385eaf98b49cf52df3b8631bae4eafe78a3f6c03ceb33627fcb57503dbb89
|
""" Python test discovery, setup and run of test functions. """
import re
import fnmatch
import functools
import py
import inspect
import sys
import pytest
from _pytest.mark import MarkDecorator, MarkerError
from py._code.code import TerminalRepr
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
import _pytest
import pluggy
cutdir2 = py.path.local(_pytest.__file__).dirpath()
cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
NoneType = type(None)
NOTSET = object()
isfunction = inspect.isfunction
isclass = inspect.isclass
callable = py.builtin.callable
# used to work around a python2 exception info leak
exc_clear = getattr(sys, 'exc_clear', lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(''))
def filter_traceback(entry):
return entry.path != cutdir1 and not entry.path.relto(cutdir2)
def get_real_func(obj):
""" gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
"""
while hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, 'place_as'):
obj = obj.place_as
fslineno = py.code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
try:
return func.im_func
except AttributeError:
return func
def safe_getattr(object, name, default):
""" Like getattr but return default upon any Exception.
Attribute access can potentially fail for 'evil' Python objects.
See issue214
"""
try:
return getattr(object, name, default)
except Exception:
return default
class FixtureFunctionMarker:
def __init__(self, scope, params,
autouse=False, yieldctx=False, ids=None):
self.scope = scope
self.params = params
self.autouse = autouse
self.yieldctx = yieldctx
self.ids = ids
def __call__(self, function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a fixture factory function.
    This decorator can be used (with or without parameters) to define
a fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module", "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids)
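# A minimal usage sketch of the decorator defined above (names are
# illustrative, not part of pytest itself):
#     import smtplib
#     @pytest.fixture(scope="module")
#     def smtp():
#         return smtplib.SMTP("smtp.example.com")
#     def test_ehlo(smtp):  # fixture value injected by argument name
#         assert smtp.ehlo()[0] == 250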
def yield_fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a yield-fixture factory function
(EXPERIMENTAL).
This takes the same arguments as :py:func:`pytest.fixture` but
expects a fixture function to use a ``yield`` instead of a ``return``
statement to provide a fixture. See
http://pytest.org/en/latest/yieldfixture.html for more info.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, yieldctx=True)(scope)
else:
return FixtureFunctionMarker(scope, params, autouse,
yieldctx=True, ids=ids)
defaultfuncargprefixmarker = fixture()
def pyobj_property(name):
def get(self):
node = self.getparent(getattr(pytest, name))
if node is not None:
return node.obj
doc = "python %s object this node was collected from (can be None)." % (
name.lower(),)
return property(get, None, None, doc)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--fixtures', '--funcargs',
action="store_true", dest="showfixtures", default=False,
help="show available fixtures, sorted by plugin appearance")
parser.addini("usefixtures", type="args", default=[],
help="list of default fixtures to be used with this project")
parser.addini("python_files", type="args",
default=['test_*.py', '*_test.py'],
help="glob-style file patterns for Python test module discovery")
parser.addini("python_classes", type="args", default=["Test",],
help="prefixes or glob names for Python test class discovery")
parser.addini("python_functions", type="args", default=["test",],
help="prefixes or glob names for Python test function and "
"method discovery")
def pytest_cmdline_main(config):
if config.option.showfixtures:
showfixtures(config)
return 0
def pytest_generate_tests(metafunc):
# those alternative spellings are common - raise a specific error to alert
# the user
alt_spellings = ['parameterize', 'parametrise', 'parameterise']
for attr in alt_spellings:
if hasattr(metafunc.function, attr):
msg = "{0} has '{1}', spelling should be 'parametrize'"
raise MarkerError(msg.format(metafunc.function.__name__, attr))
try:
markers = metafunc.function.parametrize
except AttributeError:
return
for marker in markers:
metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
config.addinivalue_line("markers",
"parametrize(argnames, argvalues): call a test function multiple "
"times passing in different arguments in turn. argvalues generally "
"needs to be a list of values if argnames specifies only one name "
"or a list of tuples of values if argnames specifies multiple names. "
"Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
"decorated test function, one with arg1=1 and another with arg1=2."
"see http://pytest.org/latest/parametrize.html for more info and "
"examples."
)
config.addinivalue_line("markers",
"usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
"all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
)
def pytest_sessionstart(session):
session._fixturemanager = FixtureManager(session)
@pytest.hookimpl(trylast=True)
def pytest_namespace():
raises.Exception = pytest.fail.Exception
return {
'fixture': fixture,
'yield_fixture': yield_fixture,
'raises' : raises,
'collect': {
'Module': Module, 'Class': Class, 'Instance': Instance,
'Function': Function, 'Generator': Generator,
'_fillfuncargs': fillfixtures}
}
@fixture(scope="session")
def pytestconfig(request):
""" the pytest config object with access to command line opts."""
return request.config
@pytest.hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem):
testfunction = pyfuncitem.obj
if pyfuncitem._isyieldedfunction():
testfunction(*pyfuncitem._args)
else:
funcargs = pyfuncitem.funcargs
testargs = {}
for arg in pyfuncitem._fixtureinfo.argnames:
testargs[arg] = funcargs[arg]
testfunction(**testargs)
return True
def pytest_collect_file(path, parent):
ext = path.ext
if ext == ".py":
if not parent.session.isinitpath(path):
for pat in parent.config.getini('python_files'):
if path.fnmatch(pat):
break
else:
return
ihook = parent.session.gethookproxy(path)
return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def pytest_pycollect_makemodule(path, parent):
return Module(path, parent)
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
outcome = yield
res = outcome.get_result()
if res is not None:
raise StopIteration
# nothing was collected elsewhere, let's do it here
if isclass(obj):
if collector.istestclass(obj, name):
Class = collector._getcustomclass("Class")
outcome.force_result(Class(name, parent=collector))
elif collector.istestfunction(obj, name):
# mock seems to store unbound methods (issue473), normalize it
obj = getattr(obj, "__func__", obj)
if not isfunction(obj):
collector.warn(code="C2", message=
"cannot collect %r because it is not a function."
% name, )
if getattr(obj, "__test__", True):
if is_generator(obj):
res = Generator(name, parent=collector)
else:
res = list(collector._genfunctions(name, obj))
outcome.force_result(res)
def is_generator(func):
try:
return py.code.getrawcode(func).co_flags & 32 # generator function
except AttributeError: # builtin functions have no bytecode
# assume them to not be generators
return False
class PyobjContext(object):
module = pyobj_property("Module")
cls = pyobj_property("Class")
instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
def obj():
def fget(self):
try:
return self._obj
except AttributeError:
self._obj = obj = self._getobj()
return obj
def fset(self, value):
self._obj = value
return property(fget, fset, None, "underlying python object")
obj = obj()
def _getobj(self):
return getattr(self.parent.obj, self.name)
def getmodpath(self, stopatmodule=True, includemodule=False):
""" return python path relative to the containing module. """
chain = self.listchain()
chain.reverse()
parts = []
for node in chain:
if isinstance(node, Instance):
continue
name = node.name
if isinstance(node, Module):
assert name.endswith(".py")
name = name[:-3]
if stopatmodule:
if includemodule:
parts.append(name)
break
parts.append(name)
parts.reverse()
s = ".".join(parts)
return s.replace(".[", "[")
def _getfslineno(self):
return getfslineno(self.obj)
def reportinfo(self):
# XXX caching?
obj = self.obj
if hasattr(obj, 'compat_co_firstlineno'):
# nose compatibility
fspath = sys.modules[obj.__module__].__file__
if fspath.endswith(".pyc"):
fspath = fspath[:-1]
lineno = obj.compat_co_firstlineno
else:
fspath, lineno = getfslineno(obj)
modpath = self.getmodpath()
assert isinstance(lineno, int)
return fspath, lineno, modpath
class PyCollector(PyobjMixin, pytest.Collector):
def funcnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_functions', name)
def isnosetest(self, obj):
""" Look for the __test__ attribute, which is applied by the
@nose.tools.istest decorator
"""
return safe_getattr(obj, '__test__', False)
def classnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_classes', name)
def istestfunction(self, obj, name):
return (
(self.funcnamefilter(name) or self.isnosetest(obj))
and safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
)
def istestclass(self, obj, name):
return self.classnamefilter(name) or self.isnosetest(obj)
def _matches_prefix_or_glob_option(self, option_name, name):
"""
checks if the given name matches the prefix or glob-pattern defined
in ini configuration.
"""
for option in self.config.getini(option_name):
if name.startswith(option):
return True
# check that name looks like a glob-string before calling fnmatch
# because this is called for every name in each collected module,
# and fnmatch is somewhat expensive to call
elif ('*' in option or '?' in option or '[' in option) and \
fnmatch.fnmatch(name, option):
return True
return False
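    # Example (illustrative, not part of the original source): with an ini
    # section such as
    #
    #   [pytest]
    #   python_functions = test_ check_*
    #
    # the name "test_foo" matches by prefix and "check_bar" matches via
    # fnmatch on the glob pattern, while "helper" matches neither.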
def collect(self):
if not getattr(self.obj, "__test__", True):
return []
# NB. we avoid random getattrs and peek in the __dict__ instead
# (XXX originally introduced from a PyPy need, still true?)
dicts = [getattr(self.obj, '__dict__', {})]
for basecls in inspect.getmro(self.obj.__class__):
dicts.append(basecls.__dict__)
seen = {}
l = []
for dic in dicts:
for name, obj in dic.items():
if name in seen:
continue
seen[name] = True
res = self.makeitem(name, obj)
if res is None:
continue
if not isinstance(res, list):
res = [res]
l.extend(res)
l.sort(key=lambda item: item.reportinfo()[:2])
return l
def makeitem(self, name, obj):
#assert self.ihook.fspath == self.fspath, self
return self.ihook.pytest_pycollect_makeitem(
collector=self, name=name, obj=obj)
def _genfunctions(self, name, funcobj):
module = self.getparent(Module).obj
clscol = self.getparent(Class)
cls = clscol and clscol.obj or None
transfer_markers(funcobj, cls, module)
fm = self.session._fixturemanager
fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
metafunc = Metafunc(funcobj, fixtureinfo, self.config,
cls=cls, module=module)
methods = []
if hasattr(module, "pytest_generate_tests"):
methods.append(module.pytest_generate_tests)
if hasattr(cls, "pytest_generate_tests"):
methods.append(cls().pytest_generate_tests)
if methods:
self.ihook.pytest_generate_tests.call_extra(methods,
dict(metafunc=metafunc))
else:
self.ihook.pytest_generate_tests(metafunc=metafunc)
Function = self._getcustomclass("Function")
if not metafunc._calls:
yield Function(name, parent=self, fixtureinfo=fixtureinfo)
else:
# add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
add_funcarg_pseudo_fixture_def(self, metafunc, fm)
for callspec in metafunc._calls:
subname = "%s[%s]" %(name, callspec.id)
yield Function(name=subname, parent=self,
callspec=callspec, callobj=funcobj,
fixtureinfo=fixtureinfo,
keywords={callspec.id:True})
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
    # this function will transform all collected calls to a function
    # if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname,
scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(fixturemanager, '', argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist, False, False)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def get_direct_param_fixture_func(request):
return request.param
class FuncFixtureInfo:
def __init__(self, argnames, names_closure, name2fixturedefs):
self.argnames = argnames
self.names_closure = names_closure
self.name2fixturedefs = name2fixturedefs
def _marked(func, mark):
""" Returns True if :func: is already marked with :mark:, False otherwise.
    This can happen if the marker is applied to a class and the test file is
    invoked more than once.
"""
try:
func_mark = getattr(func, mark.name)
except AttributeError:
return False
return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
def transfer_markers(funcobj, cls, mod):
# XXX this should rather be code in the mark plugin or the mark
# plugin should merge with the python plugin.
for holder in (cls, mod):
try:
pytestmark = holder.pytestmark
except AttributeError:
continue
if isinstance(pytestmark, list):
for mark in pytestmark:
if not _marked(funcobj, mark):
mark(funcobj)
else:
if not _marked(funcobj, pytestmark):
pytestmark(funcobj)
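# Example (illustrative): given a module-level marker such as
#
#   pytestmark = pytest.mark.webtest
#
# transfer_markers() applies the mark to every collected test function in
# that module, with _marked() guarding against duplicate application.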
class Module(pytest.File, PyCollector):
""" Collector for test classes and functions. """
def _getobj(self):
return self._memoizedcall('_obj', self._importtestmodule)
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Module, self).collect()
def _importtestmodule(self):
# we assume we are only called once per module
try:
mod = self.fspath.pyimport(ensuresyspath="append")
except SyntaxError:
raise self.CollectError(
py.code.ExceptionInfo().getrepr(style="short"))
except self.fspath.ImportMismatchError:
e = sys.exc_info()[1]
raise self.CollectError(
"import file mismatch:\n"
"imported module %r has this __file__ attribute:\n"
" %s\n"
"which is not the same as the test file we want to collect:\n"
" %s\n"
"HINT: remove __pycache__ / .pyc files and/or use a "
"unique basename for your test file modules"
% e.args
)
#print "imported test module", mod
self.config.pluginmanager.consider_module(mod)
return mod
def setup(self):
setup_module = xunitsetup(self.obj, "setUpModule")
if setup_module is None:
setup_module = xunitsetup(self.obj, "setup_module")
if setup_module is not None:
#XXX: nose compat hack, move to nose plugin
            # if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if inspect.getargspec(setup_module)[0]:
setup_module(self.obj)
else:
setup_module()
fin = getattr(self.obj, 'tearDownModule', None)
if fin is None:
fin = getattr(self.obj, 'teardown_module', None)
if fin is not None:
#XXX: nose compat hack, move to nose plugin
# if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if inspect.getargspec(fin)[0]:
finalizer = lambda: fin(self.obj)
else:
finalizer = fin
self.addfinalizer(finalizer)
class Class(PyCollector):
""" Collector for test methods. """
def collect(self):
if hasinit(self.obj):
self.warn("C1", "cannot collect test class %r because it has a "
"__init__ constructor" % self.obj.__name__)
return []
return [self._getcustomclass("Instance")(name="()", parent=self)]
def setup(self):
setup_class = xunitsetup(self.obj, 'setup_class')
if setup_class is not None:
setup_class = getattr(setup_class, 'im_func', setup_class)
setup_class = getattr(setup_class, '__func__', setup_class)
setup_class(self.obj)
fin_class = getattr(self.obj, 'teardown_class', None)
if fin_class is not None:
fin_class = getattr(fin_class, 'im_func', fin_class)
fin_class = getattr(fin_class, '__func__', fin_class)
self.addfinalizer(lambda: fin_class(self.obj))
class Instance(PyCollector):
def _getobj(self):
obj = self.parent.obj()
return obj
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Instance, self).collect()
def newinstance(self):
self.obj = self._getobj()
return self.obj
class FunctionMixin(PyobjMixin):
""" mixin for the code common to Function and Generator.
"""
def setup(self):
""" perform setup for this test function. """
if hasattr(self, '_preservedparent'):
obj = self._preservedparent
elif isinstance(self.parent, Instance):
obj = self.parent.newinstance()
self.obj = self._getobj()
else:
obj = self.parent.obj
if inspect.ismethod(self.obj):
setup_name = 'setup_method'
teardown_name = 'teardown_method'
else:
setup_name = 'setup_function'
teardown_name = 'teardown_function'
setup_func_or_method = xunitsetup(obj, setup_name)
if setup_func_or_method is not None:
setup_func_or_method(self.obj)
fin = getattr(obj, teardown_name, None)
if fin is not None:
self.addfinalizer(lambda: fin(self.obj))
def _prunetraceback(self, excinfo):
if hasattr(self, '_obj') and not self.config.option.fulltrace:
code = py.code.Code(get_real_func(self.obj))
path, firstlineno = code.path, code.firstlineno
traceback = excinfo.traceback
ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
if ntraceback == traceback:
ntraceback = ntraceback.cut(path=path)
if ntraceback == traceback:
#ntraceback = ntraceback.cut(excludepath=cutdir2)
ntraceback = ntraceback.filter(filter_traceback)
if not ntraceback:
ntraceback = traceback
excinfo.traceback = ntraceback.filter()
# issue364: mark all but first and last frames to
# only show a single-line message for each frame
if self.config.option.tbstyle == "auto":
if len(excinfo.traceback) > 2:
for entry in excinfo.traceback[1:-1]:
entry.set_repr_style('short')
def _repr_failure_py(self, excinfo, style="long"):
if excinfo.errisinstance(pytest.fail.Exception):
if not excinfo.value.pytrace:
return str(excinfo.value)
return super(FunctionMixin, self)._repr_failure_py(excinfo,
style=style)
def repr_failure(self, excinfo, outerr=None):
assert outerr is None, "XXX outerr usage is deprecated"
style = self.config.option.tbstyle
if style == "auto":
style = "long"
return self._repr_failure_py(excinfo, style=style)
class Generator(FunctionMixin, PyCollector):
def collect(self):
# test generators are seen as collectors but they also
# invoke setup/teardown on popular request
# (induced by the common "test_*" naming shared with normal tests)
self.session._setupstate.prepare(self)
# see FunctionMixin.setup and test_setupstate_is_preserved_134
self._preservedparent = self.parent.obj
l = []
seen = {}
for i, x in enumerate(self.obj()):
name, call, args = self.getcallargs(x)
if not callable(call):
raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
if name is None:
name = "[%d]" % i
else:
name = "['%s']" % name
if name in seen:
raise ValueError("%r generated tests with non-unique name %r" %(self, name))
seen[name] = True
l.append(self.Function(name, self, args=args, callobj=call))
return l
def getcallargs(self, obj):
if not isinstance(obj, (tuple, list)):
obj = (obj,)
        # explicit naming
if isinstance(obj[0], py.builtin._basestring):
name = obj[0]
obj = obj[1:]
else:
name = None
call, args = obj[0], obj[1:]
return name, call, args
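# Example of the (deprecated) yield-test protocol handled by Generator above
# (hypothetical test module code):
#
#   def test_numbers():
#       for i in range(3):
#           yield check_even, i   # collected as test_numbers[0] .. [2]
#
# An optional leading string in the yielded tuple supplies an explicit name.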
def hasinit(obj):
init = getattr(obj, '__init__', None)
if init:
if init != object.__init__:
return True
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
_notexists = object()
class CallSpec2(object):
def __init__(self, metafunc):
self.metafunc = metafunc
self.funcargs = {}
self._idlist = []
self.params = {}
self._globalid = _notexists
self._globalid_args = set()
self._globalparam = _notexists
self._arg2scopenum = {} # used for sorting parametrized resources
self.keywords = {}
self.indices = {}
def copy(self, metafunc):
cs = CallSpec2(self.metafunc)
cs.funcargs.update(self.funcargs)
cs.params.update(self.params)
cs.keywords.update(self.keywords)
cs.indices.update(self.indices)
cs._arg2scopenum.update(self._arg2scopenum)
cs._idlist = list(self._idlist)
cs._globalid = self._globalid
cs._globalid_args = self._globalid_args
cs._globalparam = self._globalparam
return cs
def _checkargnotcontained(self, arg):
if arg in self.params or arg in self.funcargs:
raise ValueError("duplicate %r" %(arg,))
def getparam(self, name):
try:
return self.params[name]
except KeyError:
if self._globalparam is _notexists:
raise ValueError(name)
return self._globalparam
@property
def id(self):
return "-".join(map(str, filter(None, self._idlist)))
def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
param_index):
for arg,val in zip(argnames, valset):
self._checkargnotcontained(arg)
valtype_for_arg = valtypes[arg]
getattr(self, valtype_for_arg)[arg] = val
self.indices[arg] = param_index
self._arg2scopenum[arg] = scopenum
if val is _notexists:
self._emptyparamspecified = True
self._idlist.append(id)
self.keywords.update(keywords)
def setall(self, funcargs, id, param):
for x in funcargs:
self._checkargnotcontained(x)
self.funcargs.update(funcargs)
if id is not _notexists:
self._idlist.append(id)
if param is not _notexists:
assert self._globalparam is _notexists
self._globalparam = param
for arg in funcargs:
self._arg2scopenum[arg] = scopenum_function
class FuncargnamesCompatAttr:
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
return self.fixturenames
class Metafunc(FuncargnamesCompatAttr):
"""
Metafunc objects are passed to the ``pytest_generate_tests`` hook.
They help to inspect a test function and to generate tests according to
test configuration or values specified in the class or module where a
test function is defined.
:ivar fixturenames: set of fixture names required by the test function
:ivar function: underlying python test function
:ivar cls: class object where the test function is defined in or ``None``.
:ivar module: the module object where the test function is defined in.
:ivar config: access to the :class:`_pytest.config.Config` object for the
test session.
:ivar funcargnames:
.. deprecated:: 2.3
Use ``fixturenames`` instead.
"""
def __init__(self, function, fixtureinfo, config, cls=None, module=None):
self.config = config
self.module = module
self.function = function
self.fixturenames = fixtureinfo.names_closure
self._arg2fixturedefs = fixtureinfo.name2fixturedefs
self.cls = cls
self._calls = []
self._ids = py.builtin.set()
def parametrize(self, argnames, argvalues, indirect=False, ids=None,
scope=None):
""" Add new invocations to the underlying test function using the list
of argvalues for the given argnames. Parametrization is performed
during the collection phase. If you need to setup expensive resources
see about setting indirect to do it rather at test setup time.
:arg argnames: a comma-separated string denoting one or more argument
names, or a list/tuple of argument strings.
:arg argvalues: The list of argvalues determines how often a
test is invoked with different argument values. If only one
argname was specified argvalues is a list of simple values. If N
argnames were specified, argvalues must be a list of N-tuples,
where each tuple-element specifies a value for its respective
argname.
        :arg indirect: a list of argument names (a subset of argnames), or a
            boolean.  If True, the list contains all names from argnames.
            Each argvalue corresponding to an argname in this list will
be passed as request.param to its respective argname fixture
function so that it can perform more expensive setups during the
setup phase of a test rather than at collection time.
:arg ids: list of string ids, or a callable.
            If strings, each corresponds to an entry in argvalues so that it
            becomes part of the test id.
            If callable, it should take one argument (a single argvalue) and
            return a string or None; if None, the automatically generated id for that
argument will be used.
If no ids are provided they will be generated automatically from
the argvalues.
:arg scope: if specified it denotes the scope of the parameters.
The scope is used for grouping tests by parameter instances.
            It will also override any fixture-function defined scope, allowing
            a dynamic scope to be set using test context or configuration.
"""
# individual parametrized argument sets can be wrapped in a series
# of markers in which case we unwrap the values and apply the mark
# at Function init
newkeywords = {}
unwrapped_argvalues = []
for i, argval in enumerate(argvalues):
while isinstance(argval, MarkDecorator):
newmark = MarkDecorator(argval.markname,
argval.args[:-1], argval.kwargs)
newmarks = newkeywords.setdefault(i, {})
newmarks[newmark.markname] = newmark
argval = argval.args[-1]
unwrapped_argvalues.append(argval)
argvalues = unwrapped_argvalues
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if len(argnames) == 1:
argvalues = [(val,) for val in argvalues]
if not argvalues:
argvalues = [(_notexists,) * len(argnames)]
if scope is None:
scope = "function"
scopenum = scopes.index(scope)
valtypes = {}
for arg in argnames:
if arg not in self.fixturenames:
raise ValueError("%r uses no fixture %r" %(self.function, arg))
if indirect is True:
valtypes = dict.fromkeys(argnames, "params")
elif indirect is False:
valtypes = dict.fromkeys(argnames, "funcargs")
elif isinstance(indirect, (tuple, list)):
valtypes = dict.fromkeys(argnames, "funcargs")
for arg in indirect:
if arg not in argnames:
raise ValueError("indirect given to %r: fixture %r doesn't exist" %(
self.function, arg))
valtypes[arg] = "params"
idfn = None
if callable(ids):
idfn = ids
ids = None
if ids and len(ids) != len(argvalues):
raise ValueError('%d tests specified with %d ids' %(
len(argvalues), len(ids)))
if not ids:
ids = idmaker(argnames, argvalues, idfn)
newcalls = []
for callspec in self._calls or [CallSpec2(self)]:
for param_index, valset in enumerate(argvalues):
assert len(valset) == len(argnames)
newcallspec = callspec.copy(self)
newcallspec.setmulti(valtypes, argnames, valset, ids[param_index],
newkeywords.get(param_index, {}), scopenum,
param_index)
newcalls.append(newcallspec)
self._calls = newcalls
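    # Usage sketch (hypothetical example, not part of this module): a hook in
    # a conftest.py or test module would typically call parametrize like
    #
    #   def pytest_generate_tests(metafunc):
    #       if "db_backend" in metafunc.fixturenames:
    #           metafunc.parametrize("db_backend", ["sqlite", "postgres"],
    #                                indirect=True, scope="session")
    #
    # which generates one call per value and routes each value to the
    # "db_backend" fixture via request.param.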
def addcall(self, funcargs=None, id=_notexists, param=_notexists):
""" (deprecated, use parametrize) Add a new call to the underlying
test function during the collection phase of a test run. Note that
        addcall() runs during the test collection phase, prior to and
        independently of actual test execution. You should only use addcall()
if you need to specify multiple arguments of a test function.
:arg funcargs: argument keyword dictionary used when invoking
the test function.
:arg id: used for reporting and identification purposes. If you
don't supply an `id` an automatic unique id will be generated.
:arg param: a parameter which will be exposed to a later fixture function
invocation through the ``request.param`` attribute.
"""
assert funcargs is None or isinstance(funcargs, dict)
if funcargs is not None:
for name in funcargs:
if name not in self.fixturenames:
pytest.fail("funcarg %r not used in this function." % name)
else:
funcargs = {}
if id is None:
raise ValueError("id=None not allowed")
if id is _notexists:
id = len(self._calls)
id = str(id)
if id in self._ids:
raise ValueError("duplicate id %r" % id)
self._ids.add(id)
cs = CallSpec2(self)
cs.setall(funcargs, id, param)
self._calls.append(cs)
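# Usage sketch for the deprecated API above (hypothetical argument names):
#
#   def pytest_generate_tests(metafunc):
#       metafunc.addcall(funcargs=dict(arg1=1, arg2=2), id="basic")
#
# adds a single invocation whose id appears in the test id as "[basic]".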
def _idval(val, argname, idx, idfn):
if idfn:
try:
s = idfn(val)
if s:
return s
except Exception:
pass
if isinstance(val, (float, int, str, bool, NoneType)):
return str(val)
elif isinstance(val, REGEX_TYPE):
return val.pattern
elif enum is not None and isinstance(val, enum.Enum):
return str(val)
elif isclass(val) and hasattr(val, '__name__'):
return val.__name__
return str(argname)+str(idx)
def _idvalset(idx, valset, argnames, idfn):
this_id = [_idval(val, argname, idx, idfn)
for val, argname in zip(valset, argnames)]
return "-".join(this_id)
def idmaker(argnames, argvalues, idfn=None):
ids = [_idvalset(valindex, valset, argnames, idfn)
for valindex, valset in enumerate(argvalues)]
if len(set(ids)) < len(ids):
# user may have provided a bad idfn which means the ids are not unique
ids = [str(i) + testid for i, testid in enumerate(ids)]
return ids
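# Example (derived from the helpers above): for two argnames and simple
# values,
#
#   idmaker(("a", "b"), [(1, "x"), (2, "y")])  ->  ["1-x", "2-y"]
#
# while values _idval cannot represent fall back to "<argname><index>"
# style ids such as "a0".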
def showfixtures(config):
from _pytest.main import wrap_session
return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
import _pytest.config
session.perform_collect()
curdir = py.path.local()
tw = _pytest.config.create_terminal_writer(config)
verbose = config.getvalue("verbose")
fm = session._fixturemanager
available = []
for argname, fixturedefs in fm._arg2fixturedefs.items():
assert fixturedefs is not None
if not fixturedefs:
continue
fixturedef = fixturedefs[-1]
loc = getlocation(fixturedef.func, curdir)
available.append((len(fixturedef.baseid),
fixturedef.func.__module__,
curdir.bestrelpath(loc),
fixturedef.argname, fixturedef))
available.sort()
currentmodule = None
for baseid, module, bestrel, argname, fixturedef in available:
if currentmodule != module:
if not module.startswith("_pytest."):
tw.line()
tw.sep("-", "fixtures defined from %s" %(module,))
currentmodule = module
if verbose <= 0 and argname[0] == "_":
continue
if verbose > 0:
funcargspec = "%s -- %s" %(argname, bestrel,)
else:
funcargspec = argname
tw.line(funcargspec, green=True)
loc = getlocation(fixturedef.func, curdir)
doc = fixturedef.func.__doc__ or ""
if doc:
for line in doc.strip().split("\n"):
tw.line(" " + line.strip())
else:
tw.line(" %s: no docstring available" %(loc,),
red=True)
def getlocation(function, curdir):
import inspect
fn = py.path.local(inspect.getfile(function))
lineno = py.builtin._getcode(function).co_firstlineno
if fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" %(fn, lineno+1)
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
""" assert that a code block/function call raises @expected_exception
and raise a failure exception otherwise.
This helper produces a ``py.code.ExceptionInfo()`` object.
If using Python 2.5 or above, you may use this function as a
context manager::
>>> with raises(ZeroDivisionError):
... 1/0
Or you can specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
A third possibility is to use a string to be executed::
>>> raises(ZeroDivisionError, "f(0)")
<ExceptionInfo ...>
Performance note:
-----------------
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``py.code.ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
"""
__tracebackhide__ = True
if expected_exception is AssertionError:
        # we want to catch an AssertionError
# replace our subclass with the builtin one
# see https://github.com/pytest-dev/pytest/issues/176
from _pytest.assertion.util import BuiltinAssertionError \
as expected_exception
msg = ("exceptions must be old-style classes or"
" derived from BaseException, not %s")
if isinstance(expected_exception, tuple):
for exc in expected_exception:
if not isclass(exc):
raise TypeError(msg % type(exc))
elif not isclass(expected_exception):
raise TypeError(msg % type(expected_exception))
if not args:
return RaisesContext(expected_exception)
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
#print "raises frame scope: %r" % frame.f_locals
try:
code = py.code.Source(code).compile()
py.builtin.exec_(code, frame.f_globals, loc)
            # XXX didn't f_globals == f_locals mean something special?
            # this is destroyed here ...
except expected_exception:
return py.code.ExceptionInfo()
else:
func = args[0]
try:
func(*args[1:], **kwargs)
except expected_exception:
return py.code.ExceptionInfo()
pytest.fail("DID NOT RAISE")
class RaisesContext(object):
def __init__(self, expected_exception):
self.expected_exception = expected_exception
self.excinfo = None
def __enter__(self):
self.excinfo = object.__new__(py.code.ExceptionInfo)
return self.excinfo
def __exit__(self, *tp):
__tracebackhide__ = True
if tp[0] is None:
pytest.fail("DID NOT RAISE")
if sys.version_info < (2, 7):
# py26: on __exit__() exc_value often does not contain the
# exception value.
# http://bugs.python.org/issue7853
if not isinstance(tp[1], BaseException):
exc_type, value, traceback = tp
tp = exc_type, exc_type(value), traceback
self.excinfo.__init__(tp)
return issubclass(self.excinfo.type, self.expected_exception)
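# Usage sketch mirroring the raises() docstring above:
#
#   with raises(ValueError) as excinfo:
#       int("not a number")
#   assert "invalid literal" in str(excinfo.value)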
#
# the basic pytest Function item
#
class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
""" a Function Item is responsible for setting up and executing a
Python test function.
"""
_genid = None
def __init__(self, name, parent, args=None, config=None,
callspec=None, callobj=NOTSET, keywords=None, session=None,
fixtureinfo=None):
super(Function, self).__init__(name, parent, config=config,
session=session)
self._args = args
if callobj is not NOTSET:
self.obj = callobj
self.keywords.update(self.obj.__dict__)
if callspec:
self.callspec = callspec
self.keywords.update(callspec.keywords)
if keywords:
self.keywords.update(keywords)
if fixtureinfo is None:
fixtureinfo = self.session._fixturemanager.getfixtureinfo(
self.parent, self.obj, self.cls,
funcargs=not self._isyieldedfunction())
self._fixtureinfo = fixtureinfo
self.fixturenames = fixtureinfo.names_closure
self._initrequest()
def _initrequest(self):
self.funcargs = {}
if self._isyieldedfunction():
assert not hasattr(self, "callspec"), (
"yielded functions (deprecated) cannot have funcargs")
else:
if hasattr(self, "callspec"):
callspec = self.callspec
assert not callspec.funcargs
self._genid = callspec.id
if hasattr(callspec, "param"):
self.param = callspec.param
self._request = FixtureRequest(self)
@property
def function(self):
"underlying python 'function' object"
return getattr(self.obj, 'im_func', self.obj)
def _getobj(self):
name = self.name
i = name.find("[") # parametrization
if i != -1:
name = name[:i]
return getattr(self.parent.obj, name)
@property
def _pyfuncitem(self):
"(compatonly) for code expecting pytest-2.2 style request objects"
return self
def _isyieldedfunction(self):
return getattr(self, "_args", None) is not None
def runtest(self):
""" execute the underlying test function. """
self.ihook.pytest_pyfunc_call(pyfuncitem=self)
def setup(self):
        # check if parametrization happened with an empty list
try:
self.callspec._emptyparamspecified
except AttributeError:
pass
else:
fs, lineno = self._getfslineno()
pytest.skip("got empty parameter set, function %s at %s:%d" %(
self.function.__name__, fs, lineno))
super(Function, self).setup()
fillfixtures(self)
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError("%s not available in %s-scoped context" % (
scopename, self.scope))
return property(provide, None, None, func.__doc__)
return decoratescope
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
        #: Scope string, one of "function", "class", "module", "session"
self.scope = "function"
self._funcargs = {}
self._fixturedefs = {}
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self.fixturenames = fixtureinfo.names_closure
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
            # we arrive here because of a dynamic call to
            # getfuncargvalue(argname) which was naturally
            # not known at parsing/collection time
fixturedefs = self._fixturemanager.getfixturedefs(
argname, self._pyfuncitem.parent.nodeid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(pytest.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
if function is not None:
return py.builtin._getimself(function)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(pytest.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
""" add finalizer/teardown function to be called after the
        last test within the requesting test context has finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
try:
self.node.keywords[marker.markname] = marker
except AttributeError:
raise ValueError(marker)
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfuncargvalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
if not hasattr(self.config, '_setupcache'):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
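    # Usage sketch for the deprecated helper above (hypothetical example,
    # assumes ``import smtplib`` in the factory module):
    #
    #   def pytest_funcarg__smtp(request):
    #       return request.cached_setup(
    #           setup=lambda: smtplib.SMTP("localhost"),
    #           teardown=lambda conn: conn.close(),
    #           scope="session")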
def getfuncargvalue(self, argname):
""" Dynamically retrieve a named fixture function argument.
        As of pytest-2.3, it is easier and usually better to access other
        fixture values by stating them as input arguments in the fixture
        function.  If you can only decide whether to use another fixture at
        test setup time, you may use this function to retrieve it inside a
        fixture function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
def _get_active_fixturedef(self, argname):
try:
return self._fixturedefs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef
raise
# remove indent to prevent the python3 exception
# from leaking into the call
result = self._getfuncargvalue(fixturedef)
self._funcargs[argname] = result
self._fixturedefs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
l = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
l.reverse()
return l
l.append(fixturedef)
current = current._parent_request
def _getfuncargvalue(self, fixturedef):
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
# clear sys.exc_info before invoking the fixture (python bug?)
        # if it's not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
val = fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(fixturedef.finish,
subrequest.node)
return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
pytest.fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" %(
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = inspect.formatargspec(*inspect.getargspec(factory))
lines.append("%s:%d: def %s%s" %(
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node
return node
def __repr__(self):
return "<FixtureRequest for %r>" %(self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self.addfinalizer = fixturedef.addfinalizer
self._pyfuncitem = request._pyfuncitem
self._funcargs = request._funcargs
self._fixturedefs = request._fixturedefs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self.fixturenames = request.fixturenames
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
    has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
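# Example (follows directly from the index comparison above): with
# scopes == ["session", "module", "class", "function"],
# scopemismatch("session", "function") is True (a broader-scoped fixture
# must not depend on a narrower-scoped one), while
# scopemismatch("function", "module") is False.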
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
            stack = stack[:-1]  # the last fixture raised an error, let's present
# it at the requesting side
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except IOError:
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno+1))
else:
addline("file %s, line %s" % (fspath, lineno+1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith('def'):
break
if msg is None:
fm = self.request._fixturemanager
available = []
for name, fixturedef in fm._arg2fixturedefs.items():
parentid = self.request._pyfuncitem.parent.nodeid
faclist = list(fm._matchfactories(fixturedef, parentid))
if faclist:
available.append(name)
msg = "fixture %r not found" % (self.argname,)
msg += "\n available fixtures: %s" %(", ".join(available),)
msg += "\n use 'py.test --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
#tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
for line in self.errorstring.split("\n"):
tw.line(" " + line.strip(), red=True)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno+1))
class FixtureManager:
"""
    pytest fixture definitions and related information are stored and
    managed by this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
    as the closure of the fixtures needed to set up the initial fixtures,
    i.e. fixtures needed by fixture functions themselves are appended
    to the fixturenames list.
    During the test-setup phase all fixturenames are instantiated, retrieved
    by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
if cls is not None:
startindex = 1
else:
startindex = None
argnames = getfuncargnames(func, startindex)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != "/":
nodeid = nodeid.replace(p.sep, "/")
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i+1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures, starting with the given
        # fixturenames as the initial set.  As we have to visit all
        # factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
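    # Example (illustrative): if a test needs "db" and the "db" fixture
    # itself needs "tmpdir", the loop above grows the closure to
    # ["db", "tmpdir"] (plus any autouse names), and arg2fixturedefs maps
    # each name to its list of matching FixtureDefs.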
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
# skip directly parametrized arguments
if argname not in func_params:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
obj = getattr(holderobj, name, None)
if not callable(obj):
continue
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
marker = getfixturemarker(obj)
if marker is None:
if not name.startswith(self._argprefix):
continue
marker = defaultfuncargprefixmarker
name = name[len(self._argprefix):]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
assert not name.startswith(self._argprefix)
fixturedef = FixtureDef(self, nodeid, name, obj,
marker.scope, marker.params,
yieldctx=marker.yieldctx,
unittest=unittest, ids=marker.ids)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixturedef.has_location:
faclist.append(fixturedef)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixturedef)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodeid.startswith(fixturedef.baseid):
yield fixturedef
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno+1)
source = py.code.Source(fixturefunc)
pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
if yieldctx:
if not is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="yield_fixture requires yield statement in function")
iter = fixturefunc(**kwargs)
next = getattr(iter, "__next__", None)
if next is None:
next = getattr(iter, "next")
res = next()
def teardown():
try:
next()
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc,
"yield_fixture function has more than one 'yield'")
request.addfinalizer(teardown)
else:
if is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="pytest.fixture functions cannot use ``yield``. "
"Instead write and return an inner function/generator "
"and let the consumer call and iterate over it.")
res = fixturefunc(**kwargs)
return res
class FixtureDef:
""" A container for a factory definition. """
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
yieldctx, unittest=False, ids=None):
self._fixturemanager = fixturemanager
self.baseid = baseid or ''
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scopes.index(scope or "function")
self.params = params
startindex = unittest and 1 or None
self.argnames = getfuncargnames(func, startindex=startindex)
self.yieldctx = yieldctx
self.unittest = unittest
self.ids = ids
self._finalizer = []
def addfinalizer(self, finalizer):
self._finalizer.append(finalizer)
def finish(self):
try:
while self._finalizer:
func = self._finalizer.pop()
func()
finally:
# even if finalization fails, we invalidate
# the cached fixture value
if hasattr(self, "cached_result"):
del self.cached_result
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
kwargs = {}
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixturedef.cached_result
request._check_scope(argname, request.scope, fixturedef.scope)
kwargs[argname] = result
if argname != "request":
fixturedef.addfinalizer(self.finish)
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish()
assert not hasattr(self, "cached_result")
fixturefunc = self.func
if self.unittest:
if request.instance is not None:
# bind the unbound method to the TestCase instance
fixturefunc = self.func.__get__(request.instance)
else:
# the fixture function needs to be bound to the actual
# request.instance so that code working with "self" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(self.func)
if fixturefunc != self.func:
fixturefunc = fixturefunc.__get__(request.instance)
try:
result = call_fixture_func(fixturefunc, request, kwargs,
self.yieldctx)
except Exception:
self.cached_result = (None, my_cache_key, sys.exc_info())
raise
self.cached_result = (result, my_cache_key, None)
return result
def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
if mock is not None:
return len([p for p in patchings
if not p.attribute_name and p.new is mock.DEFAULT])
return len(patchings)
def getfuncargnames(function, startindex=None):
# XXX merge with main.py's varnames
#assert not inspect.isclass(function)
realfunction = function
while hasattr(realfunction, "__wrapped__"):
realfunction = realfunction.__wrapped__
if startindex is None:
startindex = inspect.ismethod(function) and 1 or 0
if realfunction != function:
startindex += num_mock_patch_args(function)
function = realfunction
if isinstance(function, functools.partial):
argnames = inspect.getargs(py.code.getrawcode(function.func))[0]
partial = function
argnames = argnames[len(partial.args):]
if partial.keywords:
for kw in partial.keywords:
argnames.remove(kw)
else:
argnames = inspect.getargs(py.code.getrawcode(function))[0]
defaults = getattr(function, 'func_defaults',
getattr(function, '__defaults__', None)) or ()
numdefaults = len(defaults)
if numdefaults:
return tuple(argnames[startindex:-numdefaults])
return tuple(argnames[startindex:])
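# Example (illustrative): for a method ``def f(self, a, b, c=0)`` the
# startindex becomes 1 (skipping ``self``) and the one trailing default is
# stripped, so getfuncargnames(f) returns ("a", "b").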
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes so as to minimize the number of "high scope"
# setups and teardowns
def reorder_items(items):
argkeys_cache = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
keys = set(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
items_done = []
while 1:
items_before, items_same, items_other, newignore = \
slice_items(items, ignore, argkeys_cache[scopenum])
items_before = reorder_items_atscope(
items_before, ignore, argkeys_cache,scopenum+1)
if items_same is None:
# nothing to reorder in this scope
assert items_other is None
return items_done + items_before
items_done.extend(items_before)
items = items_same + items_other
ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
# we pick the first item which uses a fixture instance in the
# requested scope and which we haven't seen yet. We slice the input
    # items list into items_before, items_same and items_other
if scoped_argkeys_cache: # do we need to do work at all?
it = iter(items)
# first find a slicing key
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
argkeys = argkeys.difference(ignore)
if argkeys: # found a slicing key
slicing_argkey = argkeys.pop()
items_before = items[:i]
items_same = [item]
items_other = []
# now slice the remainder of the list
for item in it:
argkeys = scoped_argkeys_cache.get(item)
if argkeys and slicing_argkey in argkeys and \
slicing_argkey not in ignore:
items_same.append(item)
else:
items_other.append(item)
newignore = ignore.copy()
newignore.add(slicing_argkey)
return (items_before, items_same, items_other, newignore)
return items, None, None, None
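# Illustrative effect of the slicing above: given items A1 A2 B1 B2 where
# the A tests share a module-scoped param key and the B tests share another,
# the A group stays together ahead of the B group, so each expensive setup
# is created and torn down once instead of alternating between tests.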
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
        # cs.indices.items() is in random order of argnames, but
        # then again different functions (items) can change the order of
        # arguments so it probably doesn't matter much
for argname, param_index in cs.indices.items():
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # module
key = (argname, param_index, item.fspath)
elif scopenum == 2: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
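# Example (follows the branches above): a test parametrized with a
# module-scoped argname "db" at param_index 0 yields the key
# ("db", 0, item.fspath), which reorder_items uses to group tests that
# share the same parametrized setup.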
def xunitsetup(obj, name):
meth = getattr(obj, name, None)
if getfixturemarker(meth) is None:
return meth
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except KeyboardInterrupt:
raise
except Exception:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
scopename2class = {
'class': Class,
'module': Module,
'function': pytest.Item,
}
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
if scope == "session":
return node.session
raise ValueError("unknown scope")
return node.getparent(cls)
|
codewarrior0/pytest
|
_pytest/python.py
|
Python
|
mit
| 84,482
|
[
"VisIt"
] |
f9cacc7cf719dfd703ae8eedc3b187555dcf99e36d0e99949a6d3a47d0143c53
|
"""
"""
import inspect
import os
import hashlib
import random
import socket
import string
import time
from Cookie import CookieError
from galaxy import eggs
eggs.require( "Cheetah" )
from Cheetah.Template import Template
eggs.require( "Mako" )
import mako.runtime
import mako.lookup
# pytz is used by Babel.
eggs.require( "pytz" )
eggs.require( "Babel" )
from babel.support import Translations
from babel import Locale
eggs.require( "SQLAlchemy >= 0.4" )
from sqlalchemy import and_
from sqlalchemy.orm.exc import NoResultFound
from galaxy.exceptions import MessageException
from galaxy import util
from galaxy.util import asbool
from galaxy.util import safe_str_cmp
from galaxy.util.backports.importlib import import_module
from galaxy.util.sanitize_html import sanitize_html
from galaxy.managers import context
from galaxy.web.framework import url_for
from galaxy.web.framework import base
from galaxy.web.framework import helpers
from galaxy.web.framework import formbuilder
import logging
log = logging.getLogger( __name__ )
UCSC_SERVERS = (
'hgw1.cse.ucsc.edu',
'hgw2.cse.ucsc.edu',
'hgw3.cse.ucsc.edu',
'hgw4.cse.ucsc.edu',
'hgw5.cse.ucsc.edu',
'hgw6.cse.ucsc.edu',
'hgw7.cse.ucsc.edu',
'hgw8.cse.ucsc.edu',
)
class WebApplication( base.WebApplication ):
"""
Base WSGI application instantiated for all Galaxy webapps.
A web application that:
* adds API and UI controllers by scanning given directories and
importing all modules found there.
* has a security object.
* builds mako template lookups.
* generates GalaxyWebTransactions.
"""
def __init__( self, galaxy_app, session_cookie='galaxysession', name=None ):
self.name = name
base.WebApplication.__init__( self )
self.set_transaction_factory( lambda e: self.transaction_chooser( e, galaxy_app, session_cookie ) )
# Mako support
self.mako_template_lookup = self.create_mako_template_lookup( galaxy_app, name )
# Security helper
self.security = galaxy_app.security
def create_mako_template_lookup( self, galaxy_app, name ):
paths = []
# First look in webapp specific directory
if name is not None:
paths.append( os.path.join( galaxy_app.config.template_path, 'webapps', name ) )
# Then look in root directory
paths.append( galaxy_app.config.template_path )
# Create TemplateLookup with a small cache
return mako.lookup.TemplateLookup(directories=paths,
module_directory=galaxy_app.config.template_cache,
collection_size=500,
output_encoding='utf-8' )
def handle_controller_exception( self, e, trans, **kwargs ):
if isinstance( e, MessageException ):
# In the case of a controller exception, sanitize to make sure
# unsafe html input isn't reflected back to the user
return trans.show_message( sanitize_html(e.err_msg), e.type )
def make_body_iterable( self, trans, body ):
if isinstance( body, formbuilder.FormBuilder ):
body = trans.show_form( body )
return base.WebApplication.make_body_iterable( self, trans, body )
def transaction_chooser( self, environ, galaxy_app, session_cookie ):
return GalaxyWebTransaction( environ, galaxy_app, self, session_cookie )
def add_ui_controllers( self, package_name, app ):
"""
Search for UI controllers in `package_name` and add
them to the webapp.
"""
from galaxy.web.base.controller import BaseUIController
from galaxy.web.base.controller import ControllerUnavailable
package = import_module( package_name )
controller_dir = package.__path__[0]
for fname in os.listdir( controller_dir ):
if not( fname.startswith( "_" ) ) and fname.endswith( ".py" ):
name = fname[:-3]
module_name = package_name + "." + name
try:
module = import_module( module_name )
except ControllerUnavailable, exc:
log.debug("%s could not be loaded: %s" % (module_name, str(exc)))
continue
# Look for a controller inside the modules
for key in dir( module ):
T = getattr( module, key )
if inspect.isclass( T ) and T is not BaseUIController and issubclass( T, BaseUIController ):
controller = self._instantiate_controller( T, app )
self.add_ui_controller( name, controller )
def add_api_controllers( self, package_name, app ):
"""
        Search for API controllers in `package_name` and add
        them to the webapp.
"""
from galaxy.web.base.controller import BaseAPIController
from galaxy.web.base.controller import ControllerUnavailable
package = import_module( package_name )
controller_dir = package.__path__[0]
for fname in os.listdir( controller_dir ):
if not( fname.startswith( "_" ) ) and fname.endswith( ".py" ):
name = fname[:-3]
module_name = package_name + "." + name
try:
module = import_module( module_name )
except ControllerUnavailable, exc:
log.debug("%s could not be loaded: %s" % (module_name, str(exc)))
continue
for key in dir( module ):
T = getattr( module, key )
# Exclude classes such as BaseAPIController and BaseTagItemsController
if inspect.isclass( T ) and not key.startswith("Base") and issubclass( T, BaseAPIController ):
# By default use module_name, but allow controller to override name
controller_name = getattr( T, "controller_name", name )
controller = self._instantiate_controller( T, app )
self.add_api_controller( controller_name, controller )
def _instantiate_controller( self, T, app ):
""" Extension point, allow apps to contstruct controllers differently,
really just used to stub out actual controllers for routes testing.
"""
return T( app )
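# Hypothetical wiring sketch (the package paths are illustrative, not a fixed
# Galaxy entry point):
#
#   webapp = WebApplication( galaxy_app, session_cookie='galaxysession', name='galaxy' )
#   webapp.add_ui_controllers( 'galaxy.webapps.galaxy.controllers', galaxy_app )
#   webapp.add_api_controllers( 'galaxy.webapps.galaxy.api', galaxy_app )
#
# Each module in those packages is imported, scanned for BaseUIController /
# BaseAPIController subclasses, instantiated via _instantiate_controller(),
# and registered under its module name (or the class's controller_name).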
class GalaxyWebTransaction( base.DefaultWebTransaction,
context.ProvidesAppContext, context.ProvidesUserContext, context.ProvidesHistoryContext ):
"""
Encapsulates web transaction specific state for the Galaxy application
(specifically the user's "cookie" session and history)
"""
def __init__( self, environ, app, webapp, session_cookie=None):
self.app = app
self.webapp = webapp
self.security = webapp.security
base.DefaultWebTransaction.__init__( self, environ )
self.setup_i18n()
self.expunge_all()
self.debug = asbool( self.app.config.get( 'debug', False ) )
# Flag indicating whether we are in workflow building mode (means
# that the current history should not be used for parameter values
# and such).
self.workflow_building_mode = False
# Flag indicating whether this is an API call and the API key user is an administrator
self.api_inherit_admin = False
self.__user = None
self.galaxy_session = None
self.error_message = None
if self.environ.get('is_api_request', False):
# With API requests, if there's a key, use it and associate the
# user with the transaction.
# If not, check for an active session but do not create one.
# If an error message is set here, it's sent back using
# trans.show_error in the response -- in expose_api.
self.error_message = self._authenticate_api( session_cookie )
elif self.app.name == "reports":
self.galaxy_session = None
else:
# This is a web request, get or create session.
self._ensure_valid_session( session_cookie )
if self.galaxy_session:
# When we've authenticated by session, we have to check the
# following.
# Prevent deleted users from accessing Galaxy
if self.app.config.use_remote_user and self.galaxy_session.user.deleted:
self.response.send_redirect( url_for( '/static/user_disabled.html' ) )
if self.app.config.require_login:
self._ensure_logged_in_user( environ, session_cookie )
def setup_i18n( self ):
locales = []
if 'HTTP_ACCEPT_LANGUAGE' in self.environ:
# locales looks something like: ['en', 'en-us;q=0.7', 'ja;q=0.3']
client_locales = self.environ['HTTP_ACCEPT_LANGUAGE'].split( ',' )
for locale in client_locales:
try:
locales.append( Locale.parse( locale.split( ';' )[0].strip(), sep='-' ).language )
except Exception, e:
log.debug( "Error parsing locale '%s'. %s: %s", locale, type( e ), e )
if not locales:
# Default to English
locales = 'en'
t = Translations.load( dirname='locale', locales=locales, domain='ginga' )
self.template_context.update( dict( _=t.ugettext, n_=t.ugettext, N_=t.ungettext ) )
def get_user( self ):
"""Return the current user if logged in or None."""
if self.galaxy_session:
return self.galaxy_session.user
else:
return self.__user
def set_user( self, user ):
"""Set the current user."""
if self.galaxy_session:
self.galaxy_session.user = user
self.sa_session.add( self.galaxy_session )
self.sa_session.flush()
self.__user = user
user = property( get_user, set_user )
def get_cookie( self, name='galaxysession' ):
"""Convenience method for getting a session cookie"""
try:
# If we've changed the cookie during the request return the new value
if name in self.response.cookies:
return self.response.cookies[name].value
else:
return self.request.cookies[name].value
except:
return None
def set_cookie( self, value, name='galaxysession', path='/', age=90, version='1' ):
"""Convenience method for setting a session cookie"""
# The galaxysession cookie value must be a high entropy 128 bit random number encrypted
# using a server secret key. Any other value is invalid and could pose security issues.
self.response.cookies[name] = value
self.response.cookies[name]['path'] = path
        self.response.cookies[name]['max-age'] = 3600 * 24 * age # age is in days (default 90)
tstamp = time.localtime( time.time() + 3600 * 24 * age )
self.response.cookies[name]['expires'] = time.strftime( '%a, %d-%b-%Y %H:%M:%S GMT', tstamp )
self.response.cookies[name]['version'] = version
try:
self.response.cookies[name]['httponly'] = True
except CookieError, e:
log.warning( "Error setting httponly attribute in cookie '%s': %s" % ( name, e ) )
def _authenticate_api( self, session_cookie ):
"""
Authenticate for the API via key or session (if available).
"""
api_key = self.request.params.get('key', None)
secure_id = self.get_cookie( name=session_cookie )
api_key_supplied = self.environ.get('is_api_request', False) and api_key
if api_key_supplied and self._check_master_api_key( api_key ):
self.api_inherit_admin = True
log.info( "Session authenticated using Galaxy master api key" )
self.user = None
self.galaxy_session = None
elif api_key_supplied:
# Sessionless API transaction, we just need to associate a user.
try:
provided_key = self.sa_session.query( self.app.model.APIKeys ).filter( self.app.model.APIKeys.table.c.key == api_key ).one()
except NoResultFound:
return 'Provided API key is not valid.'
if provided_key.user.deleted:
return 'User account is deactivated, please contact an administrator.'
newest_key = provided_key.user.api_keys[0]
if newest_key.key != provided_key.key:
return 'Provided API key has expired.'
self.set_user( provided_key.user )
elif secure_id:
# API authentication via active session
# Associate user using existing session
self._ensure_valid_session( session_cookie )
else:
# Anonymous API interaction -- anything but @expose_api_anonymous will fail past here.
self.user = None
self.galaxy_session = None
def _check_master_api_key( self, api_key ):
master_api_key = getattr( self.app.config, 'master_api_key', None )
if not master_api_key:
return False
# Hash keys to make them the same size, so we can do safe comparison.
master_hash = hashlib.sha256( master_api_key ).hexdigest()
provided_hash = hashlib.sha256( api_key ).hexdigest()
return safe_str_cmp( master_hash, provided_hash )
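    # Why hash first? A sketch of the reasoning (assuming safe_str_cmp is a
    # constant-time comparison that only protects equal-length inputs):
    #   safe_str_cmp( sha256( master ).hexdigest(), sha256( provided ).hexdigest() )
    # always compares two 64-character digests, so timing cannot leak the
    # configured master key's length or a matching prefix.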
def _ensure_valid_session( self, session_cookie, create=True):
"""
        Ensure that a valid Galaxy session exists and is available as
        trans.galaxy_session (part of initialization)
Support for universe_session and universe_user cookies has been
removed as of 31 Oct 2008.
"""
# Try to load an existing session
secure_id = self.get_cookie( name=session_cookie )
galaxy_session = None
prev_galaxy_session = None
user_for_new_session = None
invalidate_existing_session = False
# Track whether the session has changed so we can avoid calling flush
# in the most common case (session exists and is valid).
galaxy_session_requires_flush = False
if secure_id:
# Decode the cookie value to get the session_key
session_key = self.security.decode_guid( secure_id )
try:
# Make sure we have a valid UTF-8 string
session_key = session_key.encode( 'utf8' )
except UnicodeDecodeError:
# We'll end up creating a new galaxy_session
session_key = None
if session_key:
# Retrieve the galaxy_session id via the unique session_key
galaxy_session = self.sa_session.query( self.app.model.GalaxySession ) \
.filter( and_( self.app.model.GalaxySession.table.c.session_key==session_key, #noqa
self.app.model.GalaxySession.table.c.is_valid==True ) ).first() #noqa
# If remote user is in use it can invalidate the session and in some
        # cases won't have a cookie set above, so we need to check some
# things now.
if self.app.config.use_remote_user:
# If this is an api request, and they've passed a key, we let this go.
assert self.app.config.remote_user_header in self.environ, \
"use_remote_user is set but %s header was not provided" % self.app.config.remote_user_header
remote_user_email = self.environ[ self.app.config.remote_user_header ]
if getattr( self.app.config, "normalize_remote_user_email", False ):
remote_user_email = remote_user_email.lower()
if galaxy_session:
# An existing session, make sure correct association exists
if galaxy_session.user is None:
# No user, associate
galaxy_session.user = self.get_or_create_remote_user( remote_user_email )
galaxy_session_requires_flush = True
elif ((galaxy_session.user.email != remote_user_email) and
((not self.app.config.allow_user_impersonation) or
(remote_user_email not in self.app.config.admin_users_list))):
# Session exists but is not associated with the correct
# remote user, and the currently set remote_user is not a
# potentially impersonating admin.
invalidate_existing_session = True
user_for_new_session = self.get_or_create_remote_user( remote_user_email )
log.warning( "User logged in as '%s' externally, but has a cookie as '%s' invalidating session",
remote_user_email, galaxy_session.user.email )
else:
# No session exists, get/create user for new session
user_for_new_session = self.get_or_create_remote_user( remote_user_email )
else:
if galaxy_session is not None and galaxy_session.user and galaxy_session.user.external:
# Remote user support is not enabled, but there is an existing
# session with an external user, invalidate
invalidate_existing_session = True
log.warning( "User '%s' is an external user with an existing session, invalidating session since external auth is disabled",
galaxy_session.user.email )
elif galaxy_session is not None and galaxy_session.user is not None and galaxy_session.user.deleted:
invalidate_existing_session = True
log.warning( "User '%s' is marked deleted, invalidating session" % galaxy_session.user.email )
# Do we need to invalidate the session for some reason?
if invalidate_existing_session:
prev_galaxy_session = galaxy_session
prev_galaxy_session.is_valid = False
galaxy_session = None
# No relevant cookies, or couldn't find, or invalid, so create a new session
if galaxy_session is None:
galaxy_session = self.__create_new_session( prev_galaxy_session, user_for_new_session )
galaxy_session_requires_flush = True
self.galaxy_session = galaxy_session
self.__update_session_cookie( name=session_cookie )
else:
self.galaxy_session = galaxy_session
# Do we need to flush the session?
if galaxy_session_requires_flush:
self.sa_session.add( galaxy_session )
# FIXME: If prev_session is a proper relation this would not
# be needed.
if prev_galaxy_session:
self.sa_session.add( prev_galaxy_session )
self.sa_session.flush()
# If the old session was invalid, get a new history with our new session
if invalidate_existing_session:
self.new_history()
def _ensure_logged_in_user( self, environ, session_cookie ):
# The value of session_cookie can be one of
# 'galaxysession' or 'galaxycommunitysession'
# Currently this method does nothing unless session_cookie is 'galaxysession'
if session_cookie == 'galaxysession' and self.galaxy_session.user is None:
# TODO: re-engineer to eliminate the use of allowed_paths
# as maintenance overhead is far too high.
allowed_paths = (
url_for( controller='root', action='index' ),
url_for( controller='root', action='tool_menu' ),
url_for( controller='root', action='masthead' ),
url_for( controller='root', action='history' ),
url_for( controller='user', action='api_keys' ),
url_for( controller='user', action='create' ),
url_for( controller='user', action='index' ),
url_for( controller='user', action='login' ),
url_for( controller='user', action='logout' ),
url_for( controller='user', action='manage_user_info' ),
url_for( controller='user', action='set_default_permissions' ),
url_for( controller='user', action='reset_password' ),
url_for( controller='user', action='openid_auth' ),
url_for( controller='user', action='openid_process' ),
url_for( controller='user', action='openid_associate' ),
url_for( controller='library', action='browse' ),
url_for( controller='history', action='list' ),
url_for( controller='dataset', action='list' )
)
display_as = url_for( controller='root', action='display_as' )
if self.app.datatypes_registry.get_display_sites('ucsc') and self.request.path == display_as:
try:
host = socket.gethostbyaddr( self.environ[ 'REMOTE_ADDR' ] )[0]
except( socket.error, socket.herror, socket.gaierror, socket.timeout ):
host = None
if host in UCSC_SERVERS:
return
external_display_path = url_for( controller='', action='display_application' )
if self.request.path.startswith( external_display_path ):
request_path_split = self.request.path.split( '/' )
try:
if (self.app.datatypes_registry.display_applications.get( request_path_split[-5] )
and request_path_split[-4] in self.app.datatypes_registry.display_applications.get( request_path_split[-5] ).links
and request_path_split[-3] != 'None'):
return
except IndexError:
pass
if self.request.path not in allowed_paths:
self.response.send_redirect( url_for( controller='root', action='index' ) )
def __create_new_session( self, prev_galaxy_session=None, user_for_new_session=None ):
"""
Create a new GalaxySession for this request, possibly with a connection
to a previous session (in `prev_galaxy_session`) and an existing user
(in `user_for_new_session`).
Caller is responsible for flushing the returned session.
"""
session_key = self.security.get_new_guid()
galaxy_session = self.app.model.GalaxySession(
session_key=session_key,
is_valid=True,
remote_host=self.request.remote_host,
remote_addr=self.request.remote_addr,
referer=self.request.headers.get( 'Referer', None ) )
if prev_galaxy_session:
# Invalidated an existing session for some reason, keep track
galaxy_session.prev_session_id = prev_galaxy_session.id
if user_for_new_session:
# The new session should be associated with the user
galaxy_session.user = user_for_new_session
return galaxy_session
def get_or_create_remote_user( self, remote_user_email ):
"""
Create a remote user with the email remote_user_email and return it
"""
if not self.app.config.use_remote_user:
return None
if getattr( self.app.config, "normalize_remote_user_email", False ):
remote_user_email = remote_user_email.lower()
user = self.sa_session.query( self.app.model.User
).filter( self.app.model.User.table.c.email==remote_user_email ).first() #noqa
if user:
# GVK: June 29, 2009 - This is to correct the behavior of a previous bug where a private
# role and default user / history permissions were not set for remote users. When a
# remote user authenticates, we'll look for this information, and if missing, create it.
if not self.app.security_agent.get_private_user_role( user ):
self.app.security_agent.create_private_user_role( user )
if 'webapp' not in self.environ or self.environ['webapp'] != 'tool_shed':
if not user.default_permissions:
self.app.security_agent.user_set_default_permissions( user )
self.app.security_agent.user_set_default_permissions( user, history=True, dataset=True )
elif user is None:
username = remote_user_email.split( '@', 1 )[0].lower()
random.seed()
user = self.app.model.User( email=remote_user_email )
user.set_password_cleartext( ''.join( random.sample( string.letters + string.digits, 12 ) ) )
user.external = True
# Replace invalid characters in the username
for char in filter( lambda x: x not in string.ascii_lowercase + string.digits + '-', username ):
username = username.replace( char, '-' )
# Find a unique username - user can change it later
if ( self.sa_session.query( self.app.model.User ).filter_by( username=username ).first() ):
i = 1
while ( self.sa_session.query( self.app.model.User ).filter_by( username=(username + '-' + str(i) ) ).first() ):
i += 1
username += '-' + str(i)
user.username = username
self.sa_session.add( user )
self.sa_session.flush()
self.app.security_agent.create_private_user_role( user )
# We set default user permissions, before we log in and set the default history permissions
if 'webapp' not in self.environ or self.environ['webapp'] != 'tool_shed':
self.app.security_agent.user_set_default_permissions( user )
# self.log_event( "Automatically created account '%s'", user.email )
return user
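    # Illustrative example (hypothetical data): for remote_user_email
    # 'jane.doe@example.org' the candidate username is 'jane-doe' after
    # character replacement; if 'jane-doe' and 'jane-doe-1' are taken, the
    # loop above settles on 'jane-doe-2' before the new user is flushed.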
def __update_session_cookie( self, name='galaxysession' ):
"""
Update the session cookie to match the current session.
"""
self.set_cookie( self.security.encode_guid(self.galaxy_session.session_key ),
name=name, path=self.app.config.cookie_path )
def handle_user_login( self, user ):
"""
Login a new user (possibly newly created)
- create a new session
- associate new session with user
- if old session had a history and it was not associated with a user, associate it with the new session,
otherwise associate the current session's history with the user
- add the disk usage of the current session to the user's total disk usage
"""
# Set the previous session
prev_galaxy_session = self.galaxy_session
prev_galaxy_session.is_valid = False
# Define a new current_session
self.galaxy_session = self.__create_new_session( prev_galaxy_session, user )
if self.webapp.name == 'galaxy':
cookie_name = 'galaxysession'
# Associated the current user's last accessed history (if exists) with their new session
history = None
try:
users_last_session = user.galaxy_sessions[0]
last_accessed = True
except:
users_last_session = None
last_accessed = False
if (prev_galaxy_session.current_history and not
prev_galaxy_session.current_history.deleted and
prev_galaxy_session.current_history.datasets):
if prev_galaxy_session.current_history.user is None or prev_galaxy_session.current_history.user == user:
# If the previous galaxy session had a history, associate it with the new
# session, but only if it didn't belong to a different user.
history = prev_galaxy_session.current_history
if prev_galaxy_session.user is None:
# Increase the user's disk usage by the amount of the previous history's datasets if they didn't already own it.
for hda in history.datasets:
user.total_disk_usage += hda.quota_amount( user )
elif self.galaxy_session.current_history:
history = self.galaxy_session.current_history
if (not history and users_last_session and
users_last_session.current_history and not
users_last_session.current_history.deleted):
history = users_last_session.current_history
elif not history:
history = self.get_history( create=True )
if history not in self.galaxy_session.histories:
self.galaxy_session.add_history( history )
if history.user is None:
history.user = user
self.galaxy_session.current_history = history
if not last_accessed:
# Only set default history permissions if current history is not from a previous session
self.app.security_agent.history_set_default_permissions( history, dataset=True, bypass_manage_permission=True )
self.sa_session.add_all( ( prev_galaxy_session, self.galaxy_session, history ) )
else:
cookie_name = 'galaxycommunitysession'
self.sa_session.add_all( ( prev_galaxy_session, self.galaxy_session ) )
self.sa_session.flush()
        # Reports does not call this method, so cookie_name is either
        # 'galaxysession' or 'galaxycommunitysession' as set above.
self.__update_session_cookie( name=cookie_name )
def handle_user_logout( self, logout_all=False ):
"""
Logout the current user:
- invalidate the current session
- create a new session with no user associated
"""
prev_galaxy_session = self.galaxy_session
prev_galaxy_session.is_valid = False
self.galaxy_session = self.__create_new_session( prev_galaxy_session )
self.sa_session.add_all( ( prev_galaxy_session, self.galaxy_session ) )
galaxy_user_id = prev_galaxy_session.user_id
if logout_all and galaxy_user_id is not None:
for other_galaxy_session in self.sa_session.query( self.app.model.GalaxySession
).filter( and_( self.app.model.GalaxySession.table.c.user_id==galaxy_user_id, #noqa
self.app.model.GalaxySession.table.c.is_valid==True, #noqa
self.app.model.GalaxySession.table.c.id!=prev_galaxy_session.id ) ): #noqa
other_galaxy_session.is_valid = False
self.sa_session.add( other_galaxy_session )
self.sa_session.flush()
if self.webapp.name == 'galaxy':
# This method is not called from the Galaxy reports, so the cookie will always be galaxysession
self.__update_session_cookie( name='galaxysession' )
elif self.webapp.name == 'tool_shed':
self.__update_session_cookie( name='galaxycommunitysession' )
def get_galaxy_session( self ):
"""
Return the current galaxy session
"""
return self.galaxy_session
def get_history( self, create=False ):
"""
        Load the current history, creating a new one only if there is no
        current history and we're told to create one.
Transactions will not always have an active history (API requests), so
None is a valid response.
"""
history = None
if self.galaxy_session:
history = self.galaxy_session.current_history
if not history and util.string_as_bool( create ):
history = self.new_history()
return history
def set_history( self, history ):
if history and not history.deleted:
self.galaxy_session.current_history = history
self.sa_session.add( self.galaxy_session )
self.sa_session.flush()
history = property( get_history, set_history )
def get_or_create_default_history( self ):
"""
Gets or creates a default history and associates it with the current
session.
"""
# There must be a user to fetch a default history.
if not self.galaxy_session.user:
return self.new_history()
# Look for default history that (a) has default name + is not deleted and
# (b) has no datasets. If suitable history found, use it; otherwise, create
# new history.
unnamed_histories = self.sa_session.query( self.app.model.History ).filter_by(
user=self.galaxy_session.user,
name=self.app.model.History.default_name,
deleted=False )
default_history = None
for history in unnamed_histories:
if len( history.datasets ) == 0:
# Found suitable default history.
default_history = history
break
        # Set or create history.
if default_history:
history = default_history
self.set_history( history )
else:
history = self.new_history()
return history
def new_history( self, name=None ):
"""
Create a new history and associate it with the current session and
its associated user (if set).
"""
# Create new history
history = self.app.model.History()
if name:
history.name = name
# Associate with session
history.add_galaxy_session( self.galaxy_session )
# Make it the session's current history
self.galaxy_session.current_history = history
# Associate with user
if self.galaxy_session.user:
history.user = self.galaxy_session.user
# Track genome_build with history
history.genome_build = self.app.genome_builds.default_value
# Set the user's default history permissions
self.app.security_agent.history_set_default_permissions( history )
# Save
self.sa_session.add_all( ( self.galaxy_session, history ) )
self.sa_session.flush()
return history
@base.lazy_property
def template_context( self ):
return dict()
def make_form_data( self, name, **kwargs ):
rval = self.template_context[name] = FormData()
rval.values.update( kwargs )
return rval
def set_message( self, message, type=None ):
"""
Convenience method for setting the 'message' and 'message_type'
element of the template context.
"""
self.template_context['message'] = message
if type:
self.template_context['status'] = type
def get_message( self ):
"""
Convenience method for getting the 'message' element of the template
context.
"""
return self.template_context['message']
def show_message( self, message, type='info', refresh_frames=[], cont=None, use_panels=False, active_view="" ):
"""
Convenience method for displaying a simple page with a single message.
`type`: one of "error", "warning", "info", or "done"; determines the
type of dialog box and icon displayed with the message
`refresh_frames`: names of frames in the interface that should be
refreshed when the message is displayed
"""
return self.fill_template( "message.mako", status=type, message=message, refresh_frames=refresh_frames, cont=cont, use_panels=use_panels, active_view=active_view )
def show_error_message( self, message, refresh_frames=[], use_panels=False, active_view="" ):
"""
Convenience method for displaying an error message. See `show_message`.
"""
return self.show_message( message, 'error', refresh_frames, use_panels=use_panels, active_view=active_view )
def show_ok_message( self, message, refresh_frames=[], use_panels=False, active_view="" ):
"""
Convenience method for displaying an ok message. See `show_message`.
"""
return self.show_message( message, 'done', refresh_frames, use_panels=use_panels, active_view=active_view )
def show_warn_message( self, message, refresh_frames=[], use_panels=False, active_view="" ):
"""
        Convenience method for displaying a warning message. See `show_message`.
"""
return self.show_message( message, 'warning', refresh_frames, use_panels=use_panels, active_view=active_view )
def show_form( self, form, header=None, template="form.mako", use_panels=False, active_view="" ):
"""
Convenience method for displaying a simple page with a single HTML
form.
"""
return self.fill_template( template, form=form, header=header,
use_panels=( form.use_panels or use_panels ),
active_view=active_view )
def fill_template(self, filename, **kwargs):
"""
Fill in a template, putting any keyword arguments on the context.
"""
# call get_user so we can invalidate sessions from external users,
# if external auth has been disabled.
self.get_user()
if filename.endswith( ".mako" ):
return self.fill_template_mako( filename, **kwargs )
else:
template = Template( file=os.path.join(self.app.config.template_path, filename),
searchList=[kwargs, self.template_context, dict(caller=self, t=self, h=helpers, util=util, request=self.request, response=self.response, app=self.app)] )
return str( template )
def fill_template_mako( self, filename, template_lookup=None, **kwargs ):
template_lookup = template_lookup or self.webapp.mako_template_lookup
template = template_lookup.get_template( filename )
template.output_encoding = 'utf-8'
data = dict( caller=self, t=self, trans=self, h=helpers, util=util,
request=self.request, response=self.response, app=self.app )
data.update( self.template_context )
data.update( kwargs )
return template.render( **data )
def stream_template_mako( self, filename, **kwargs ):
template = self.webapp.mako_template_lookup.get_template( filename )
template.output_encoding = 'utf-8'
data = dict( caller=self, t=self, trans=self, h=helpers, util=util, request=self.request, response=self.response, app=self.app )
data.update( self.template_context )
data.update( kwargs )
def render( environ, start_response ):
response_write = start_response( self.response.wsgi_status(), self.response.wsgi_headeritems() )
class StreamBuffer( object ):
def write( self, d ):
response_write( d.encode( 'utf-8' ) )
buffer = StreamBuffer()
context = mako.runtime.Context( buffer, **data )
template.render_context( context )
return []
return render
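    # Sketch of how the returned callable is consumed (an assumption about the
    # WSGI layer, not spelled out here): the framework treats a callable body
    # as a WSGI application and invokes render( environ, start_response )
    # itself, so headers go out when render() runs and the template streams
    # straight to the client through StreamBuffer.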
def fill_template_string(self, template_string, context=None, **kwargs):
"""
Fill in a template, putting any keyword arguments on the context.
"""
template = Template( source=template_string,
searchList=[context or kwargs, dict(caller=self)] )
return str(template)
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/web/framework/webapp.py
|
Python
|
gpl-3.0
| 39,566
|
[
"Galaxy"
] |
e013c2d1938bbabc7624475ca99186df0eda54c60d16b99c975570705f7f329c
|
# Copyright (c) 2010, 2012 Luke McCarthy <luke@iogopro.co.uk>
#
# This is free software released under the MIT license.
# See COPYING file for details, or visit:
# http://www.opensource.org/licenses/mit-license.php
#
# The file is part of FSMonitor, a file-system monitoring library.
# https://github.com/shaurz/fsmonitor
from __future__ import print_function
import sys
import threading
import traceback
from .common import FSEvent, FSMonitorError, FSMonitorOSError
# set to None when unloaded
module_loaded = True
if sys.platform.startswith("linux"):
from .linux import FSMonitor
elif sys.platform == "win32":
from .win32 import FSMonitor
else:
from .polling import FSMonitor
class FSMonitorThread(threading.Thread):
def __init__(self, callback=None, autostart=True, fsmonitor_class=None):
threading.Thread.__init__(self)
self.monitor = (fsmonitor_class or FSMonitor)()
self.callback = callback
self._events = []
self._events_lock = threading.Lock()
self.daemon = True
if autostart:
self.start()
else:
self._running = False
def start(self):
self._running = True
super(FSMonitorThread, self).start()
def add_dir_watch(self, path, flags=FSEvent.All, user=None, **kwargs):
return self.monitor.add_dir_watch(path, flags=flags, user=user, **kwargs)
def add_file_watch(self, path, flags=FSEvent.All, user=None, **kwargs):
return self.monitor.add_file_watch(path, flags=flags, user=user, **kwargs)
def remove_watch(self, watch):
self.monitor.remove_watch(watch)
def remove_all_watches(self):
self.monitor.remove_all_watches()
with self._events_lock:
self._events = []
def run(self):
while module_loaded and self._running:
try:
events = self.monitor.read_events()
if self.callback:
for event in events:
self.callback(event)
else:
with self._events_lock:
self._events.extend(events)
except Exception:
print("Exception in FSMonitorThread:\n" + traceback.format_exc())
def stop(self):
if self.monitor.watches:
self.remove_all_watches()
self._running = False
def read_events(self):
with self._events_lock:
events = self._events
self._events = []
return events
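# Minimal usage sketch (the watched path is hypothetical):
#
#   thread = FSMonitorThread()             # autostarts as a daemon thread
#   watch = thread.add_dir_watch("/tmp")   # defaults to FSEvent.All
#   ...
#   for event in thread.read_events():     # drain queued events (no callback)
#       print(event)
#   thread.stop()
#
# Passing callback=handler to the constructor instead delivers each event on
# the monitor thread as it arrives, and read_events() then stays empty.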
__all__ = (
"FSMonitor",
"FSMonitorThread",
"FSMonitorError",
"FSMonitorOSError",
"FSEvent",
)
|
shaurz/fsmonitor
|
fsmonitor/__init__.py
|
Python
|
mit
| 2,655
|
[
"VisIt"
] |
e9953b76371454dd8f1801db98f2f2ade26cb6d2b3dfbfa09f8a9c603be4c360
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Hobza) of interaction energies for nucleobase pairs.
| Geometries and reference interaction energies from Jurecka et al. PCCP 8 1985 (2006).
| Corrections implemented from footnote 92 of Burns et al., JCP 134 084107 (2011).
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'``
- **subset**
- ``'small'``
- ``'large'``
- ``'HB'`` hydrogen-bonded systems (coplanar base-pairs)
- ``'MX'`` interstrand systems (adjacent base-pairs on different strands)
- ``'DD'`` stacked systems (adjacent base-pairs on same strand)
"""
import qcdb
# <<< JSCH Database Module >>>
dbse = 'JSCH'
# <<< Database Members >>>
HRXN = range(1, 125)
HRXN_SM = [9, 97]
HRXN_LG = [63]
HB = range(1, 39)
MX = range(39, 71)
DD = range(71, 125)
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supramolecular calculations
for rxn in HRXN:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : -1,
'%s-%s-monoB-unCP' % (dbse, rxn) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn) ]
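# For example, the counterpoise-corrected interaction energy of reaction 1 is
# assembled from the ACTV_CP reagents with the RXNM weights above:
#   IE(JSCH-1) = E(JSCH-1-dimer) - E(JSCH-1-monoA-CP) - E(JSCH-1-monoB-CP)
# The uncorrected value combines the -unCP monomers the same way.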
# <<< Reference Values >>>
BIND = {}
BIND['%s-%s' % (dbse, 1)] = -32.06
BIND['%s-%s' % (dbse, 2)] = -31.59
BIND['%s-%s' % (dbse, 3)] = -16.86
BIND['%s-%s' % (dbse, 4)] = -18.16
BIND['%s-%s' % (dbse, 5)] = -33.30
BIND['%s-%s' % (dbse, 6)] = -24.90
BIND['%s-%s' % (dbse, 7)] = -19.10
BIND['%s-%s' % (dbse, 8)] = -51.40
BIND['%s-%s' % (dbse, 9)] = -10.30
BIND['%s-%s' % (dbse, 10)] = -13.70
BIND['%s-%s' % (dbse, 11)] = -29.50
BIND['%s-%s' % (dbse, 12)] = -14.20
BIND['%s-%s' % (dbse, 13)] = -19.50
BIND['%s-%s' % (dbse, 14)] = -19.70
BIND['%s-%s' % (dbse, 15)] = -5.20
BIND['%s-%s' % (dbse, 16)] = -17.80
BIND['%s-%s' % (dbse, 17)] = -16.60
BIND['%s-%s' % (dbse, 18)] = -17.60
BIND['%s-%s' % (dbse, 19)] = -21.30
BIND['%s-%s' % (dbse, 20)] = -21.80
BIND['%s-%s' % (dbse, 21)] = -22.70
BIND['%s-%s' % (dbse, 22)] = -19.40
BIND['%s-%s' % (dbse, 23)] = -18.90
BIND['%s-%s' % (dbse, 24)] = -14.40
BIND['%s-%s' % (dbse, 25)] = -12.80
BIND['%s-%s' % (dbse, 26)] = -18.80
BIND['%s-%s' % (dbse, 27)] = -13.50
BIND['%s-%s' % (dbse, 28)] = -14.50
BIND['%s-%s' % (dbse, 29)] = -13.70
BIND['%s-%s' % (dbse, 30)] = -12.20
BIND['%s-%s' % (dbse, 31)] = -22.80
BIND['%s-%s' % (dbse, 32)] = -12.60
BIND['%s-%s' % (dbse, 33)] = -16.40
BIND['%s-%s' % (dbse, 34)] = -35.80
BIND['%s-%s' % (dbse, 35)] = -18.40
BIND['%s-%s' % (dbse, 36)] = -11.30
BIND['%s-%s' % (dbse, 37)] = -30.70
BIND['%s-%s' % (dbse, 38)] = -31.40
BIND['%s-%s' % (dbse, 39)] = -3.68
BIND['%s-%s' % (dbse, 40)] = -4.82
BIND['%s-%s' % (dbse, 41)] = -2.34
BIND['%s-%s' % (dbse, 42)] = -2.16
BIND['%s-%s' % (dbse, 43)] = 3.09
BIND['%s-%s' % (dbse, 44)] = 1.93
BIND['%s-%s' % (dbse, 45)] = -3.91
BIND['%s-%s' % (dbse, 46)] = 1.24
BIND['%s-%s' % (dbse, 47)] = -0.31
BIND['%s-%s' % (dbse, 48)] = 0.58
BIND['%s-%s' % (dbse, 49)] = -0.47
BIND['%s-%s' % (dbse, 50)] = -0.18
BIND['%s-%s' % (dbse, 51)] = -4.22
BIND['%s-%s' % (dbse, 52)] = -1.15
BIND['%s-%s' % (dbse, 53)] = 0.30
BIND['%s-%s' % (dbse, 54)] = -4.06
BIND['%s-%s' % (dbse, 55)] = 0.88
BIND['%s-%s' % (dbse, 56)] = -0.92
BIND['%s-%s' % (dbse, 57)] = -1.55
BIND['%s-%s' % (dbse, 58)] = 0.70
BIND['%s-%s' % (dbse, 59)] = -1.71
BIND['%s-%s' % (dbse, 60)] = -1.30
BIND['%s-%s' % (dbse, 61)] = -0.70
BIND['%s-%s' % (dbse, 62)] = 1.00
BIND['%s-%s' % (dbse, 63)] = -4.50
BIND['%s-%s' % (dbse, 64)] = 1.40
BIND['%s-%s' % (dbse, 65)] = -4.80
BIND['%s-%s' % (dbse, 66)] = -0.10
BIND['%s-%s' % (dbse, 67)] = -3.00
BIND['%s-%s' % (dbse, 68)] = -5.20
BIND['%s-%s' % (dbse, 69)] = 0.80
BIND['%s-%s' % (dbse, 70)] = 3.10
BIND['%s-%s' % (dbse, 71)] = -19.02
BIND['%s-%s' % (dbse, 72)] = -20.35
BIND['%s-%s' % (dbse, 73)] = -12.30
BIND['%s-%s' % (dbse, 74)] = -14.57
BIND['%s-%s' % (dbse, 75)] = 2.45
BIND['%s-%s' % (dbse, 76)] = -3.85
BIND['%s-%s' % (dbse, 77)] = -8.88
BIND['%s-%s' % (dbse, 78)] = -9.92
BIND['%s-%s' % (dbse, 79)] = 0.32
BIND['%s-%s' % (dbse, 80)] = 0.64
BIND['%s-%s' % (dbse, 81)] = -0.98
BIND['%s-%s' % (dbse, 82)] = -9.10
BIND['%s-%s' % (dbse, 83)] = -9.11
BIND['%s-%s' % (dbse, 84)] = -8.27
BIND['%s-%s' % (dbse, 85)] = -9.43
BIND['%s-%s' % (dbse, 86)] = -7.43
BIND['%s-%s' % (dbse, 87)] = -8.80
BIND['%s-%s' % (dbse, 88)] = -9.11
BIND['%s-%s' % (dbse, 89)] = -8.58
BIND['%s-%s' % (dbse, 90)] = -12.67
BIND['%s-%s' % (dbse, 91)] = -10.22
BIND['%s-%s' % (dbse, 92)] = -11.38
BIND['%s-%s' % (dbse, 93)] = -10.02
BIND['%s-%s' % (dbse, 94)] = -9.79
BIND['%s-%s' % (dbse, 95)] = -10.60
BIND['%s-%s' % (dbse, 96)] = -10.42
BIND['%s-%s' % (dbse, 97)] = -7.46
BIND['%s-%s' % (dbse, 98)] = -12.09
BIND['%s-%s' % (dbse, 99)] = -3.54
BIND['%s-%s' % (dbse, 100)] = -1.62
BIND['%s-%s' % (dbse, 101)] = -6.06
BIND['%s-%s' % (dbse, 102)] = -4.18
BIND['%s-%s' % (dbse, 103)] = -10.80
BIND['%s-%s' % (dbse, 104)] = -7.88
BIND['%s-%s' % (dbse, 105)] = -9.14
BIND['%s-%s' % (dbse, 106)] = -4.69
BIND['%s-%s' % (dbse, 107)] = -7.58
BIND['%s-%s' % (dbse, 108)] = -6.07
BIND['%s-%s' % (dbse, 109)] = -5.67
BIND['%s-%s' % (dbse, 110)] = -4.96
BIND['%s-%s' % (dbse, 111)] = -4.96
BIND['%s-%s' % (dbse, 112)] = -5.44
BIND['%s-%s' % (dbse, 113)] = -6.64
BIND['%s-%s' % (dbse, 114)] = -6.07
BIND['%s-%s' % (dbse, 115)] = -6.25
BIND['%s-%s' % (dbse, 116)] = -3.86
BIND['%s-%s' % (dbse, 117)] = -8.10
BIND['%s-%s' % (dbse, 118)] = -7.90
BIND['%s-%s' % (dbse, 119)] = -6.70
BIND['%s-%s' % (dbse, 120)] = -6.20
BIND['%s-%s' % (dbse, 121)] = -7.70
BIND['%s-%s' % (dbse, 122)] = -6.50
BIND['%s-%s' % (dbse, 123)] = -12.40
BIND['%s-%s' % (dbse, 124)] = -11.60
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 1)] = 'HB-01 G...C WC'
TAGL['%s-%s-dimer' % (dbse, 1)] = 'G...C WC'
TAGL['%s-%s-monoA-CP' % (dbse, 1)] = 'Cytosine from G...C WC'
TAGL['%s-%s-monoB-CP' % (dbse, 1)] = 'Guanine from G...C WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 1)] = 'Cytosine from G...C WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 1)] = 'Guanine from G...C WC'
TAGL['%s-%s' % (dbse, 2)] = 'HB-02 mG...mC WC'
TAGL['%s-%s-dimer' % (dbse, 2)] = 'mG...mC WC'
TAGL['%s-%s-monoA-CP' % (dbse, 2)] = 'methyl-Cytosine from mG...mC WC'
TAGL['%s-%s-monoB-CP' % (dbse, 2)] = 'methyl-Guanine from mG...mC WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 2)] = 'methyl-Cytosine from mG...mC WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 2)] = 'methyl-Guanine from mG...mC WC'
TAGL['%s-%s' % (dbse, 3)] = 'HB-03 A...T WC'
TAGL['%s-%s-dimer' % (dbse, 3)] = 'A...T WC'
TAGL['%s-%s-monoA-CP' % (dbse, 3)] = 'Adenine from A...T WC'
TAGL['%s-%s-monoB-CP' % (dbse, 3)] = 'Thymine from A...T WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 3)] = 'Adenine from A...T WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 3)] = 'Thymine from A...T WC'
TAGL['%s-%s' % (dbse, 4)] = 'HB-04 mA...mT H'
TAGL['%s-%s-dimer' % (dbse, 4)] = 'mA...mT H'
TAGL['%s-%s-monoA-CP' % (dbse, 4)] = 'methyl-Adenine from mA...mT H'
TAGL['%s-%s-monoB-CP' % (dbse, 4)] = 'methyl-Thymine from mA...mT H'
TAGL['%s-%s-monoA-unCP' % (dbse, 4)] = 'methyl-Adenine from mA...mT H'
TAGL['%s-%s-monoB-unCP' % (dbse, 4)] = 'methyl-Thymine from mA...mT H'
TAGL['%s-%s' % (dbse, 5)] = 'HB-05 8oG...C WC pl'
TAGL['%s-%s-dimer' % (dbse, 5)] = '8oG...C WC pl'
TAGL['%s-%s-monoA-CP' % (dbse, 5)] = '8-oxo-Guanine from 8oG...C WC pl'
TAGL['%s-%s-monoB-CP' % (dbse, 5)] = 'Cytosine from 8oG...C WC pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 5)] = '8-oxo-Guanine from 8oG...C WC pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 5)] = 'Cytosine from 8oG...C WC pl'
TAGL['%s-%s' % (dbse, 6)] = 'HB-06 I...C WC pl'
TAGL['%s-%s-dimer' % (dbse, 6)] = 'I...C WC pl'
TAGL['%s-%s-monoA-CP' % (dbse, 6)] = 'Cytosine from I...C WC pl'
TAGL['%s-%s-monoB-CP' % (dbse, 6)] = 'Inosine from I...C WC pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 6)] = 'Cytosine from I...C WC pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 6)] = 'Inosine from I...C WC pl'
TAGL['%s-%s' % (dbse, 7)] = 'HB-07 G...U wobble'
TAGL['%s-%s-dimer' % (dbse, 7)] = 'G...U wobble'
TAGL['%s-%s-monoA-CP' % (dbse, 7)] = 'Guanine from G...U wobble'
TAGL['%s-%s-monoB-CP' % (dbse, 7)] = 'Uracil from G...U wobble'
TAGL['%s-%s-monoA-unCP' % (dbse, 7)] = 'Guanine from G...U wobble'
TAGL['%s-%s-monoB-unCP' % (dbse, 7)] = 'Uracil from G...U wobble'
TAGL['%s-%s' % (dbse, 8)] = 'HB-08 CCH+'
TAGL['%s-%s-dimer' % (dbse, 8)] = 'CCH+'
TAGL['%s-%s-monoA-CP' % (dbse, 8)] = 'Cytosine from CCH+'
TAGL['%s-%s-monoB-CP' % (dbse, 8)] = 'protonated-Cytosine from CCH+'
TAGL['%s-%s-monoA-unCP' % (dbse, 8)] = 'Cytosine from CCH+'
TAGL['%s-%s-monoB-unCP' % (dbse, 8)] = 'protonated-Cytosine from CCH+'
TAGL['%s-%s' % (dbse, 9)] = 'HB-09 U...U Calcutta pl'
TAGL['%s-%s-dimer' % (dbse, 9)] = 'U...U Calcutta pl'
TAGL['%s-%s-monoA-CP' % (dbse, 9)] = 'Uracil from U...U Calcutta pl'
TAGL['%s-%s-monoB-CP' % (dbse, 9)] = 'Uracil from U...U Calcutta pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 9)] = 'Uracil from U...U Calcutta pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 9)] = 'Uracil from U...U Calcutta pl'
TAGL['%s-%s' % (dbse, 10)] = 'HB-10 U...U pl'
TAGL['%s-%s-dimer' % (dbse, 10)] = 'U...U pl'
TAGL['%s-%s-monoA-CP' % (dbse, 10)] = 'Uracil from U...U pl'
TAGL['%s-%s-monoB-CP' % (dbse, 10)] = 'Uracil from U...U pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 10)] = 'Uracil from U...U pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 10)] = 'Uracil from U...U pl'
TAGL['%s-%s' % (dbse, 11)] = 'HB-11 6tG...C WC pl'
TAGL['%s-%s-dimer' % (dbse, 11)] = '6tG...C WC pl'
TAGL['%s-%s-monoA-CP' % (dbse, 11)] = 'Cytosine from 6tG...C WC pl'
TAGL['%s-%s-monoB-CP' % (dbse, 11)] = '6-thio-Guanine from 6tG...C WC pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 11)] = 'Cytosine from 6tG...C WC pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 11)] = '6-thio-Guanine from 6tG...C WC pl'
TAGL['%s-%s' % (dbse, 12)] = 'HB-12 A...4tU WC'
TAGL['%s-%s-dimer' % (dbse, 12)] = 'A...4tU WC'
TAGL['%s-%s-monoA-CP' % (dbse, 12)] = 'Adenine from A...4tU WC'
TAGL['%s-%s-monoB-CP' % (dbse, 12)] = '4-thio-Uracil from A...4tU WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 12)] = 'Adenine from A...4tU WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 12)] = '4-thio-Uracil from A...4tU WC'
TAGL['%s-%s' % (dbse, 13)] = 'HB-13 2-aminoA...T'
TAGL['%s-%s-dimer' % (dbse, 13)] = '2-aminoA...T'
TAGL['%s-%s-monoA-CP' % (dbse, 13)] = '2-amino-Adenine from 2-aminoA...T'
TAGL['%s-%s-monoB-CP' % (dbse, 13)] = 'Thymine from 2-aminoA...T'
TAGL['%s-%s-monoA-unCP' % (dbse, 13)] = '2-amino-Adenine from 2-aminoA...T'
TAGL['%s-%s-monoB-unCP' % (dbse, 13)] = 'Thymine from 2-aminoA...T'
TAGL['%s-%s' % (dbse, 14)] = 'HB-14 2-aminoA...T pl'
TAGL['%s-%s-dimer' % (dbse, 14)] = '2-aminoA...T pl'
TAGL['%s-%s-monoA-CP' % (dbse, 14)] = '2-amino-Adenine from 2-aminoA...T pl'
TAGL['%s-%s-monoB-CP' % (dbse, 14)] = 'Thymine from 2-aminoA...T pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 14)] = '2-amino-Adenine from 2-aminoA...T pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 14)] = 'Thymine from 2-aminoA...T pl'
TAGL['%s-%s' % (dbse, 15)] = 'HB-15 A...F'
TAGL['%s-%s-dimer' % (dbse, 15)] = 'A...F'
TAGL['%s-%s-monoA-CP' % (dbse, 15)] = 'Adenine from A...F'
TAGL['%s-%s-monoB-CP' % (dbse, 15)] = 'difluorotoluene from A...F'
TAGL['%s-%s-monoA-unCP' % (dbse, 15)] = 'Adenine from A...F'
TAGL['%s-%s-monoB-unCP' % (dbse, 15)] = 'difluorotoluene from A...F'
TAGL['%s-%s' % (dbse, 16)] = 'HB-16 G...4tU'
TAGL['%s-%s-dimer' % (dbse, 16)] = 'G...4tU'
TAGL['%s-%s-monoA-CP' % (dbse, 16)] = 'Guanine from G...4tU'
TAGL['%s-%s-monoB-CP' % (dbse, 16)] = '4-thio-Uracil from G...4tU'
TAGL['%s-%s-monoA-unCP' % (dbse, 16)] = 'Guanine from G...4tU'
TAGL['%s-%s-monoB-unCP' % (dbse, 16)] = '4-thio-Uracil from G...4tU'
TAGL['%s-%s' % (dbse, 17)] = 'HB-17 G...2tU'
TAGL['%s-%s-dimer' % (dbse, 17)] = 'G...2tU'
TAGL['%s-%s-monoA-CP' % (dbse, 17)] = 'Guanine from G...2tU'
TAGL['%s-%s-monoB-CP' % (dbse, 17)] = '2-thio-Uracil from G...2tU'
TAGL['%s-%s-monoA-unCP' % (dbse, 17)] = 'Guanine from G...2tU'
TAGL['%s-%s-monoB-unCP' % (dbse, 17)] = '2-thio-Uracil from G...2tU'
TAGL['%s-%s' % (dbse, 18)] = 'HB-18 A...C pl'
TAGL['%s-%s-dimer' % (dbse, 18)] = 'A...C pl'
TAGL['%s-%s-monoA-CP' % (dbse, 18)] = 'Cytosine from A...C pl'
TAGL['%s-%s-monoB-CP' % (dbse, 18)] = 'Adenine from A...C pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 18)] = 'Cytosine from A...C pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 18)] = 'Adenine from A...C pl'
TAGL['%s-%s' % (dbse, 19)] = 'HB-19 G...G pl'
TAGL['%s-%s-dimer' % (dbse, 19)] = 'G...G pl'
TAGL['%s-%s-monoA-CP' % (dbse, 19)] = 'Guanine from G...G pl'
TAGL['%s-%s-monoB-CP' % (dbse, 19)] = 'Guanine from G...G pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 19)] = 'Guanine from G...G pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 19)] = 'Guanine from G...G pl'
TAGL['%s-%s' % (dbse, 20)] = 'HB-20 G...6tG pl'
TAGL['%s-%s-dimer' % (dbse, 20)] = 'G...6tG pl'
TAGL['%s-%s-monoA-CP' % (dbse, 20)] = 'Guanine from G...6tG pl'
TAGL['%s-%s-monoB-CP' % (dbse, 20)] = '6-thio-Guanine from G...6tG pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 20)] = 'Guanine from G...6tG pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 20)] = '6-thio-Guanine from G...6tG pl'
TAGL['%s-%s' % (dbse, 21)] = 'HB-21 6tG...G pl'
TAGL['%s-%s-dimer' % (dbse, 21)] = '6tG...G pl'
TAGL['%s-%s-monoA-CP' % (dbse, 21)] = '6-thio-Guanine from 6tG...G pl'
TAGL['%s-%s-monoB-CP' % (dbse, 21)] = 'Guanine from 6tG...G pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 21)] = '6-thio-Guanine from 6tG...G pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 21)] = 'Guanine from 6tG...G pl'
TAGL['%s-%s' % (dbse, 22)] = 'HB-22 G...A 1'
TAGL['%s-%s-dimer' % (dbse, 22)] = 'G...A 1'
TAGL['%s-%s-monoA-CP' % (dbse, 22)] = 'Guanine from G...A 1'
TAGL['%s-%s-monoB-CP' % (dbse, 22)] = 'Adenine from G...A 1'
TAGL['%s-%s-monoA-unCP' % (dbse, 22)] = 'Guanine from G...A 1'
TAGL['%s-%s-monoB-unCP' % (dbse, 22)] = 'Adenine from G...A 1'
TAGL['%s-%s' % (dbse, 23)] = 'HB-23 G...A 1 pl'
TAGL['%s-%s-dimer' % (dbse, 23)] = 'G...A 1 pl'
TAGL['%s-%s-monoA-CP' % (dbse, 23)] = 'Adenine from G...A 1 pl'
TAGL['%s-%s-monoB-CP' % (dbse, 23)] = 'Guanine from G...A 1 pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 23)] = 'Adenine from G...A 1 pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 23)] = 'Guanine from G...A 1 pl'
TAGL['%s-%s' % (dbse, 24)] = 'HB-24 G...A 2'
TAGL['%s-%s-dimer' % (dbse, 24)] = 'G...A 2'
TAGL['%s-%s-monoA-CP' % (dbse, 24)] = 'Guanine from G...A 2'
TAGL['%s-%s-monoB-CP' % (dbse, 24)] = 'Adenine from G...A 2'
TAGL['%s-%s-monoA-unCP' % (dbse, 24)] = 'Guanine from G...A 2'
TAGL['%s-%s-monoB-unCP' % (dbse, 24)] = 'Adenine from G...A 2'
TAGL['%s-%s' % (dbse, 25)] = 'HB-25 G...A 2 pl'
TAGL['%s-%s-dimer' % (dbse, 25)] = 'G...A 2 pl'
TAGL['%s-%s-monoA-CP' % (dbse, 25)] = 'Guanine from G...A 2 pl'
TAGL['%s-%s-monoB-CP' % (dbse, 25)] = 'Adenine from G...A 2 pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 25)] = 'Guanine from G...A 2 pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 25)] = 'Adenine from G...A 2 pl'
TAGL['%s-%s' % (dbse, 26)] = 'HB-26 G...A 3'
TAGL['%s-%s-dimer' % (dbse, 26)] = 'G...A 3'
TAGL['%s-%s-monoA-CP' % (dbse, 26)] = 'Guanine from G...A 3'
TAGL['%s-%s-monoB-CP' % (dbse, 26)] = 'Adenine from G...A 3'
TAGL['%s-%s-monoA-unCP' % (dbse, 26)] = 'Guanine from G...A 3'
TAGL['%s-%s-monoB-unCP' % (dbse, 26)] = 'Adenine from G...A 3'
TAGL['%s-%s' % (dbse, 27)] = 'HB-27 G...A 4'
TAGL['%s-%s-dimer' % (dbse, 27)] = 'G...A 4'
TAGL['%s-%s-monoA-CP' % (dbse, 27)] = 'Guanine from G...A 4'
TAGL['%s-%s-monoB-CP' % (dbse, 27)] = 'Adenine from G...A 4'
TAGL['%s-%s-monoA-unCP' % (dbse, 27)] = 'Guanine from G...A 4'
TAGL['%s-%s-monoB-unCP' % (dbse, 27)] = 'Adenine from G...A 4'
TAGL['%s-%s' % (dbse, 28)] = 'HB-28 A...A 1 pl'
TAGL['%s-%s-dimer' % (dbse, 28)] = 'A...A 1 pl'
TAGL['%s-%s-monoA-CP' % (dbse, 28)] = 'Adenine from A...A 1 pl'
TAGL['%s-%s-monoB-CP' % (dbse, 28)] = 'Adenine from A...A 1 pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 28)] = 'Adenine from A...A 1 pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 28)] = 'Adenine from A...A 1 pl'
TAGL['%s-%s' % (dbse, 29)] = 'HB-29 A...A 2 pl'
TAGL['%s-%s-dimer' % (dbse, 29)] = 'A...A 2 pl'
TAGL['%s-%s-monoA-CP' % (dbse, 29)] = 'Adenine from A...A 2 pl'
TAGL['%s-%s-monoB-CP' % (dbse, 29)] = 'Adenine from A...A 2 pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 29)] = 'Adenine from A...A 2 pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 29)] = 'Adenine from A...A 2 pl'
TAGL['%s-%s' % (dbse, 30)] = 'HB-30 A...A 3 pl'
TAGL['%s-%s-dimer' % (dbse, 30)] = 'A...A 3 pl'
TAGL['%s-%s-monoA-CP' % (dbse, 30)] = 'Adenine from A...A 3 pl'
TAGL['%s-%s-monoB-CP' % (dbse, 30)] = 'Adenine from A...A 3 pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 30)] = 'Adenine from A...A 3 pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 30)] = 'Adenine from A...A 3 pl'
TAGL['%s-%s' % (dbse, 31)] = 'HB-31 8oG...G'
TAGL['%s-%s-dimer' % (dbse, 31)] = '8oG...G'
TAGL['%s-%s-monoA-CP' % (dbse, 31)] = 'Guanine from 8oG...G'
TAGL['%s-%s-monoB-CP' % (dbse, 31)] = '8-oxo-Guanine from 8oG...G'
TAGL['%s-%s-monoA-unCP' % (dbse, 31)] = 'Guanine from 8oG...G'
TAGL['%s-%s-monoB-unCP' % (dbse, 31)] = '8-oxo-Guanine from 8oG...G'
TAGL['%s-%s' % (dbse, 32)] = 'HB-32 2tU....2tU pl'
TAGL['%s-%s-dimer' % (dbse, 32)] = '2tU....2tU pl'
TAGL['%s-%s-monoA-CP' % (dbse, 32)] = '2-thio-Uracil from 2tU....2tU pl'
TAGL['%s-%s-monoB-CP' % (dbse, 32)] = '2-thio-Uracil from 2tU....2tU pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 32)] = '2-thio-Uracil from 2tU....2tU pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 32)] = '2-thio-Uracil from 2tU....2tU pl'
TAGL['%s-%s' % (dbse, 33)] = 'HB-33 A...T WC'
TAGL['%s-%s-dimer' % (dbse, 33)] = 'A...T WC'
TAGL['%s-%s-monoA-CP' % (dbse, 33)] = 'methyl-Adenine from A...T WC'
TAGL['%s-%s-monoB-CP' % (dbse, 33)] = 'methyl-Thymine from A...T WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 33)] = 'methyl-Adenine from A...T WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 33)] = 'methyl-Thymine from A...T WC'
TAGL['%s-%s' % (dbse, 34)] = 'HB-34 G...C WC'
TAGL['%s-%s-dimer' % (dbse, 34)] = 'G...C WC'
TAGL['%s-%s-monoA-CP' % (dbse, 34)] = 'methyl-Cytosine from G...C WC'
TAGL['%s-%s-monoB-CP' % (dbse, 34)] = 'methyl-Guanine from G...C WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 34)] = 'methyl-Cytosine from G...C WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 34)] = 'methyl-Guanine from G...C WC'
TAGL['%s-%s' % (dbse, 35)] = 'HB-35 A...T WC'
TAGL['%s-%s-dimer' % (dbse, 35)] = 'A...T WC'
TAGL['%s-%s-monoA-CP' % (dbse, 35)] = 'methyl-Adenine from A...T WC'
TAGL['%s-%s-monoB-CP' % (dbse, 35)] = 'methyl-Thymine from A...T WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 35)] = 'methyl-Adenine from A...T WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 35)] = 'methyl-Thymine from A...T WC'
TAGL['%s-%s' % (dbse, 36)] = 'HB-36 G...A HB'
TAGL['%s-%s-dimer' % (dbse, 36)] = 'G...A HB'
TAGL['%s-%s-monoA-CP' % (dbse, 36)] = 'Guanine from G...A HB'
TAGL['%s-%s-monoB-CP' % (dbse, 36)] = 'Adenine from G...A HB'
TAGL['%s-%s-monoA-unCP' % (dbse, 36)] = 'Guanine from G...A HB'
TAGL['%s-%s-monoB-unCP' % (dbse, 36)] = 'Adenine from G...A HB'
TAGL['%s-%s' % (dbse, 37)] = 'HB-37 C...G WC'
TAGL['%s-%s-dimer' % (dbse, 37)] = 'C...G WC'
TAGL['%s-%s-monoA-CP' % (dbse, 37)] = 'Cytosine from C...G WC'
TAGL['%s-%s-monoB-CP' % (dbse, 37)] = 'Guanine from C...G WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 37)] = 'Cytosine from C...G WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 37)] = 'Guanine from C...G WC'
TAGL['%s-%s' % (dbse, 38)] = 'HB-38 G...C WC'
TAGL['%s-%s-dimer' % (dbse, 38)] = 'G...C WC'
TAGL['%s-%s-monoA-CP' % (dbse, 38)] = 'Guanine from G...C WC'
TAGL['%s-%s-monoB-CP' % (dbse, 38)] = 'Cytosine from G...C WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 38)] = 'Guanine from G...C WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 38)] = 'Cytosine from G...C WC'
TAGL['%s-%s' % (dbse, 39)] = 'IS-01 GG0/3.36 CGis036'
TAGL['%s-%s-dimer' % (dbse, 39)] = 'GG0/3.36 CGis036'
TAGL['%s-%s-monoA-CP' % (dbse, 39)] = 'Guanine from GG0/3.36 CGis036'
TAGL['%s-%s-monoB-CP' % (dbse, 39)] = 'Cytosine from GG0/3.36 CGis036'
TAGL['%s-%s-monoA-unCP' % (dbse, 39)] = 'Guanine from GG0/3.36 CGis036'
TAGL['%s-%s-monoB-unCP' % (dbse, 39)] = 'Cytosine from GG0/3.36 CGis036'
TAGL['%s-%s' % (dbse, 40)] = 'IS-02 GG0/3.36 GCis036'
TAGL['%s-%s-dimer' % (dbse, 40)] = 'GG0/3.36 GCis036'
TAGL['%s-%s-monoA-CP' % (dbse, 40)] = 'Cytosine from GG0/3.36 GCis036'
TAGL['%s-%s-monoB-CP' % (dbse, 40)] = 'Guanine from GG0/3.36 GCis036'
TAGL['%s-%s-monoA-unCP' % (dbse, 40)] = 'Cytosine from GG0/3.36 GCis036'
TAGL['%s-%s-monoB-unCP' % (dbse, 40)] = 'Guanine from GG0/3.36 GCis036'
TAGL['%s-%s' % (dbse, 41)] = 'IS-03 AA20/3.05 ATis2005'
TAGL['%s-%s-dimer' % (dbse, 41)] = 'AA20/3.05 ATis2005'
TAGL['%s-%s-monoA-CP' % (dbse, 41)] = 'Adenine from AA20/3.05 ATis2005'
TAGL['%s-%s-monoB-CP' % (dbse, 41)] = 'Thymine from AA20/3.05 ATis2005'
TAGL['%s-%s-monoA-unCP' % (dbse, 41)] = 'Adenine from AA20/3.05 ATis2005'
TAGL['%s-%s-monoB-unCP' % (dbse, 41)] = 'Thymine from AA20/3.05 ATis2005'
TAGL['%s-%s' % (dbse, 42)] = 'IS-04 AA20/3.05 TAis2005'
TAGL['%s-%s-dimer' % (dbse, 42)] = 'AA20/3.05 TAis2005'
TAGL['%s-%s-monoA-CP' % (dbse, 42)] = 'Thymine from AA20/3.05 TAis2005'
TAGL['%s-%s-monoB-CP' % (dbse, 42)] = 'Adenine from AA20/3.05 TAis2005'
TAGL['%s-%s-monoA-unCP' % (dbse, 42)] = 'Thymine from AA20/3.05 TAis2005'
TAGL['%s-%s-monoB-unCP' % (dbse, 42)] = 'Adenine from AA20/3.05 TAis2005'
TAGL['%s-%s' % (dbse, 43)] = 'IS-05 GC0/3.25 C//Cis'
TAGL['%s-%s-dimer' % (dbse, 43)] = 'GC0/3.25 C//Cis'
TAGL['%s-%s-monoA-CP' % (dbse, 43)] = 'Cytosine from GC0/3.25 C//Cis'
TAGL['%s-%s-monoB-CP' % (dbse, 43)] = 'Cytosine from GC0/3.25 C//Cis'
TAGL['%s-%s-monoA-unCP' % (dbse, 43)] = 'Cytosine from GC0/3.25 C//Cis'
TAGL['%s-%s-monoB-unCP' % (dbse, 43)] = 'Cytosine from GC0/3.25 C//Cis'
TAGL['%s-%s' % (dbse, 44)] = 'IS-06 GC0/3.25 G//Gis'
TAGL['%s-%s-dimer' % (dbse, 44)] = 'GC0/3.25 G//Gis'
TAGL['%s-%s-monoA-CP' % (dbse, 44)] = 'Guanine from GC0/3.25 G//Gis'
TAGL['%s-%s-monoB-CP' % (dbse, 44)] = 'Guanine from GC0/3.25 G//Gis'
TAGL['%s-%s-monoA-unCP' % (dbse, 44)] = 'Guanine from GC0/3.25 G//Gis'
TAGL['%s-%s-monoB-unCP' % (dbse, 44)] = 'Guanine from GC0/3.25 G//Gis'
TAGL['%s-%s' % (dbse, 45)] = 'IS-07 CG0/3.19 G//Gis'
TAGL['%s-%s-dimer' % (dbse, 45)] = 'CG0/3.19 G//Gis'
TAGL['%s-%s-monoA-CP' % (dbse, 45)] = 'Guanine from CG0/3.19 G//Gis'
TAGL['%s-%s-monoB-CP' % (dbse, 45)] = 'Guanine from CG0/3.19 G//Gis'
TAGL['%s-%s-monoA-unCP' % (dbse, 45)] = 'Guanine from CG0/3.19 G//Gis'
TAGL['%s-%s-monoB-unCP' % (dbse, 45)] = 'Guanine from CG0/3.19 G//Gis'
TAGL['%s-%s' % (dbse, 46)] = 'IS-08 CG0/3.19 C//Cis'
TAGL['%s-%s-dimer' % (dbse, 46)] = 'CG0/3.19 C//Cis'
TAGL['%s-%s-monoA-CP' % (dbse, 46)] = 'Cytosine from CG0/3.19 C//Cis'
TAGL['%s-%s-monoB-CP' % (dbse, 46)] = 'Cytosine from CG0/3.19 C//Cis'
TAGL['%s-%s-monoA-unCP' % (dbse, 46)] = 'Cytosine from CG0/3.19 C//Cis'
TAGL['%s-%s-monoB-unCP' % (dbse, 46)] = 'Cytosine from CG0/3.19 C//Cis'
TAGL['%s-%s' % (dbse, 47)] = 'IS-09 GA10/3.15 A//Cis'
TAGL['%s-%s-dimer' % (dbse, 47)] = 'GA10/3.15 A//Cis'
TAGL['%s-%s-monoA-CP' % (dbse, 47)] = 'Adenine from GA10/3.15 A//Cis'
TAGL['%s-%s-monoB-CP' % (dbse, 47)] = 'Cytosine from GA10/3.15 A//Cis'
TAGL['%s-%s-monoA-unCP' % (dbse, 47)] = 'Adenine from GA10/3.15 A//Cis'
TAGL['%s-%s-monoB-unCP' % (dbse, 47)] = 'Cytosine from GA10/3.15 A//Cis'
TAGL['%s-%s' % (dbse, 48)] = 'IS-10 GA10/3.15 T//Gis'
TAGL['%s-%s-dimer' % (dbse, 48)] = 'GA10/3.15 T//Gis'
TAGL['%s-%s-monoA-CP' % (dbse, 48)] = 'Thymine from GA10/3.15 T//Gis'
TAGL['%s-%s-monoB-CP' % (dbse, 48)] = 'Guanine from GA10/3.15 T//Gis'
TAGL['%s-%s-monoA-unCP' % (dbse, 48)] = 'Thymine from GA10/3.15 T//Gis'
TAGL['%s-%s-monoB-unCP' % (dbse, 48)] = 'Guanine from GA10/3.15 T//Gis'
TAGL['%s-%s' % (dbse, 49)] = 'IS-11 AG08/3.19 T//Gis'
TAGL['%s-%s-dimer' % (dbse, 49)] = 'AG08/3.19 T//Gis'
TAGL['%s-%s-monoA-CP' % (dbse, 49)] = 'Guanine from AG08/3.19 T//Gis'
TAGL['%s-%s-monoB-CP' % (dbse, 49)] = 'Thymine from AG08/3.19 T//Gis'
TAGL['%s-%s-monoA-unCP' % (dbse, 49)] = 'Guanine from AG08/3.19 T//Gis'
TAGL['%s-%s-monoB-unCP' % (dbse, 49)] = 'Thymine from AG08/3.19 T//Gis'
TAGL['%s-%s' % (dbse, 50)] = 'IS-12 AG08/3.19 A//Cis'
TAGL['%s-%s-dimer' % (dbse, 50)] = 'AG08/3.19 A//Cis'
TAGL['%s-%s-monoA-CP' % (dbse, 50)] = 'Adenine from AG08/3.19 A//Cis'
TAGL['%s-%s-monoB-CP' % (dbse, 50)] = 'Cytosine from AG08/3.19 A//Cis'
TAGL['%s-%s-monoA-unCP' % (dbse, 50)] = 'Adenine from AG08/3.19 A//Cis'
TAGL['%s-%s-monoB-unCP' % (dbse, 50)] = 'Cytosine from AG08/3.19 A//Cis'
TAGL['%s-%s' % (dbse, 51)] = 'IS-13 TG0/3.19 A//Gis'
TAGL['%s-%s-dimer' % (dbse, 51)] = 'TG0/3.19 A//Gis'
TAGL['%s-%s-monoA-CP' % (dbse, 51)] = 'Adenine from TG0/3.19 A//Gis'
TAGL['%s-%s-monoB-CP' % (dbse, 51)] = 'Guanine from TG0/3.19 A//Gis'
TAGL['%s-%s-monoA-unCP' % (dbse, 51)] = 'Adenine from TG0/3.19 A//Gis'
TAGL['%s-%s-monoB-unCP' % (dbse, 51)] = 'Guanine from TG0/3.19 A//Gis'
TAGL['%s-%s' % (dbse, 52)] = 'IS-14 TG0/3.19 T//Cis'
TAGL['%s-%s-dimer' % (dbse, 52)] = 'TG0/3.19 T//Cis'
TAGL['%s-%s-monoA-CP' % (dbse, 52)] = 'Thymine from TG0/3.19 T//Cis'
TAGL['%s-%s-monoB-CP' % (dbse, 52)] = 'Cytosine from TG0/3.19 T//Cis'
TAGL['%s-%s-monoA-unCP' % (dbse, 52)] = 'Thymine from TG0/3.19 T//Cis'
TAGL['%s-%s-monoB-unCP' % (dbse, 52)] = 'Cytosine from TG0/3.19 T//Cis'
TAGL['%s-%s' % (dbse, 53)] = 'IS-15 GT10/3.15 T//Cis'
TAGL['%s-%s-dimer' % (dbse, 53)] = 'GT10/3.15 T//Cis'
TAGL['%s-%s-monoA-CP' % (dbse, 53)] = 'Thymine from GT10/3.15 T//Cis'
TAGL['%s-%s-monoB-CP' % (dbse, 53)] = 'Cytosine from GT10/3.15 T//Cis'
TAGL['%s-%s-monoA-unCP' % (dbse, 53)] = 'Thymine from GT10/3.15 T//Cis'
TAGL['%s-%s-monoB-unCP' % (dbse, 53)] = 'Cytosine from GT10/3.15 T//Cis'
TAGL['%s-%s' % (dbse, 54)] = 'IS-16 GT10/3.15 A//Gis'
TAGL['%s-%s-dimer' % (dbse, 54)] = 'GT10/3.15 A//Gis'
TAGL['%s-%s-monoA-CP' % (dbse, 54)] = 'Adenine from GT10/3.15 A//Gis'
TAGL['%s-%s-monoB-CP' % (dbse, 54)] = 'Guanine from GT10/3.15 A//Gis'
TAGL['%s-%s-monoA-unCP' % (dbse, 54)] = 'Adenine from GT10/3.15 A//Gis'
TAGL['%s-%s-monoB-unCP' % (dbse, 54)] = 'Guanine from GT10/3.15 A//Gis'
TAGL['%s-%s' % (dbse, 55)] = 'IS-17 AT10/3.26 T//Tis'
TAGL['%s-%s-dimer' % (dbse, 55)] = 'AT10/3.26 T//Tis'
TAGL['%s-%s-monoA-CP' % (dbse, 55)] = 'Thymine from AT10/3.26 T//Tis'
TAGL['%s-%s-monoB-CP' % (dbse, 55)] = 'Thymine from AT10/3.26 T//Tis'
TAGL['%s-%s-monoA-unCP' % (dbse, 55)] = 'Thymine from AT10/3.26 T//Tis'
TAGL['%s-%s-monoB-unCP' % (dbse, 55)] = 'Thymine from AT10/3.26 T//Tis'
TAGL['%s-%s' % (dbse, 56)] = 'IS-18 AT10/3.26 A//Ais'
TAGL['%s-%s-dimer' % (dbse, 56)] = 'AT10/3.26 A//Ais'
TAGL['%s-%s-monoA-CP' % (dbse, 56)] = 'Adenine from AT10/3.26 A//Ais'
TAGL['%s-%s-monoB-CP' % (dbse, 56)] = 'Adenine from AT10/3.26 A//Ais'
TAGL['%s-%s-monoA-unCP' % (dbse, 56)] = 'Adenine from AT10/3.26 A//Ais'
TAGL['%s-%s-monoB-unCP' % (dbse, 56)] = 'Adenine from AT10/3.26 A//Ais'
TAGL['%s-%s' % (dbse, 57)] = 'IS-19 TA08/3.16 A//Ais'
TAGL['%s-%s-dimer' % (dbse, 57)] = 'TA08/3.16 A//Ais'
TAGL['%s-%s-monoA-CP' % (dbse, 57)] = 'Adenine from TA08/3.16 A//Ais'
TAGL['%s-%s-monoB-CP' % (dbse, 57)] = 'Adenine from TA08/3.16 A//Ais'
TAGL['%s-%s-monoA-unCP' % (dbse, 57)] = 'Adenine from TA08/3.16 A//Ais'
TAGL['%s-%s-monoB-unCP' % (dbse, 57)] = 'Adenine from TA08/3.16 A//Ais'
TAGL['%s-%s' % (dbse, 58)] = 'IS-20 TA08/3.16 T//Tis'
TAGL['%s-%s-dimer' % (dbse, 58)] = 'TA08/3.16 T//Tis'
TAGL['%s-%s-monoA-CP' % (dbse, 58)] = 'Thymine from TA08/3.16 T//Tis'
TAGL['%s-%s-monoB-CP' % (dbse, 58)] = 'Thymine from TA08/3.16 T//Tis'
TAGL['%s-%s-monoA-unCP' % (dbse, 58)] = 'Thymine from TA08/3.16 T//Tis'
TAGL['%s-%s-monoB-unCP' % (dbse, 58)] = 'Thymine from TA08/3.16 T//Tis'
TAGL['%s-%s' % (dbse, 59)] = 'IS-21 AA0/3.24 A//Tis'
TAGL['%s-%s-dimer' % (dbse, 59)] = 'AA0/3.24 A//Tis'
TAGL['%s-%s-monoA-CP' % (dbse, 59)] = 'Adenine from AA0/3.24 A//Tis'
TAGL['%s-%s-monoB-CP' % (dbse, 59)] = 'Thymine from AA0/3.24 A//Tis'
TAGL['%s-%s-monoA-unCP' % (dbse, 59)] = 'Adenine from AA0/3.24 A//Tis'
TAGL['%s-%s-monoB-unCP' % (dbse, 59)] = 'Thymine from AA0/3.24 A//Tis'
TAGL['%s-%s' % (dbse, 60)] = 'IS-22 AA0/3.24 T//Ais'
TAGL['%s-%s-dimer' % (dbse, 60)] = 'AA0/3.24 T//Ais'
TAGL['%s-%s-monoA-CP' % (dbse, 60)] = 'Adenine from AA0/3.24 T//Ais'
TAGL['%s-%s-monoB-CP' % (dbse, 60)] = 'Thymine from AA0/3.24 T//Ais'
TAGL['%s-%s-monoA-unCP' % (dbse, 60)] = 'Adenine from AA0/3.24 T//Ais'
TAGL['%s-%s-monoB-unCP' % (dbse, 60)] = 'Thymine from AA0/3.24 T//Ais'
TAGL['%s-%s' % (dbse, 61)] = 'IS-23 A...A IS'
TAGL['%s-%s-dimer' % (dbse, 61)] = 'A...A IS'
TAGL['%s-%s-monoA-CP' % (dbse, 61)] = 'methyl-Adenine from A...A IS'
TAGL['%s-%s-monoB-CP' % (dbse, 61)] = 'methyl-Adenine from A...A IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 61)] = 'methyl-Adenine from A...A IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 61)] = 'methyl-Adenine from A...A IS'
TAGL['%s-%s' % (dbse, 62)] = 'IS-24 T...T IS'
TAGL['%s-%s-dimer' % (dbse, 62)] = 'T...T IS'
TAGL['%s-%s-monoA-CP' % (dbse, 62)] = 'methyl-Thymine from T...T IS'
TAGL['%s-%s-monoB-CP' % (dbse, 62)] = 'methyl-Thymine from T...T IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 62)] = 'methyl-Thymine from T...T IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 62)] = 'methyl-Thymine from T...T IS'
TAGL['%s-%s' % (dbse, 63)] = 'IS-25 G...G IS'
TAGL['%s-%s-dimer' % (dbse, 63)] = 'G...G IS'
TAGL['%s-%s-monoA-CP' % (dbse, 63)] = 'methyl-Guanine from G...G IS'
TAGL['%s-%s-monoB-CP' % (dbse, 63)] = 'methyl-Guanine from G...G IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 63)] = 'methyl-Guanine from G...G IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 63)] = 'methyl-Guanine from G...G IS'
TAGL['%s-%s' % (dbse, 64)] = 'IS-26 C...C IS'
TAGL['%s-%s-dimer' % (dbse, 64)] = 'C...C IS'
TAGL['%s-%s-monoA-CP' % (dbse, 64)] = 'methyl-Cytosine from C...C IS'
TAGL['%s-%s-monoB-CP' % (dbse, 64)] = 'methyl-Cytosine from C...C IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 64)] = 'methyl-Cytosine from C...C IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 64)] = 'methyl-Cytosine from C...C IS'
TAGL['%s-%s' % (dbse, 65)] = 'IS-27 A...G IS'
TAGL['%s-%s-dimer' % (dbse, 65)] = 'A...G IS'
TAGL['%s-%s-monoA-CP' % (dbse, 65)] = 'methyl-Adenine from A...G IS'
TAGL['%s-%s-monoB-CP' % (dbse, 65)] = 'methyl-Guanine from A...G IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 65)] = 'methyl-Adenine from A...G IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 65)] = 'methyl-Guanine from A...G IS'
TAGL['%s-%s' % (dbse, 66)] = 'IS-28 T...C IS'
TAGL['%s-%s-dimer' % (dbse, 66)] = 'T...C IS'
TAGL['%s-%s-monoA-CP' % (dbse, 66)] = 'methyl-Cytosine from T...C IS'
TAGL['%s-%s-monoB-CP' % (dbse, 66)] = 'methyl-Thymine from T...C IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 66)] = 'methyl-Cytosine from T...C IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 66)] = 'methyl-Thymine from T...C IS'
TAGL['%s-%s' % (dbse, 67)] = 'IS-29 C...A IS'
TAGL['%s-%s-dimer' % (dbse, 67)] = 'C...A IS'
TAGL['%s-%s-monoA-CP' % (dbse, 67)] = 'Cytosine from C...A IS'
TAGL['%s-%s-monoB-CP' % (dbse, 67)] = 'Adenine from C...A IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 67)] = 'Cytosine from C...A IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 67)] = 'Adenine from C...A IS'
TAGL['%s-%s' % (dbse, 68)] = 'IS-30 G...G IS'
TAGL['%s-%s-dimer' % (dbse, 68)] = 'G...G IS'
TAGL['%s-%s-monoA-CP' % (dbse, 68)] = 'Guanine from G...G IS'
TAGL['%s-%s-monoB-CP' % (dbse, 68)] = 'Guanine from G...G IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 68)] = 'Guanine from G...G IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 68)] = 'Guanine from G...G IS'
TAGL['%s-%s' % (dbse, 69)] = 'IS-31 G...G IS'
TAGL['%s-%s-dimer' % (dbse, 69)] = 'G...G IS'
TAGL['%s-%s-monoA-CP' % (dbse, 69)] = 'Guanine from G...G IS'
TAGL['%s-%s-monoB-CP' % (dbse, 69)] = 'Guanine from G...G IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 69)] = 'Guanine from G...G IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 69)] = 'Guanine from G...G IS'
TAGL['%s-%s' % (dbse, 70)] = 'IS-32 C...C IS'
TAGL['%s-%s-dimer' % (dbse, 70)] = 'C...C IS'
TAGL['%s-%s-monoA-CP' % (dbse, 70)] = 'Cytosine from C...C IS'
TAGL['%s-%s-monoB-CP' % (dbse, 70)] = 'Cytosine from C...C IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 70)] = 'Cytosine from C...C IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 70)] = 'Cytosine from C...C IS'
TAGL['%s-%s' % (dbse, 71)] = 'ST-01 G...C S'
TAGL['%s-%s-dimer' % (dbse, 71)] = 'G...C S'
TAGL['%s-%s-monoA-CP' % (dbse, 71)] = 'Guanine from G...C S'
TAGL['%s-%s-monoB-CP' % (dbse, 71)] = 'Cytosine from G...C S'
TAGL['%s-%s-monoA-unCP' % (dbse, 71)] = 'Guanine from G...C S'
TAGL['%s-%s-monoB-unCP' % (dbse, 71)] = 'Cytosine from G...C S'
TAGL['%s-%s' % (dbse, 72)] = 'ST-02 mG...mC S'
TAGL['%s-%s-dimer' % (dbse, 72)] = 'mG...mC S'
TAGL['%s-%s-monoA-CP' % (dbse, 72)] = 'methyl-Guanine from mG...mC S'
TAGL['%s-%s-monoB-CP' % (dbse, 72)] = 'methyl-Cytosine from mG...mC S'
TAGL['%s-%s-monoA-unCP' % (dbse, 72)] = 'methyl-Guanine from mG...mC S'
TAGL['%s-%s-monoB-unCP' % (dbse, 72)] = 'methyl-Cytosine from mG...mC S'
TAGL['%s-%s' % (dbse, 73)] = 'ST-03 A...T S'
TAGL['%s-%s-dimer' % (dbse, 73)] = 'A...T S'
TAGL['%s-%s-monoA-CP' % (dbse, 73)] = 'Adenine from A...T S'
TAGL['%s-%s-monoB-CP' % (dbse, 73)] = 'Thymine from A...T S'
TAGL['%s-%s-monoA-unCP' % (dbse, 73)] = 'Adenine from A...T S'
TAGL['%s-%s-monoB-unCP' % (dbse, 73)] = 'Thymine from A...T S'
TAGL['%s-%s' % (dbse, 74)] = 'ST-04 mA...mT S'
TAGL['%s-%s-dimer' % (dbse, 74)] = 'mA...mT S'
TAGL['%s-%s-monoA-CP' % (dbse, 74)] = 'methyl-Adenine from mA...mT S'
TAGL['%s-%s-monoB-CP' % (dbse, 74)] = 'methyl-Thymine from mA...mT S'
TAGL['%s-%s-monoA-unCP' % (dbse, 74)] = 'methyl-Adenine from mA...mT S'
TAGL['%s-%s-monoB-unCP' % (dbse, 74)] = 'methyl-Thymine from mA...mT S'
TAGL['%s-%s' % (dbse, 75)] = 'ST-05 CC1'
TAGL['%s-%s-dimer' % (dbse, 75)] = 'CC1'
TAGL['%s-%s-monoA-CP' % (dbse, 75)] = 'Cytosine from CC1'
TAGL['%s-%s-monoB-CP' % (dbse, 75)] = 'Cytosine from CC1'
TAGL['%s-%s-monoA-unCP' % (dbse, 75)] = 'Cytosine from CC1'
TAGL['%s-%s-monoB-unCP' % (dbse, 75)] = 'Cytosine from CC1'
TAGL['%s-%s' % (dbse, 76)] = 'ST-06 CC2'
TAGL['%s-%s-dimer' % (dbse, 76)] = 'CC2'
TAGL['%s-%s-monoA-CP' % (dbse, 76)] = 'Cytosine from CC2'
TAGL['%s-%s-monoB-CP' % (dbse, 76)] = 'Cytosine from CC2'
TAGL['%s-%s-monoA-unCP' % (dbse, 76)] = 'Cytosine from CC2'
TAGL['%s-%s-monoB-unCP' % (dbse, 76)] = 'Cytosine from CC2'
TAGL['%s-%s' % (dbse, 77)] = 'ST-07 CC3'
TAGL['%s-%s-dimer' % (dbse, 77)] = 'CC3'
TAGL['%s-%s-monoA-CP' % (dbse, 77)] = 'Cytosine from CC3'
TAGL['%s-%s-monoB-CP' % (dbse, 77)] = 'Cytosine from CC3'
TAGL['%s-%s-monoA-unCP' % (dbse, 77)] = 'Cytosine from CC3'
TAGL['%s-%s-monoB-unCP' % (dbse, 77)] = 'Cytosine from CC3'
TAGL['%s-%s' % (dbse, 78)] = 'ST-08 CC4'
TAGL['%s-%s-dimer' % (dbse, 78)] = 'CC4'
TAGL['%s-%s-monoA-CP' % (dbse, 78)] = 'Cytosine from CC4'
TAGL['%s-%s-monoB-CP' % (dbse, 78)] = 'Cytosine from CC4'
TAGL['%s-%s-monoA-unCP' % (dbse, 78)] = 'Cytosine from CC4'
TAGL['%s-%s-monoB-unCP' % (dbse, 78)] = 'Cytosine from CC4'
TAGL['%s-%s' % (dbse, 79)] = 'ST-09 CC5'
TAGL['%s-%s-dimer' % (dbse, 79)] = 'CC5'
TAGL['%s-%s-monoA-CP' % (dbse, 79)] = 'Cytosine from CC5'
TAGL['%s-%s-monoB-CP' % (dbse, 79)] = 'Cytosine from CC5'
TAGL['%s-%s-monoA-unCP' % (dbse, 79)] = 'Cytosine from CC5'
TAGL['%s-%s-monoB-unCP' % (dbse, 79)] = 'Cytosine from CC5'
TAGL['%s-%s' % (dbse, 80)] = 'ST-10 CC6'
TAGL['%s-%s-dimer' % (dbse, 80)] = 'CC6'
TAGL['%s-%s-monoA-CP' % (dbse, 80)] = 'Cytosine from CC6'
TAGL['%s-%s-monoB-CP' % (dbse, 80)] = 'Cytosine from CC6'
TAGL['%s-%s-monoA-unCP' % (dbse, 80)] = 'Cytosine from CC6'
TAGL['%s-%s-monoB-unCP' % (dbse, 80)] = 'Cytosine from CC6'
TAGL['%s-%s' % (dbse, 81)] = 'ST-11 CC7'
TAGL['%s-%s-dimer' % (dbse, 81)] = 'CC7'
TAGL['%s-%s-monoA-CP' % (dbse, 81)] = 'Cytosine from CC7'
TAGL['%s-%s-monoB-CP' % (dbse, 81)] = 'Cytosine from CC7'
TAGL['%s-%s-monoA-unCP' % (dbse, 81)] = 'Cytosine from CC7'
TAGL['%s-%s-monoB-unCP' % (dbse, 81)] = 'Cytosine from CC7'
TAGL['%s-%s' % (dbse, 82)] = 'ST-12 CC8'
TAGL['%s-%s-dimer' % (dbse, 82)] = 'CC8'
TAGL['%s-%s-monoA-CP' % (dbse, 82)] = 'Cytosine from CC8'
TAGL['%s-%s-monoB-CP' % (dbse, 82)] = 'Cytosine from CC8'
TAGL['%s-%s-monoA-unCP' % (dbse, 82)] = 'Cytosine from CC8'
TAGL['%s-%s-monoB-unCP' % (dbse, 82)] = 'Cytosine from CC8'
TAGL['%s-%s' % (dbse, 83)] = 'ST-13 CC9'
TAGL['%s-%s-dimer' % (dbse, 83)] = 'CC9'
TAGL['%s-%s-monoA-CP' % (dbse, 83)] = 'Cytosine from CC9'
TAGL['%s-%s-monoB-CP' % (dbse, 83)] = 'Cytosine from CC9'
TAGL['%s-%s-monoA-unCP' % (dbse, 83)] = 'Cytosine from CC9'
TAGL['%s-%s-monoB-unCP' % (dbse, 83)] = 'Cytosine from CC9'
TAGL['%s-%s' % (dbse, 84)] = 'ST-14 CC10'
TAGL['%s-%s-dimer' % (dbse, 84)] = 'CC10'
TAGL['%s-%s-monoA-CP' % (dbse, 84)] = 'Cytosine from CC10'
TAGL['%s-%s-monoB-CP' % (dbse, 84)] = 'Cytosine from CC10'
TAGL['%s-%s-monoA-unCP' % (dbse, 84)] = 'Cytosine from CC10'
TAGL['%s-%s-monoB-unCP' % (dbse, 84)] = 'Cytosine from CC10'
TAGL['%s-%s' % (dbse, 85)] = 'ST-15 CC11'
TAGL['%s-%s-dimer' % (dbse, 85)] = 'CC11'
TAGL['%s-%s-monoA-CP' % (dbse, 85)] = 'Cytosine from CC11'
TAGL['%s-%s-monoB-CP' % (dbse, 85)] = 'Cytosine from CC11'
TAGL['%s-%s-monoA-unCP' % (dbse, 85)] = 'Cytosine from CC11'
TAGL['%s-%s-monoB-unCP' % (dbse, 85)] = 'Cytosine from CC11'
TAGL['%s-%s' % (dbse, 86)] = 'ST-16 CC12'
TAGL['%s-%s-dimer' % (dbse, 86)] = 'CC12'
TAGL['%s-%s-monoA-CP' % (dbse, 86)] = 'Cytosine from CC12'
TAGL['%s-%s-monoB-CP' % (dbse, 86)] = 'Cytosine from CC12'
TAGL['%s-%s-monoA-unCP' % (dbse, 86)] = 'Cytosine from CC12'
TAGL['%s-%s-monoB-unCP' % (dbse, 86)] = 'Cytosine from CC12'
TAGL['%s-%s' % (dbse, 87)] = 'ST-17 CC13'
TAGL['%s-%s-dimer' % (dbse, 87)] = 'CC13'
TAGL['%s-%s-monoA-CP' % (dbse, 87)] = 'Cytosine from CC13'
TAGL['%s-%s-monoB-CP' % (dbse, 87)] = 'Cytosine from CC13'
TAGL['%s-%s-monoA-unCP' % (dbse, 87)] = 'Cytosine from CC13'
TAGL['%s-%s-monoB-unCP' % (dbse, 87)] = 'Cytosine from CC13'
TAGL['%s-%s' % (dbse, 88)] = 'ST-18 CC14'
TAGL['%s-%s-dimer' % (dbse, 88)] = 'CC14'
TAGL['%s-%s-monoA-CP' % (dbse, 88)] = 'Cytosine from CC14'
TAGL['%s-%s-monoB-CP' % (dbse, 88)] = 'Cytosine from CC14'
TAGL['%s-%s-monoA-unCP' % (dbse, 88)] = 'Cytosine from CC14'
TAGL['%s-%s-monoB-unCP' % (dbse, 88)] = 'Cytosine from CC14'
TAGL['%s-%s' % (dbse, 89)] = 'ST-19 AAst'
TAGL['%s-%s-dimer' % (dbse, 89)] = 'AAst'
TAGL['%s-%s-monoA-CP' % (dbse, 89)] = 'Adenine from AAst'
TAGL['%s-%s-monoB-CP' % (dbse, 89)] = 'Adenine from AAst'
TAGL['%s-%s-monoA-unCP' % (dbse, 89)] = 'Adenine from AAst'
TAGL['%s-%s-monoB-unCP' % (dbse, 89)] = 'Adenine from AAst'
TAGL['%s-%s' % (dbse, 90)] = 'ST-20 GGst'
TAGL['%s-%s-dimer' % (dbse, 90)] = 'GGst'
TAGL['%s-%s-monoA-CP' % (dbse, 90)] = 'Guanine from GGst'
TAGL['%s-%s-monoB-CP' % (dbse, 90)] = 'Guanine from GGst'
TAGL['%s-%s-monoA-unCP' % (dbse, 90)] = 'Guanine from GGst'
TAGL['%s-%s-monoB-unCP' % (dbse, 90)] = 'Guanine from GGst'
TAGL['%s-%s' % (dbse, 91)] = 'ST-21 ACst'
TAGL['%s-%s-dimer' % (dbse, 91)] = 'ACst'
TAGL['%s-%s-monoA-CP' % (dbse, 91)] = 'Adenine from ACst'
TAGL['%s-%s-monoB-CP' % (dbse, 91)] = 'Cytosine from ACst'
TAGL['%s-%s-monoA-unCP' % (dbse, 91)] = 'Adenine from ACst'
TAGL['%s-%s-monoB-unCP' % (dbse, 91)] = 'Cytosine from ACst'
TAGL['%s-%s' % (dbse, 92)] = 'ST-22 GAst'
TAGL['%s-%s-dimer' % (dbse, 92)] = 'GAst'
TAGL['%s-%s-monoA-CP' % (dbse, 92)] = 'Guanine from GAst'
TAGL['%s-%s-monoB-CP' % (dbse, 92)] = 'Adenine from GAst'
TAGL['%s-%s-monoA-unCP' % (dbse, 92)] = 'Guanine from GAst'
TAGL['%s-%s-monoB-unCP' % (dbse, 92)] = 'Adenine from GAst'
TAGL['%s-%s' % (dbse, 93)] = 'ST-23 CCst'
TAGL['%s-%s-dimer' % (dbse, 93)] = 'CCst'
TAGL['%s-%s-monoA-CP' % (dbse, 93)] = 'Cytosine from CCst'
TAGL['%s-%s-monoB-CP' % (dbse, 93)] = 'Cytosine from CCst'
TAGL['%s-%s-monoA-unCP' % (dbse, 93)] = 'Cytosine from CCst'
TAGL['%s-%s-monoB-unCP' % (dbse, 93)] = 'Cytosine from CCst'
TAGL['%s-%s' % (dbse, 94)] = 'ST-24 AUst'
TAGL['%s-%s-dimer' % (dbse, 94)] = 'AUst'
TAGL['%s-%s-monoA-CP' % (dbse, 94)] = 'Adenine from AUst'
TAGL['%s-%s-monoB-CP' % (dbse, 94)] = 'Uracil from AUst'
TAGL['%s-%s-monoA-unCP' % (dbse, 94)] = 'Adenine from AUst'
TAGL['%s-%s-monoB-unCP' % (dbse, 94)] = 'Uracil from AUst'
TAGL['%s-%s' % (dbse, 95)] = 'ST-25 GCst'
TAGL['%s-%s-dimer' % (dbse, 95)] = 'GCst'
TAGL['%s-%s-monoA-CP' % (dbse, 95)] = 'Guanine from GCst'
TAGL['%s-%s-monoB-CP' % (dbse, 95)] = 'Cytosine from GCst'
TAGL['%s-%s-monoA-unCP' % (dbse, 95)] = 'Guanine from GCst'
TAGL['%s-%s-monoB-unCP' % (dbse, 95)] = 'Cytosine from GCst'
TAGL['%s-%s' % (dbse, 96)] = 'ST-26 CUst'
TAGL['%s-%s-dimer' % (dbse, 96)] = 'CUst'
TAGL['%s-%s-monoA-CP' % (dbse, 96)] = 'Cytosine from CUst'
TAGL['%s-%s-monoB-CP' % (dbse, 96)] = 'Uracil from CUst'
TAGL['%s-%s-monoA-unCP' % (dbse, 96)] = 'Cytosine from CUst'
TAGL['%s-%s-monoB-unCP' % (dbse, 96)] = 'Uracil from CUst'
TAGL['%s-%s' % (dbse, 97)] = 'ST-27 UUst'
TAGL['%s-%s-dimer' % (dbse, 97)] = 'UUst'
TAGL['%s-%s-monoA-CP' % (dbse, 97)] = 'Uracil from UUst'
TAGL['%s-%s-monoB-CP' % (dbse, 97)] = 'Uracil from UUst'
TAGL['%s-%s-monoA-unCP' % (dbse, 97)] = 'Uracil from UUst'
TAGL['%s-%s-monoB-unCP' % (dbse, 97)] = 'Uracil from UUst'
TAGL['%s-%s' % (dbse, 98)] = 'ST-28 GUst'
TAGL['%s-%s-dimer' % (dbse, 98)] = 'GUst'
TAGL['%s-%s-monoA-CP' % (dbse, 98)] = 'Guanine from GUst'
TAGL['%s-%s-monoB-CP' % (dbse, 98)] = 'Uracil from GUst'
TAGL['%s-%s-monoA-unCP' % (dbse, 98)] = 'Guanine from GUst'
TAGL['%s-%s-monoB-unCP' % (dbse, 98)] = 'Uracil from GUst'
TAGL['%s-%s' % (dbse, 99)] = 'ST-29 GG0/3.36 GGs036'
TAGL['%s-%s-dimer' % (dbse, 99)] = 'GGs036'
TAGL['%s-%s-monoA-CP' % (dbse, 99)] = 'Guanine from GGs036'
TAGL['%s-%s-monoB-CP' % (dbse, 99)] = 'Guanine from GGs036'
TAGL['%s-%s-monoA-unCP' % (dbse, 99)] = 'Guanine from GGs036'
TAGL['%s-%s-monoB-unCP' % (dbse, 99)] = 'Guanine from GGs036'
TAGL['%s-%s' % (dbse, 100)] = 'ST-30 GG0/3.36 CCs036'
TAGL['%s-%s-dimer' % (dbse, 100)] = 'CCs036'
TAGL['%s-%s-monoA-CP' % (dbse, 100)] = 'Cytosine from CCs036'
TAGL['%s-%s-monoB-CP' % (dbse, 100)] = 'Cytosine from CCs036'
TAGL['%s-%s-monoA-unCP' % (dbse, 100)] = 'Cytosine from CCs036'
TAGL['%s-%s-monoB-unCP' % (dbse, 100)] = 'Cytosine from CCs036'
TAGL['%s-%s' % (dbse, 101)] = 'ST-31 AA20/3.05 AAs2005'
TAGL['%s-%s-dimer' % (dbse, 101)] = 'AAs2005'
TAGL['%s-%s-monoA-CP' % (dbse, 101)] = 'Adenine from AAs2005'
TAGL['%s-%s-monoB-CP' % (dbse, 101)] = 'Adenine from AAs2005'
TAGL['%s-%s-monoA-unCP' % (dbse, 101)] = 'Adenine from AAs2005'
TAGL['%s-%s-monoB-unCP' % (dbse, 101)] = 'Adenine from AAs2005'
TAGL['%s-%s' % (dbse, 102)] = 'ST-32 AA20/3.05 TTs2005'
TAGL['%s-%s-dimer' % (dbse, 102)] = 'TTs2005'
TAGL['%s-%s-monoA-CP' % (dbse, 102)] = 'Thymine from TTs2005'
TAGL['%s-%s-monoB-CP' % (dbse, 102)] = 'Thymine from TTs2005'
TAGL['%s-%s-monoA-unCP' % (dbse, 102)] = 'Thymine from TTs2005'
TAGL['%s-%s-monoB-unCP' % (dbse, 102)] = 'Thymine from TTs2005'
TAGL['%s-%s' % (dbse, 103)] = 'ST-33 GC0/3.25 G//Cs'
TAGL['%s-%s-dimer' % (dbse, 103)] = 'GC0/3.25 G//Cs'
TAGL['%s-%s-monoA-CP' % (dbse, 103)] = 'Cytosine from GC0/3.25 G//Cs'
TAGL['%s-%s-monoB-CP' % (dbse, 103)] = 'Guanine from GC0/3.25 G//Cs'
TAGL['%s-%s-monoA-unCP' % (dbse, 103)] = 'Cytosine from GC0/3.25 G//Cs'
TAGL['%s-%s-monoB-unCP' % (dbse, 103)] = 'Guanine from GC0/3.25 G//Cs'
TAGL['%s-%s' % (dbse, 104)] = 'ST-34 CG0/3.19 G//Cs'
TAGL['%s-%s-dimer' % (dbse, 104)] = 'CG0/3.19 G//Cs'
TAGL['%s-%s-monoA-CP' % (dbse, 104)] = 'Cytosine from CG0/3.19 G//Cs'
TAGL['%s-%s-monoB-CP' % (dbse, 104)] = 'Guanine from CG0/3.19 G//Cs'
TAGL['%s-%s-monoA-unCP' % (dbse, 104)] = 'Cytosine from CG0/3.19 G//Cs'
TAGL['%s-%s-monoB-unCP' % (dbse, 104)] = 'Guanine from CG0/3.19 G//Cs'
TAGL['%s-%s' % (dbse, 105)] = 'ST-35 GA10/3.15 A//Gs'
TAGL['%s-%s-dimer' % (dbse, 105)] = 'GA10/3.15 A//Gs'
TAGL['%s-%s-monoA-CP' % (dbse, 105)] = 'Adenine from GA10/3.15 A//Gs'
TAGL['%s-%s-monoB-CP' % (dbse, 105)] = 'Guanine from GA10/3.15 A//Gs'
TAGL['%s-%s-monoA-unCP' % (dbse, 105)] = 'Adenine from GA10/3.15 A//Gs'
TAGL['%s-%s-monoB-unCP' % (dbse, 105)] = 'Guanine from GA10/3.15 A//Gs'
TAGL['%s-%s' % (dbse, 106)] = 'ST-36 GA10/3.15 T//Cs'
TAGL['%s-%s-dimer' % (dbse, 106)] = 'GA10/3.15 T//Cs'
TAGL['%s-%s-monoA-CP' % (dbse, 106)] = 'Thymine from GA10/3.15 T//Cs'
TAGL['%s-%s-monoB-CP' % (dbse, 106)] = 'Cytosine from GA10/3.15 T//Cs'
TAGL['%s-%s-monoA-unCP' % (dbse, 106)] = 'Thymine from GA10/3.15 T//Cs'
TAGL['%s-%s-monoB-unCP' % (dbse, 106)] = 'Cytosine from GA10/3.15 T//Cs'
TAGL['%s-%s' % (dbse, 107)] = 'ST-37 AG08/3.19 A//Gs'
TAGL['%s-%s-dimer' % (dbse, 107)] = 'AG08/3.19 A//Gs'
TAGL['%s-%s-monoA-CP' % (dbse, 107)] = 'Adenine from AG08/3.19 A//Gs'
TAGL['%s-%s-monoB-CP' % (dbse, 107)] = 'Guanine from AG08/3.19 A//Gs'
TAGL['%s-%s-monoA-unCP' % (dbse, 107)] = 'Adenine from AG08/3.19 A//Gs'
TAGL['%s-%s-monoB-unCP' % (dbse, 107)] = 'Guanine from AG08/3.19 A//Gs'
TAGL['%s-%s' % (dbse, 108)] = 'ST-38 AG08/3.19 T//Cs'
TAGL['%s-%s-dimer' % (dbse, 108)] = 'AG08/3.19 T//Cs'
TAGL['%s-%s-monoA-CP' % (dbse, 108)] = 'Thymine from AG08/3.19 T//Cs'
TAGL['%s-%s-monoB-CP' % (dbse, 108)] = 'Cytosine from AG08/3.19 T//Cs'
TAGL['%s-%s-monoA-unCP' % (dbse, 108)] = 'Thymine from AG08/3.19 T//Cs'
TAGL['%s-%s-monoB-unCP' % (dbse, 108)] = 'Cytosine from AG08/3.19 T//Cs'
TAGL['%s-%s' % (dbse, 109)] = 'ST-39 TG0/3.19 T//Gs'
TAGL['%s-%s-dimer' % (dbse, 109)] = 'TG0/3.19 T//Gs'
TAGL['%s-%s-monoA-CP' % (dbse, 109)] = 'Thymine from TG0/3.19 T//Gs'
TAGL['%s-%s-monoB-CP' % (dbse, 109)] = 'Guanine from TG0/3.19 T//Gs'
TAGL['%s-%s-monoA-unCP' % (dbse, 109)] = 'Thymine from TG0/3.19 T//Gs'
TAGL['%s-%s-monoB-unCP' % (dbse, 109)] = 'Guanine from TG0/3.19 T//Gs'
TAGL['%s-%s' % (dbse, 110)] = 'ST-40 TG0/3.19 A//Cs'
TAGL['%s-%s-dimer' % (dbse, 110)] = 'TG0/3.19 A//Cs'
TAGL['%s-%s-monoA-CP' % (dbse, 110)] = 'Adenine from TG0/3.19 A//Cs'
TAGL['%s-%s-monoB-CP' % (dbse, 110)] = 'Cytosine from TG0/3.19 A//Cs'
TAGL['%s-%s-monoA-unCP' % (dbse, 110)] = 'Adenine from TG0/3.19 A//Cs'
TAGL['%s-%s-monoB-unCP' % (dbse, 110)] = 'Cytosine from TG0/3.19 A//Cs'
TAGL['%s-%s' % (dbse, 111)] = 'ST-41 GT10/3.15 T//Gs'
TAGL['%s-%s-dimer' % (dbse, 111)] = 'GT10/3.15 T//Gs'
TAGL['%s-%s-monoA-CP' % (dbse, 111)] = 'Thymine from GT10/3.15 T//Gs'
TAGL['%s-%s-monoB-CP' % (dbse, 111)] = 'Guanine from GT10/3.15 T//Gs'
TAGL['%s-%s-monoA-unCP' % (dbse, 111)] = 'Thymine from GT10/3.15 T//Gs'
TAGL['%s-%s-monoB-unCP' % (dbse, 111)] = 'Guanine from GT10/3.15 T//Gs'
TAGL['%s-%s' % (dbse, 112)] = 'ST-42 GT10/3.15 A//Cs'
TAGL['%s-%s-dimer' % (dbse, 112)] = 'GT10/3.15 A//Cs'
TAGL['%s-%s-monoA-CP' % (dbse, 112)] = 'Adenine from GT10/3.15 A//Cs'
TAGL['%s-%s-monoB-CP' % (dbse, 112)] = 'Cytosine from GT10/3.15 A//Cs'
TAGL['%s-%s-monoA-unCP' % (dbse, 112)] = 'Adenine from GT10/3.15 A//Cs'
TAGL['%s-%s-monoB-unCP' % (dbse, 112)] = 'Cytosine from GT10/3.15 A//Cs'
TAGL['%s-%s' % (dbse, 113)] = 'ST-43 AT10/3.26 A//Ts'
TAGL['%s-%s-dimer' % (dbse, 113)] = 'AT10/3.26 A//Ts'
TAGL['%s-%s-monoA-CP' % (dbse, 113)] = 'Adenine from AT10/3.26 A//Ts'
TAGL['%s-%s-monoB-CP' % (dbse, 113)] = 'Thymine from AT10/3.26 A//Ts'
TAGL['%s-%s-monoA-unCP' % (dbse, 113)] = 'Adenine from AT10/3.26 A//Ts'
TAGL['%s-%s-monoB-unCP' % (dbse, 113)] = 'Thymine from AT10/3.26 A//Ts'
TAGL['%s-%s' % (dbse, 114)] = 'ST-44 TA08/3.16 A//Ts'
TAGL['%s-%s-dimer' % (dbse, 114)] = 'TA08/3.16 A//Ts'
TAGL['%s-%s-monoA-CP' % (dbse, 114)] = 'Adenine from TA08/3.16 A//Ts'
TAGL['%s-%s-monoB-CP' % (dbse, 114)] = 'Thymine from TA08/3.16 A//Ts'
TAGL['%s-%s-monoA-unCP' % (dbse, 114)] = 'Adenine from TA08/3.16 A//Ts'
TAGL['%s-%s-monoB-unCP' % (dbse, 114)] = 'Thymine from TA08/3.16 A//Ts'
TAGL['%s-%s' % (dbse, 115)] = 'ST-45 AA0/3.24 A//As'
TAGL['%s-%s-dimer' % (dbse, 115)] = 'AA0/3.24 A//As'
TAGL['%s-%s-monoA-CP' % (dbse, 115)] = 'Adenine from AA0/3.24 A//As'
TAGL['%s-%s-monoB-CP' % (dbse, 115)] = 'Adenine from AA0/3.24 A//As'
TAGL['%s-%s-monoA-unCP' % (dbse, 115)] = 'Adenine from AA0/3.24 A//As'
TAGL['%s-%s-monoB-unCP' % (dbse, 115)] = 'Adenine from AA0/3.24 A//As'
TAGL['%s-%s' % (dbse, 116)] = 'ST-46 AA0/3.24 T//Ts'
TAGL['%s-%s-dimer' % (dbse, 116)] = 'AA0/3.24 T//Ts'
TAGL['%s-%s-monoA-CP' % (dbse, 116)] = 'Thymine from AA0/3.24 T//Ts'
TAGL['%s-%s-monoB-CP' % (dbse, 116)] = 'Thymine from AA0/3.24 T//Ts'
TAGL['%s-%s-monoA-unCP' % (dbse, 116)] = 'Thymine from AA0/3.24 T//Ts'
TAGL['%s-%s-monoB-unCP' % (dbse, 116)] = 'Thymine from AA0/3.24 T//Ts'
TAGL['%s-%s' % (dbse, 117)] = 'ST-47 A...T S'
TAGL['%s-%s-dimer' % (dbse, 117)] = 'A...T S'
TAGL['%s-%s-monoA-CP' % (dbse, 117)] = 'methyl-Adenine from A...T S'
TAGL['%s-%s-monoB-CP' % (dbse, 117)] = 'methyl-Thymine from A...T S'
TAGL['%s-%s-monoA-unCP' % (dbse, 117)] = 'methyl-Adenine from A...T S'
TAGL['%s-%s-monoB-unCP' % (dbse, 117)] = 'methyl-Thymine from A...T S'
TAGL['%s-%s' % (dbse, 118)] = 'ST-48 G...C S'
TAGL['%s-%s-dimer' % (dbse, 118)] = 'G...C S'
TAGL['%s-%s-monoA-CP' % (dbse, 118)] = 'methyl-Cytosine from G...C S'
TAGL['%s-%s-monoB-CP' % (dbse, 118)] = 'methyl-Guanine from G...C S'
TAGL['%s-%s-monoA-unCP' % (dbse, 118)] = 'methyl-Cytosine from G...C S'
TAGL['%s-%s-monoB-unCP' % (dbse, 118)] = 'methyl-Guanine from G...C S'
TAGL['%s-%s' % (dbse, 119)] = 'ST-49 A...C S'
TAGL['%s-%s-dimer' % (dbse, 119)] = 'A...C S'
TAGL['%s-%s-monoA-CP' % (dbse, 119)] = 'methyl-Adenine from A...C S'
TAGL['%s-%s-monoB-CP' % (dbse, 119)] = 'methyl-Cytosine from A...C S'
TAGL['%s-%s-monoA-unCP' % (dbse, 119)] = 'methyl-Adenine from A...C S'
TAGL['%s-%s-monoB-unCP' % (dbse, 119)] = 'methyl-Cytosine from A...C S'
TAGL['%s-%s' % (dbse, 120)] = 'ST-50 T...G S'
TAGL['%s-%s-dimer' % (dbse, 120)] = 'T...G S'
TAGL['%s-%s-monoA-CP' % (dbse, 120)] = 'methyl-Thymine from T...G S'
TAGL['%s-%s-monoB-CP' % (dbse, 120)] = 'methyl-Guanine from T...G S'
TAGL['%s-%s-monoA-unCP' % (dbse, 120)] = 'methyl-Thymine from T...G S'
TAGL['%s-%s-monoB-unCP' % (dbse, 120)] = 'methyl-Guanine from T...G S'
TAGL['%s-%s' % (dbse, 121)] = 'ST-51 G...C S'
TAGL['%s-%s-dimer' % (dbse, 121)] = 'G...C S'
TAGL['%s-%s-monoA-CP' % (dbse, 121)] = 'Cytosine from G...C S'
TAGL['%s-%s-monoB-CP' % (dbse, 121)] = 'Guanine from G...C S'
TAGL['%s-%s-monoA-unCP' % (dbse, 121)] = 'Cytosine from G...C S'
TAGL['%s-%s-monoB-unCP' % (dbse, 121)] = 'Guanine from G...C S'
TAGL['%s-%s' % (dbse, 122)] = 'ST-52 A...G S'
TAGL['%s-%s-dimer' % (dbse, 122)] = 'A...G S'
TAGL['%s-%s-monoA-CP' % (dbse, 122)] = 'Adenine from A...G S'
TAGL['%s-%s-monoB-CP' % (dbse, 122)] = 'Guanine from A...G S'
TAGL['%s-%s-monoA-unCP' % (dbse, 122)] = 'Adenine from A...G S'
TAGL['%s-%s-monoB-unCP' % (dbse, 122)] = 'Guanine from A...G S'
TAGL['%s-%s' % (dbse, 123)] = 'ST-53 C...G S'
TAGL['%s-%s-dimer' % (dbse, 123)] = 'C...G S'
TAGL['%s-%s-monoA-CP' % (dbse, 123)] = 'Guanine from C...G S'
TAGL['%s-%s-monoB-CP' % (dbse, 123)] = 'Cytosine from C...G S'
TAGL['%s-%s-monoA-unCP' % (dbse, 123)] = 'Guanine from C...G S'
TAGL['%s-%s-monoB-unCP' % (dbse, 123)] = 'Cytosine from C...G S'
TAGL['%s-%s' % (dbse, 124)] = 'ST-54 G...C S'
TAGL['%s-%s-dimer' % (dbse, 124)] = 'G...C S'
TAGL['%s-%s-monoA-CP' % (dbse, 124)] = 'Guanine from G...C S'
TAGL['%s-%s-monoB-CP' % (dbse, 124)] = 'Cytosine from G...C S'
TAGL['%s-%s-monoA-unCP' % (dbse, 124)] = 'Guanine from G...C S'
TAGL['%s-%s-monoB-unCP' % (dbse, 124)] = 'Cytosine from G...C S'
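# Illustrative lookup sketch (the import alias `db` and the module name below
# are assumptions for illustration, not defined in this file). TAGL and the
# GEOS dictionary defined in the next section share key strings of the form
# '%s-%s[-qualifier]' % (dbse, index), so a driver script can pair each
# description with its geometry:
#
#     import database_module as db             # hypothetical name for this module
#     key = '%s-%s-dimer' % (db.dbse, 39)
#     print(db.TAGL[key])                       # -> 'GG0/3.36 CGis036'
#     mol = db.GEOS[key]                        # qcdb.Molecule from the section below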
# <<< Geometry Specification Strings >>>
GEOS = {}
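# Each entry below is constructed from a qcdb.Molecule text block: a
# 'charge multiplicity' line, Cartesian 'element x y z' rows for the first
# monomer, a '--' fragment separator, the same for the second monomer, and a
# closing 'units angstrom' line.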
GEOS['%s-%s-dimer' % (dbse, '1')] = qcdb.Molecule("""
0 1
C -1.0398599 -0.0950435 2.9628987
N -0.8760506 -0.1198953 4.3522101
C 0.3372729 -0.0573522 4.9526643
C 1.4603152 0.0294729 4.2021231
C 1.2876371 0.0522766 2.7771415
N 0.0866353 -0.0006919 2.2061593
O -2.1779850 -0.1592983 2.4996990
N 2.3517978 0.1313296 1.9777210
H -1.7254816 -0.1869061 4.8897274
H 0.3482118 -0.0833071 6.0321432
H 2.4345221 0.0778275 4.6597911
H 3.2714721 0.1534551 2.3764404
H 2.2350290 0.1077513 0.9551229
--
0 1
O 2.0171439 0.0263963 -0.7905108
C 0.9445057 0.0313388 -1.4013109
N -0.2671137 0.0963439 -0.7051367
C -1.5207327 0.1136461 -1.2552546
N -1.7528129 0.0544172 -2.5494108
C -0.6040129 -0.0113445 -3.2574879
C 0.7161247 -0.0244271 -2.8113172
N 1.6041685 -0.0981114 -3.8601422
C 0.8295480 -0.1292217 -4.9265187
N -0.5075993 -0.0802063 -4.6198760
N -2.5513427 0.2447649 -0.3850923
H -0.1820496 0.1041077 0.3219703
H 1.1760819 -0.1871623 -5.9443460
H -1.2844954 -0.0872596 -5.2590531
H -3.4573855 0.0691895 -0.7801319
H -2.4169221 0.0545062 0.6045745
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2')] = qcdb.Molecule("""
0 1
C -0.8133331 -0.0866715 2.9789275
N -0.6596824 -0.0799211 4.3738426
C 0.5690989 0.0065483 4.9361002
C 1.6895413 0.0878038 4.1758957
C 1.5121802 0.0788250 2.7574552
N 0.3010370 0.0009434 2.2093349
O -1.9566795 -0.1721648 2.5197622
N 2.5630702 0.1536301 1.9395171
C -1.8703132 -0.1697062 5.1759469
H 0.5992246 0.0046040 6.0165438
H 2.6640151 0.1533452 4.6318609
H 3.4878053 0.1855364 2.3258249
H 2.4345725 0.0922431 0.9177005
H -2.4003892 -1.0888680 4.9431219
H -2.5250547 0.6677091 4.9519253
H -1.5896839 -0.1549564 6.2247983
--
0 1
O 2.2125806 -0.0495287 -0.7919145
C 1.1295844 -0.0091268 -1.3874888
N -0.0672022 0.0855128 -0.6713507
C -1.3285435 0.1453481 -1.2005422
N -1.5812601 0.1027260 -2.4911624
C -0.4460576 0.0088365 -3.2186883
C 0.8814078 -0.0466784 -2.7933188
N 1.7434051 -0.1366731 -3.8585399
C 0.9419745 -0.1354113 -4.9081560
N -0.3886567 -0.0498363 -4.5825778
N -2.3394192 0.3029765 -0.3108597
H 0.0331014 0.0898348 0.3565931
H 1.2604461 -0.1939844 -5.9363793
C -1.5258946 -0.0225882 -5.4753265
H -3.2566158 0.1569838 -0.6925288
H -2.1947491 0.0836423 0.6730029
H -2.1741480 -0.8737721 -5.2827842
H -2.0939825 0.8923400 -5.3277349
H -1.1628640 -0.0654072 -6.4980769
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3')] = qcdb.Molecule("""
0 1
N 0.9350155 -0.0279801 -0.3788916
C 1.6739638 -0.0357766 0.7424316
C 3.0747955 -0.0094480 0.5994562
C 3.5646109 0.0195446 -0.7059872
N 2.8531510 0.0258031 -1.8409596
C 1.5490760 0.0012569 -1.5808009
N 4.0885824 -0.0054429 1.5289786
C 5.1829921 0.0253971 0.7872176
N 4.9294871 0.0412404 -0.5567274
N 1.0716177 -0.0765366 1.9391390
H 0.8794435 0.0050260 -2.4315709
H 6.1882591 0.0375542 1.1738824
H 5.6035368 0.0648755 -1.3036811
H 0.0586915 -0.0423765 2.0039181
H 1.6443796 -0.0347395 2.7619159
--
0 1
N -3.9211729 -0.0009646 -1.5163659
C -4.6136833 0.0169051 -0.3336520
C -3.9917387 0.0219348 0.8663338
C -2.5361367 0.0074651 0.8766724
N -1.9256484 -0.0110593 -0.3638948
C -2.5395897 -0.0149474 -1.5962357
C -4.7106131 0.0413373 2.1738637
O -1.8674730 0.0112093 1.9120833
O -1.9416783 -0.0291878 -2.6573783
H -4.4017172 -0.0036078 -2.4004924
H -0.8838255 -0.0216168 -0.3784269
H -5.6909220 0.0269347 -0.4227183
H -4.4439282 -0.8302573 2.7695655
H -4.4267056 0.9186178 2.7530256
H -5.7883971 0.0505530 2.0247280
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4')] = qcdb.Molecule("""
0 1
N 1.4233678 -2.5755572 -0.0177928
C 2.4164068 -1.6737862 -0.0069340
N 3.6894208 -2.0970069 0.0011813
C 4.6820785 -1.1903949 0.0099205
N 4.6008957 0.1417597 0.0124822
C 3.3223927 0.5333463 0.0042869
C 2.1894368 -0.2816944 -0.0057960
N 1.0473244 0.4790858 -0.0114405
C 1.4850348 1.7325334 -0.0051890
N 2.8426160 1.8225456 0.0046153
H 5.6822446 -1.6040945 0.0161209
H 0.8406886 2.5978945 -0.0067705
C 3.6543123 3.0217765 0.0114721
H 1.6817907 -3.5456829 -0.0088365
H 0.4430037 -2.3144424 -0.0119382
H 4.2913482 3.0300392 0.8917344
H 4.2797691 3.0498799 -0.8767515
H 2.9957930 3.8849549 0.0253861
--
0 1
N -1.7137952 0.0896100 -0.0160334
C -2.4110219 1.2713542 -0.0102985
N -3.7875706 1.1256389 -0.0070257
C -4.3748768 -0.1132637 0.0070053
C -3.6761626 -1.2738156 0.0090266
C -2.2277642 -1.1903377 -0.0021447
O -1.8742013 2.3720388 -0.0087627
O -1.4812933 -2.1730005 -0.0012821
C -4.3178351 -2.6216898 0.0206209
C -4.5749913 2.3488054 0.0099294
H -0.6740657 0.1873910 -0.0185734
H -5.4567167 -0.1063625 0.0145634
H -4.0169219 -3.1954743 -0.8545578
H -5.4025389 -2.5351168 0.0301255
H -4.0008440 -3.1876298 0.8951907
H -4.2676272 2.9948502 -0.8071476
H -4.4269734 2.8838422 0.9449871
H -5.6215286 2.0832557 -0.1007218
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5')] = qcdb.Molecule("""
0 1
N -0.7878100 0.0020606 -4.2304138
C -0.8092899 0.0015891 -2.8583291
C 0.5077793 -0.0002173 -2.4445540
N 1.3042354 -0.0007671 -3.5776764
C 0.5251204 0.0003646 -4.7183358
C 0.8109948 -0.0022184 -1.0723889
N -0.3541156 -0.0012118 -0.3048371
C -1.6266470 0.0012022 -0.7959383
N -1.9085135 0.0021412 -2.0882229
O 1.9375199 -0.0047453 -0.5373376
N -2.6234345 0.0038170 0.1093233
O 0.8790764 -0.0004039 -5.8828291
H -1.5908258 0.0034199 -4.8351827
H -0.2139163 -0.0018761 0.7197062
H -3.5584869 0.0004853 -0.2509512
H -2.4522376 -0.0015362 1.1120222
H 2.3082381 -0.0028321 -3.6013832
--
0 1
N 2.3837158 0.0024084 2.1880115
C 1.3635083 0.0013168 3.0441674
C 1.6066986 0.0037303 4.4583838
C 0.5209494 0.0030055 5.2667859
N -0.7245613 -0.0002446 4.7321333
C -0.9573175 -0.0028235 3.3551883
N 0.1301240 -0.0019248 2.5406741
O -2.1208999 -0.0056022 2.9504908
H -1.5467235 -0.0008598 5.3145350
H 0.5868645 0.0048893 6.3446020
H 2.6043649 0.0062410 4.8655851
H 3.3229417 0.0045233 2.5396202
H 2.2148236 0.0000552 1.1689293
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6')] = qcdb.Molecule("""
0 1
C -0.0399020 0.0000000 -0.0353727
N -0.0114814 0.0000000 1.3676751
C 1.1387066 0.0000000 2.0831816
C 2.3367544 0.0000000 1.4511865
C 2.3093819 0.0000000 0.0167889
N 1.1708246 0.0000000 -0.6653863
O -1.1150036 0.0000000 -0.6203815
N 3.4490015 0.0000000 -0.6790500
H -0.9108314 0.0000000 1.8214404
H 1.0410884 0.0000000 3.1587270
H 3.2607830 0.0000000 2.0056829
H 4.3278873 0.0000000 -0.1979709
H 3.4179786 0.0000000 -1.7058513
--
0 1
O 3.2403090 0.0000000 -3.4870523
C 2.1818608 0.0000000 -4.1224126
N 0.9469308 0.0000000 -3.4715093
C -0.2749703 0.0000000 -4.0613039
N -0.4833351 0.0000000 -5.3532242
C 0.6850532 0.0000000 -6.0363493
C 1.9918547 0.0000000 -5.5441185
N 2.9083577 0.0000000 -6.5651625
C 2.1671699 0.0000000 -7.6581930
N 0.8245995 0.0000000 -7.3970665
H -1.1112956 0.0000000 -3.3753120
H 0.9890303 0.0000000 -2.4307715
H 2.5507838 0.0000000 -8.6645924
H 0.0674277 0.0000000 -8.0602299
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7')] = qcdb.Molecule("""
0 1
O -1.3445145 -0.0017812 0.2109785
C -0.5827723 -0.0011739 1.1802676
N 0.8024859 -0.0013909 0.9807253
C 1.7623754 -0.0004370 1.9497163
N 1.5138727 0.0005531 3.2372784
C 0.1802572 0.0006138 3.4773368
C -0.8864943 -0.0001193 2.5787139
N -2.0960454 0.0003342 3.2323438
C -1.7654415 0.0013112 4.5084219
N -0.4076744 0.0015312 4.7105883
N 3.0436067 -0.0006786 1.5010046
H 1.1059736 -0.0017584 -0.0014454
H -2.4611653 0.0018946 5.3301928
H 0.0813329 0.0022447 5.5900062
H 3.7807912 0.2008871 2.1781729
H 3.2486220 -0.0002735 0.5197891
--
0 1
O 1.7201107 -0.0009041 -1.6341114
C 0.9160823 -0.0002542 -2.5726633
N -0.4364135 -0.0009531 -2.4309422
C -1.3901935 -0.0001633 -3.4689726
C -0.8112768 0.0014209 -4.8053842
C 0.5255837 0.0020253 -4.9588359
N 1.3652416 0.0012542 -3.8724327
O -2.5802179 -0.0008229 -3.2195788
H -1.4757079 0.0020523 -5.6523281
H -0.8069002 -0.0016737 -1.4589893
H 1.0090483 0.0031967 -5.9240562
H 2.3651020 0.0016683 -3.9825177
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8')] = qcdb.Molecule("""
0 1
C -0.0546262 -0.0000666 -0.0449366
N -0.0199900 -0.0000391 1.3370041
C 1.1383570 0.0000076 2.0419758
C 2.3258577 0.0000418 1.3914589
C 2.2850892 0.0000244 -0.0362838
N 1.1291861 -0.0000259 -0.7116098
O -1.1572546 -0.0001260 -0.6121938
N 3.4163218 0.0000650 -0.7389180
H -0.9156158 -0.0000626 1.8021973
H 1.0513420 0.0000150 3.1177694
H 3.2570460 0.0000679 1.9330954
H 4.3021685 0.0000746 -0.2670886
H 3.3885124 0.0000089 -1.7541744
--
1 1
N 0.8209043 0.0000137 -3.4723115
C -0.4088139 0.0000345 -4.0385840
C -0.4948596 0.0000126 -5.4617196
C 0.6604919 -0.0000154 -6.1675372
N 1.8676936 -0.0000144 -5.5424117
C 2.0133448 -0.0000297 -4.1696950
O 3.1022731 -0.0000760 -3.6244111
N -1.4596996 0.0000728 -3.2533202
H 2.7287536 -0.0000519 -6.0699661
H 0.6811915 -0.0000377 -7.2470448
H -1.4500232 0.0000004 -5.9585035
H -2.3755461 0.0000545 -3.6707993
H -1.3585129 0.0000190 -2.2095314
H 0.9279995 0.0000010 -2.4136765
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9')] = qcdb.Molecule("""
0 1
O 3.5986069 0.3187715 -0.0000425
C 3.1043656 -0.7907721 0.0000494
N 3.8766606 -1.9463148 0.0000974
C 3.3523333 -3.2081585 0.0000314
C 2.0204180 -3.4182033 0.0000005
C 1.1157326 -2.2823273 -0.0000024
N 1.7481024 -1.0416244 0.0001189
O -0.1074745 -2.3680037 -0.0001125
H 1.6059877 -4.4112397 -0.0000642
H 4.8710378 -1.7927175 -0.0000238
H 1.1448708 -0.2099855 0.0000657
H 4.0702664 -4.0151053 -0.0000042
--
0 1
O 0.0832848 1.3018469 -0.0000275
C -1.1439944 1.3419495 0.0000322
N -1.7902509 2.5769847 0.0001252
C -3.1492468 2.8315090 0.0000032
N -3.9059521 1.6745104 0.0001278
C -3.3673000 0.4141979 0.0000554
C -2.0348331 0.2020916 0.0000036
O -3.6310511 3.9463823 -0.0001798
H -1.6060027 -0.7883399 -0.0000828
H -4.9024679 1.8155815 -0.0000087
H -1.1951712 3.3944157 0.0000479
H -4.0815013 -0.3955360 0.0000308
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10')] = qcdb.Molecule("""
0 1
O 3.0139530 -0.0000663 -2.3714607
C 3.0149686 -0.0000275 -1.1572038
N 4.1880062 -0.0001806 -0.4139005
C 4.2197761 0.0000190 0.9517028
C 3.0868691 0.0000352 1.6837875
C 1.8067676 -0.0000168 1.0072050
N 1.8766809 0.0001650 -0.3776725
O 0.7209062 0.0000058 1.5860958
H 3.1056982 -0.0000395 2.7597452
H 5.0361993 0.0000319 -0.9554725
H 0.9797808 -0.0001787 -0.8806223
H 5.2026538 0.0000504 1.3991588
--
0 1
O -0.6997337 0.0001361 -1.5583592
C -1.7610439 0.0000592 -0.9401093
N -2.9691203 0.0001496 -1.6022795
C -4.1792891 0.0000396 -0.9575726
C -4.2572226 -0.0001289 0.3871173
C -3.0393068 -0.0001349 1.1840453
N -1.8606585 -0.0000544 0.4209370
O -2.9932145 -0.0002449 2.4009987
H -5.2057276 0.0004029 0.8960881
H -2.9068168 0.0002610 -2.6064637
H -0.9648574 -0.0000753 0.9279392
H -5.0495438 0.0000916 -1.5966575
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11')] = qcdb.Molecule("""
0 1
C -0.0268479 0.0001243 -0.0484048
N 0.0107678 -0.0000464 1.3475182
C 1.1662769 -0.0000863 2.0541365
C 2.3502411 -0.0000220 1.4003156
C 2.3025646 0.0000460 -0.0345481
N 1.1568381 0.0001109 -0.7170898
O -1.1319705 0.0002807 -0.5958040
N 3.4410032 0.0000681 -0.7244676
H -0.8870732 -0.0000629 1.8046190
H 1.0808205 -0.0001565 3.1305463
H 3.2850793 -0.0000335 1.9362836
H 4.3196967 -0.0000384 -0.2408021
H 3.4310344 0.0000688 -1.7533114
--
0 1
S 3.6261481 0.0000331 -3.9489529
C 2.0961273 -0.0000288 -4.6037680
N 0.9509789 -0.0001104 -3.8229476
C -0.3400955 -0.0001574 -4.2939721
N -0.6487077 -0.0000023 -5.5786621
C 0.4418258 0.0000453 -6.3611558
C 1.7866043 -0.0000027 -5.9848867
N 2.6124570 0.0000014 -7.0854825
C 1.7805999 0.0000520 -8.1059933
N 0.4593064 0.0000986 -7.7272032
N -1.3218629 -0.0005782 -3.3778714
H 1.0832701 -0.0001308 -2.8047028
H 2.0715160 0.0000504 -9.1429108
H -0.3508790 0.0000799 -8.3240783
H -2.2569895 0.0000670 -3.7397613
H -1.1666925 0.0000488 -2.3691590
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12')] = qcdb.Molecule("""
0 1
N -2.3081868 0.7091068 0.0000285
C -1.2051249 1.4749768 -0.0000074
N 0.0068404 0.8955116 -0.0000239
C 1.1080909 1.6831490 -0.0000001
N 1.1718597 3.0108449 0.0000248
C -0.0467899 3.5587068 0.0000144
C -1.2685607 2.8863732 -0.0000096
N -2.3312449 3.7631581 -0.0000201
C -1.7532251 4.9525322 0.0000002
N -0.3875112 4.8900390 0.0000301
H 2.0473735 1.1474715 -0.0000027
H -2.2800394 5.8934739 -0.0000005
H 0.2491479 5.6682121 -0.0000014
H -2.2351122 -0.2997556 -0.0000060
H -3.2108389 1.1496550 -0.0000063
--
0 1
S -1.7524155 -2.8468749 -0.0000015
C -0.1157899 -3.0959877 0.0000038
N 0.7757259 -2.0472741 0.0000119
C 2.1570215 -2.1260472 0.0000127
N 2.6399748 -3.4231595 0.0000214
C 1.8308010 -4.5235818 0.0000026
C 0.4852439 -4.4036601 -0.0000120
O 2.8928976 -1.1571548 -0.0000022
H -0.1424437 -5.2790356 -0.0000032
H 3.6448728 -3.5071844 -0.0000023
H 0.4048510 -1.0816361 -0.0000008
H 2.3325010 -5.4794294 -0.0000001
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13')] = qcdb.Molecule("""
0 1
N -0.9044942 0.3053428 -1.9849463
C -1.5722006 0.1028596 -0.8342896
N -0.8984868 -0.0828082 0.3086249
C -1.5939985 -0.2540305 1.4676013
N -2.9198181 -0.2548010 1.6283568
C -3.5512123 -0.0734507 0.4647187
C -2.9785368 0.1103594 -0.7891549
N -3.9279869 0.2613244 -1.7761223
C -5.0691734 0.1716827 -1.1179520
N -4.9024796 -0.0280544 0.2301645
N -0.8371284 -0.4982415 2.5786136
H -6.0473812 0.2435326 -1.5626408
H -5.6242357 -0.1304918 0.9234162
H 0.0816120 0.0641263 -2.0404453
H -1.4560458 0.2977892 -2.8251132
H -1.3448761 -0.4008730 3.4406879
H 0.1151602 -0.1525559 2.5800463
--
0 1
O 1.9075808 -0.3384204 -1.9978152
C 2.5680509 -0.1510537 -0.9753123
N 1.9481026 0.0211081 0.2519027
C 2.5671978 0.2530499 1.4535237
N 3.9422564 0.3000243 1.3759119
C 4.6421414 0.1305464 0.2085191
C 4.0227571 -0.0934717 -0.9708747
O 1.9735147 0.4072231 2.5136021
C 4.7417280 -0.2795724 -2.2650127
H 4.4155554 0.4660468 2.2485693
H 0.8994704 -0.0257608 0.2762592
H 5.7177824 0.1899609 0.2946839
H 4.4976921 -1.2470914 -2.7006324
H 4.4340213 0.4777637 -2.9842064
H 5.8186182 -0.2165735 -2.1235955
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14')] = qcdb.Molecule("""
0 1
N 0.3803518 5.2710590 0.0000000
C -0.9827443 5.4382698 0.0000000
N -1.6445695 4.2964704 0.0000000
C -0.6462489 3.3468246 0.0000000
C 0.6196111 3.9194590 0.0000000
N 1.7959710 3.2884069 0.0000000
C 1.6381971 1.9602261 0.0000000
N 0.4661224 1.2614481 0.0000000
C -0.6897222 1.9395705 0.0000000
N 2.7687654 1.2167374 0.0000000
N -1.8599123 1.2852151 0.0000000
H 1.0814580 5.9922927 0.0000000
H -1.4336046 6.4162469 0.0000000
H -1.9018421 0.2719784 0.0000000
H -2.7003404 1.8337683 0.0000000
H 3.6406811 1.7091587 0.0000000
H 2.7459099 0.2072280 0.0000000
--
0 1
C 1.6184078 -2.2819447 0.0000000
N 0.4001260 -1.6517356 0.0000000
C -0.8434818 -2.2665916 0.0000000
C -0.8382446 -3.7219904 0.0000000
C 0.3574635 -4.3499319 0.0000000
N 1.5408193 -3.6563635 0.0000000
C -2.1496426 -4.4344119 0.0000000
O -1.8782774 -1.6012490 0.0000000
O 2.6941241 -1.6972351 0.0000000
H 0.4220912 -0.6062649 0.0000000
H 2.4263852 -4.1347427 0.0000000
H 0.4460260 -5.4270144 0.0000000
H -2.7348414 -4.1561542 -0.8748178
H -2.0041973 -5.5125757 0.0000000
H -2.7348414 -4.1561542 0.8748178
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15')] = qcdb.Molecule("""
0 1
N -5.1985541 0.4936739 0.0318901
C -5.3820983 -0.8616291 0.0378780
H -6.3663575 -1.2989623 0.0563254
N -4.2515351 -1.5472672 0.0187374
C -3.2863426 -0.5669089 -0.0011454
C -1.8812597 -0.6327273 -0.0265724
N -1.2256807 -1.8085660 -0.0449899
H -1.7524474 -2.6605390 0.0028082
H -0.2208316 -1.8257421 -0.0311968
N -1.1915263 0.5144581 -0.0390690
C -1.8701238 1.6793288 -0.0286861
H -1.2534253 2.5689235 -0.0396068
N -3.1871042 1.8787826 -0.0064054
C -3.8427150 0.7125075 0.0068719
H -5.9109500 1.2044355 0.0439248
--
0 1
C 4.4082682 1.3958429 0.0182886
C 4.9187035 0.0992764 0.0212789
H 5.9905108 -0.0483384 0.0314539
C 4.0880886 -1.0223564 0.0114675
C 4.6130388 -2.4267390 0.0143734
H 4.2620014 -2.9754574 0.8873371
H 4.2783956 -2.9729351 -0.8665709
H 5.7002762 -2.4237810 0.0245680
C 2.7198280 -0.7726204 -0.0014366
F 1.8841246 -1.8434019 -0.0116101
C 2.1541812 0.4899806 -0.0052011
H 1.0780578 0.6238951 -0.0168588
C 3.0326742 1.5626996 0.0050555
F 2.5236686 2.8066583 0.0017348
H 5.0549009 2.2603064 0.0258240
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16')] = qcdb.Molecule("""
0 1
O 0.3144345 -1.1442948 0.0144949
C 1.3535742 -0.4837792 0.0219615
N 1.2957095 0.9167643 0.0619980
C 2.3569978 1.7719209 0.0564464
N 3.6092442 1.3935257 0.0165258
C 3.7175450 0.0431271 -0.0038819
C 2.7154197 -0.9260868 -0.0053966
N 3.2450349 -2.1934744 -0.0364308
C 4.5477687 -1.9938501 -0.0526937
N 4.8854175 -0.6639760 -0.0358567
N 2.0456853 3.1046346 0.1569813
H 0.3457459 1.3051947 0.0833829
H 5.2946011 -2.7690789 -0.0763507
H 5.8090734 -0.2651897 -0.0402909
H 2.8020253 3.7137889 -0.1056218
H 1.1380549 3.3808140 -0.1794301
--
0 1
O -1.3169188 1.9540889 -0.0350694
C -2.3291669 1.2492795 -0.0287951
N -2.3117178 -0.1155253 -0.0025392
C -3.4195015 -0.9596499 0.0079063
C -4.6908528 -0.2770302 -0.0148404
C -4.7376559 1.0709532 -0.0414746
N -3.5816109 1.8128549 -0.0480085
S -3.2558282 -2.5902318 0.0425862
H -5.5930477 -0.8648219 -0.0095256
H -1.3692770 -0.5503586 0.0090372
H -5.6601199 1.6316629 -0.0586834
H -3.6021712 2.8189896 -0.0637914
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17')] = qcdb.Molecule("""
0 1
O 0.2958263 -1.2112383 0.3424002
C 1.3078216 -0.5286166 0.1794526
N 1.2037967 0.8617133 0.0406924
C 2.2285795 1.7353917 -0.1726765
N 3.4850711 1.3842589 -0.2697628
C 3.6408598 0.0475045 -0.1126641
C 2.6766347 -0.9371114 0.0982060
N 3.2448896 -2.1840776 0.1912254
C 4.5343218 -1.9574061 0.0391817
N 4.8260700 -0.6298861 -0.1485794
N 1.8780889 3.0609691 -0.2292539
H 0.2531805 1.2355886 0.1433595
H 5.3034685 -2.7105809 0.0557498
H 5.7326070 -0.2147977 -0.2854525
H 2.6033664 3.6462550 -0.6086135
H 0.9511283 3.2665112 -0.5646975
--
0 1
S -1.8220636 2.0964300 0.3299922
C -2.7962507 0.7575832 0.0980480
N -2.3768437 -0.5243435 0.0586503
C -3.1865402 -1.6725337 -0.1260894
C -4.6014876 -1.3770894 -0.2724329
C -5.0258914 -0.0988346 -0.2322480
N -4.1419110 0.9324971 -0.0553359
O -2.6885486 -2.7802641 -0.1499990
H -5.2868250 -2.1961027 -0.4114699
H -1.3618003 -0.7128334 0.1812758
H -6.0623003 0.1861824 -0.3346801
H -4.4551541 1.8895240 -0.0237988
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18')] = qcdb.Molecule("""
0 1
C -1.2382495 0.0003068 3.2761967
N -0.8377699 -0.0002822 4.6262520
C 0.4580599 -0.0008039 5.0168008
C 1.4462017 -0.0007290 4.0905459
C 1.0380467 -0.0000946 2.7139311
N -0.2347225 0.0003919 2.3461594
O -2.4294469 0.0006718 3.0053097
N 1.9638405 0.0000201 1.7458948
H -1.5882710 -0.0003035 5.2983303
H 0.6465072 -0.0012211 6.0804324
H 2.4837567 -0.0011342 4.3810193
H 2.9358093 -0.0004389 1.9899669
H 1.6736506 0.0004167 0.7589718
--
0 1
N -1.1590741 0.0004019 -0.4138632
C -0.2319446 0.0003452 -1.3716397
N 1.0782989 0.0006222 -1.0483779
C 1.9971055 0.0005759 -2.0347184
N 1.8153528 0.0002525 -3.3521684
C 0.5065246 -0.0000627 -3.6438316
C -0.5584383 -0.0000492 -2.7449616
N -1.7730910 -0.0004726 -3.3901944
C -1.4412382 -0.0006880 -4.6700413
N -0.0894486 -0.0004698 -4.8806889
H 3.0276349 0.0008204 -1.7005582
H -2.1424713 -0.0010329 -5.4876572
H 0.3894075 -0.0006068 -5.7657319
H -0.8924030 0.0005975 0.5753416
H -2.1264189 0.0000818 -0.6840313
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19')] = qcdb.Molecule("""
0 1
O 1.7709955 2.3306811 0.0000007
C 0.5807567 2.6278573 -0.0000017
N -0.3963320 1.6134139 0.0000053
C -1.7496702 1.7814670 -0.0000008
N -2.3431748 2.9556940 0.0000044
C -1.4426427 3.9663046 -0.0000011
C -0.0499495 3.9187042 0.0000001
N 0.4958359 5.1819761 -0.0000084
C -0.5511865 5.9829108 0.0000000
N -1.7428313 5.3021768 0.0000079
N -2.4981258 0.6511289 -0.0000042
H -0.0110714 0.6591960 -0.0000028
H -0.5111672 7.0591526 0.0000000
H -2.6703203 5.6919780 -0.0000047
H -3.4931951 0.7676564 0.0000001
H -2.1057181 -0.2785664 0.0000054
--
0 1
O -1.7956163 -2.4184989 0.0000035
C -0.6750612 -2.9133012 -0.0000022
N -0.5075308 -4.3172987 0.0000000
C 0.6872157 -4.9931784 0.0000035
N 1.8564548 -4.4063354 0.0000002
C 1.7518235 -3.0547138 -0.0000036
C 0.6020117 -2.2761182 0.0000000
N 0.9066335 -0.9422663 -0.0000018
C 2.2267133 -0.8962763 0.0000026
N 2.7793888 -2.1508756 0.0000002
N 0.6219231 -6.3495411 -0.0000038
H -1.3767427 -4.8336984 0.0000000
H 2.7978322 0.0182460 0.0000000
H 3.7606420 -2.3771663 -0.0000001
H 1.4878769 -6.8540474 0.0000006
H -0.2426849 -6.8513669 0.0000027
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20')] = qcdb.Molecule("""
0 1
O -2.1042101 2.1109877 -0.0000208
C -0.9861671 2.6187107 -0.0000392
N 0.1630602 1.8075078 0.0000102
C 1.4605433 2.2338425 0.0001555
N 1.8191828 3.4984777 -0.0000542
C 0.7428289 4.3179348 -0.0001135
C -0.6139590 4.0028703 -0.0000745
N -1.3931127 5.1362751 -0.0000141
C -0.5202985 6.1236314 -0.0000063
N 0.7801628 5.6851901 -0.0000822
N 2.4058015 1.2657395 0.0011086
H -0.0159870 0.7952237 0.0001524
H -0.7678388 7.1715226 0.0000486
H 1.6159456 6.2449111 -0.0000097
H 3.3631616 1.5602679 -0.0004150
H 2.1837654 0.2818725 -0.0005076
--
0 1
S 2.4306485 -2.2874888 -0.0000855
C 0.9168812 -2.9468359 -0.0000744
N 0.7467080 -4.3320487 -0.0000158
C -0.4440553 -5.0077630 0.0002108
N -1.6149750 -4.4178761 -0.0000758
C -1.5042335 -3.0717080 -0.0001193
C -0.3421815 -2.2986807 -0.0000424
N -0.6475784 -0.9636606 0.0000146
C -1.9693802 -0.9175554 -0.0000159
N -2.5269735 -2.1675435 -0.0001085
N -0.3741558 -6.3629805 0.0018230
H 1.6159815 -4.8501428 0.0002176
H -2.5316778 0.0048746 0.0000297
H -3.5101913 -2.3858700 -0.0000954
H -1.2351647 -6.8756289 -0.0006400
H 0.4955304 -6.8577215 -0.0011689
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '21')] = qcdb.Molecule("""
0 1
S -1.8166246 2.6821898 -0.0001323
C -0.1580956 2.6810732 -0.0000609
N 0.5668809 1.4952672 0.0000612
C 1.9289258 1.3802774 0.0002313
N 2.7529724 2.4104948 0.0000355
C 2.0872014 3.5809186 -0.0000521
C 0.7090604 3.8070941 -0.0000816
N 0.4280062 5.1539248 -0.0000845
C 1.6128743 5.7291564 -0.0000496
N 2.6486647 4.8271187 -0.0000590
N 2.4376947 0.1291202 0.0011300
H 0.0139020 0.6266213 0.0001583
H 1.7867621 6.7921840 -0.0000082
H 3.6347397 5.0250186 0.0000158
H 3.4373976 0.0573910 -0.0001455
H 1.8889030 -0.7194971 -0.0004252
--
0 1
O 1.5845436 -2.6967539 -0.0001530
C 0.4227905 -3.0880579 -0.0000036
N 0.1350039 -4.4687763 0.0000593
C -1.1124849 -5.0407117 -0.0002399
N -2.2264983 -4.3563305 -0.0000043
C -2.0034350 -3.0192765 0.0001096
C -0.7909434 -2.3397387 0.0000690
N -0.9816420 -0.9826389 0.0000103
C -2.2944101 -0.8306632 0.0000066
N -2.9493710 -2.0333946 0.0000657
N -1.1503939 -6.3976637 -0.0016596
H 0.9553753 -5.0587791 -0.0001303
H -2.7848264 0.1304484 -0.0000198
H -3.9468229 -2.1737881 0.0000098
H -2.0477574 -6.8436736 0.0002445
H -0.3206311 -6.9565723 0.0005285
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '22')] = qcdb.Molecule("""
0 1
O -1.3058058 0.3353432 -1.9024452
C -1.9900049 0.1709800 -0.8920389
N -1.3797575 0.0147348 0.3602195
C -2.0188483 -0.2185512 1.5398296
N -3.3172819 -0.2995486 1.6835149
C -3.9535777 -0.1111832 0.5011380
C -3.4163316 0.1070768 -0.7658722
N -4.4021933 0.2259169 -1.7159019
C -5.5203687 0.0828308 -1.0307622
N -5.3061034 -0.1241201 0.3081537
N -1.1984729 -0.3198503 2.6442461
H -0.3465494 0.1232217 0.3842921
H -6.5125630 0.1194762 -1.4473490
H -5.9980490 -0.2607961 1.0259833
H -1.6811020 -0.7086455 3.4387722
H -0.3023253 -0.7473222 2.4686403
--
0 1
N 1.4487686 -0.3061821 -1.8063482
C 2.1291340 -0.0639194 -0.6809352
N 1.4721887 0.2311616 0.4586152
C 2.1772393 0.4822275 1.5830193
N 3.4937302 0.4667260 1.7609342
C 4.1213725 0.1541959 0.6180841
C 3.5365208 -0.1183154 -0.6181769
N 4.4783743 -0.3859600 -1.5825801
C 5.6237470 -0.2779583 -0.9307987
N 5.4697684 0.0435735 0.3900585
H 1.5797459 0.7376023 2.4499547
H 6.5972124 -0.4228222 -1.3684582
H 6.1971892 0.1827155 1.0717315
H 0.4522459 -0.0785075 -1.8709034
H 1.9849455 -0.4365244 -2.6464500
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '23')] = qcdb.Molecule("""
0 1
N 0.5317472 -1.5315785 0.0000000
C 1.6654518 -2.2639451 0.0000000
N 1.8178303 -3.5826276 0.0000000
C 0.6256673 -4.1952397 0.0000000
C -0.6270275 -3.5869658 0.0000000
C -0.6548527 -2.1760608 0.0000000
N 0.3553662 -5.5402802 0.0000000
C -1.0067488 -5.6700992 0.0000000
N -1.6430990 -4.5118780 0.0000000
H 1.0374873 -6.2804981 0.0000000
H -1.4834180 -6.6357628 0.0000000
H 2.5904811 -1.6988711 0.0000000
N -1.8018223 -1.4963325 0.0000000
H -2.6555758 -2.0258909 0.0000000
H -1.8291436 -0.4726173 0.0000000
--
0 1
C 1.5820983 2.1166821 0.0000000
N 0.3982589 1.4363592 0.0000000
C -0.8811915 2.0133126 0.0000000
C -0.7922275 3.4409053 0.0000000
C 0.4748735 4.0188928 0.0000000
N 1.6897724 3.4229374 0.0000000
N 0.2422898 5.3650391 0.0000000
C -1.1196961 5.5363613 0.0000000
N -1.7806879 4.3962812 0.0000000
N 2.7046701 1.3533161 0.0000000
H 2.6551620 0.3544770 0.0000000
H 3.5911246 1.8188331 0.0000000
H 0.4300601 0.4044282 0.0000000
O -1.8846384 1.3003960 0.0000000
H -1.5681188 6.5152131 0.0000000
H 0.9500064 6.0803260 0.0000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '24')] = qcdb.Molecule("""
0 1
O -1.3082180 -0.2837400 -5.2857830
C -1.2591040 -0.1735270 -4.0737910
N -0.0039450 -0.3880190 -3.4022040
C 0.2384190 -0.3204430 -2.0528570
N -0.7115810 -0.0321780 -1.1774330
C -1.9176870 0.2005520 -1.7572050
C -2.2714950 0.1569770 -3.1054550
N -3.6087050 0.4637830 -3.2773380
C -4.0524060 0.6893350 -2.0695760
N -3.0715720 0.5411370 -1.1003800
N 1.4926750 -0.5945950 -1.6152550
H 0.7445330 -0.6355620 -4.0377220
H -5.0658850 0.9619290 -1.8095140
H -3.1606740 0.6975300 -0.1076790
H 1.7292200 -0.3123020 -0.6555620
H 2.2464200 -0.6005410 -2.2841000
--
0 1
N -0.6357410 -0.7643850 1.7470590
C 0.2971930 -0.4605580 2.6687480
N -0.0180070 -0.6125000 3.9730480
C 0.8974790 -0.3069680 4.9024040
N 2.1418480 0.1550240 4.7356170
C 2.4316560 0.2831700 3.4365580
C 1.5994220 -0.0008880 2.3483350
N 2.2633010 0.2340730 1.1506720
C 3.4544220 0.6547710 1.5098070
N 3.6165670 0.7091770 2.8719470
H 0.5799100 -0.4518430 5.9329470
H 4.2477100 0.9349840 0.8305350
H 4.4400710 0.9945670 3.3802140
H -1.5203500 -1.0951610 2.1001930
H -0.5364890 -0.5388150 0.7550260
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '25')] = qcdb.Molecule("""
0 1
C 1.2592909 1.6400416 0.0000000
N -0.0329944 1.3939366 0.0000000
C -0.7724457 2.5351119 0.0000000
C -0.3568209 3.8625834 0.0000000
C 1.0524521 4.1391914 0.0000000
N 1.7707185 2.9096522 0.0000000
N 2.1434672 0.6224336 0.0000000
N -1.4258984 4.7253576 0.0000000
C -2.4770874 3.9323264 0.0000000
N -2.1383285 2.6014184 0.0000000
H -2.7704070 1.8193281 0.0000000
H -3.5031441 4.2568795 0.0000000
O 1.6606912 5.1923670 0.0000000
H 2.7730000 3.0373368 0.0000000
H 1.8138435 -0.3438647 0.0000000
H 3.1276914 0.8060391 0.0000000
--
0 1
C 2.2859985 -3.1747071 0.0000000
N 1.3685098 -2.2195054 0.0000000
C 0.1720555 -2.9042803 0.0000000
N 1.7524294 -4.4267217 0.0000000
C 0.3848788 -4.2845108 0.0000000
C -1.1754152 -2.4860287 0.0000000
N -2.1251928 -3.4313144 0.0000000
C -1.7646506 -4.7253100 0.0000000
N -0.5383069 -5.2487516 0.0000000
H 3.3496602 -3.0075287 0.0000000
H 2.2521180 -5.3008143 0.0000000
H -2.5835604 -5.4328271 0.0000000
N -1.5512019 -1.1969440 0.0000000
H -0.8988350 -0.4160731 0.0000000
H -2.5417242 -1.0304237 0.0000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '26')] = qcdb.Molecule("""
0 1
O 1.0272885 -1.7927509 -0.4061508
C 1.7699883 -0.8358415 -0.1967480
N 1.2319553 0.4331963 0.0618047
C 1.9364691 1.5620604 0.3460907
N 3.2411330 1.6380980 0.3913752
C 3.8110938 0.4396217 0.1071531
C 3.2032408 -0.7831141 -0.1709853
N 4.1349400 -1.7700995 -0.3829089
C 5.2907910 -1.1513051 -0.2354273
N 5.1516924 0.1802511 0.0617335
N 1.1709724 2.6978601 0.5477231
H 0.2039336 0.5207236 -0.0280886
H 6.2587500 -1.6129704 -0.3315009
H 5.8835828 0.8525276 0.2204394
H 1.7146455 3.4340823 0.9714033
H 0.3029369 2.5238305 1.0323857
--
0 1
N -1.6634540 -2.1503266 0.4844345
C -2.7337243 -1.3645844 0.2851342
N -3.9617362 -1.8916536 0.4117484
C -5.0361049 -1.1108105 0.2142857
N -5.0856134 0.1814610 -0.1169411
C -3.8521385 0.6813689 -0.2227871
C -2.6434417 0.0104508 -0.0286100
N -1.5757669 0.8650951 -0.2045833
C -2.1325255 2.0285854 -0.5143055
N -3.4907538 1.9716246 -0.5383952
H -5.9917639 -1.6042053 0.3341775
H -1.5835904 2.9296030 -0.7337374
H -4.1262380 2.7236162 -0.7489234
H -1.8639094 -3.1302901 0.5932195
H -0.7358064 -1.8946489 0.1475504
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '27')] = qcdb.Molecule("""
0 1
O 5.3545637 -1.5839084 0.1643820
C 4.3016967 -0.9781139 0.1123080
N 3.0713646 -1.6809667 0.2659334
C 1.8042754 -1.1676035 0.2494339
N 1.5592029 0.1129485 0.0745442
C 2.6978871 0.8367062 -0.0926982
C 4.0240233 0.4175244 -0.0912729
N 4.8841613 1.4681703 -0.3008504
C 4.0879153 2.5107452 -0.4295656
N 2.7596551 2.1849030 -0.3099959
N 0.7798744 -2.0309505 0.4761070
H 3.1965102 -2.6688068 0.4391670
H 4.4098825 3.5222321 -0.6091205
H 1.9722362 2.8053438 -0.3963660
H -0.1527293 -1.7022269 0.2002243
H 0.9606535 -3.0046040 0.3028696
--
0 1
N -1.2242031 1.0428672 0.4916050
C -2.2040220 0.1989638 0.1394948
N -1.9060140 -1.0547654 -0.2435106
C -2.9084418 -1.8851915 -0.5992935
N -4.2169079 -1.6502337 -0.6282927
C -4.4819036 -0.3976671 -0.2338853
C -3.5616520 0.5734472 0.1583798
N -4.1759166 1.7562758 0.4961983
C -5.4596333 1.4978989 0.3100336
N -5.7008222 0.2244483 -0.1272061
H -2.5970464 -2.8772539 -0.9009149
H -6.2592754 2.1999750 0.4769792
H -6.5949340 -0.1885904 -0.3349675
H -0.2461099 0.7667993 0.3993852
H -1.4852074 1.9635263 0.7951137
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '28')] = qcdb.Molecule("""
0 1
N -1.2744921 -0.0017953 -1.3782659
C -2.1152770 -0.0015179 -0.3381875
N -1.6375936 -0.0033248 0.9198720
C -2.5074052 -0.0030670 1.9505655
N -3.8371522 -0.0012513 1.9239415
C -4.2828181 0.0005849 0.6601536
C -3.5155952 0.0006434 -0.5034237
N -4.2984407 0.0028211 -1.6341051
C -5.5305121 0.0040931 -1.1536448
N -5.5811183 0.0027919 0.2133605
H -2.0528877 -0.0045852 2.9334633
H -6.4253358 0.0059818 -1.7533163
H -6.4045390 0.0033962 0.7920186
H -0.2596925 -0.0029765 -1.2406098
H -1.6728767 -0.0000588 -2.3000471
--
0 1
N 1.2734087 -0.0017991 1.3765409
C 2.1149782 -0.0015194 0.3371102
N 1.6382658 -0.0033244 -0.9213222
C 2.5089058 -0.0030642 -1.9513246
N 3.8386381 -0.0012477 -1.9236383
C 4.2833025 0.0005866 -0.6594937
C 3.5151554 0.0006426 0.5034653
N 4.2970793 0.0028188 1.6347785
C 5.5295364 0.0040924 1.1553161
N 5.5812428 0.0027936 -0.2116508
H 2.0551790 -0.0045813 -2.9345889
H 6.4238744 0.0059806 1.7557114
H 6.4051319 0.0033994 -0.7896412
H 0.2587211 -0.0029792 1.2389462
H 1.6714569 -0.0000645 2.2984633
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '29')] = qcdb.Molecule("""
0 1
N -1.1366363 -0.0666108 -1.4922977
C -1.9363007 -0.0408578 -0.4194776
N -1.4071800 -0.0593819 0.8158785
C -2.2352343 -0.0402428 1.8802964
N -3.5645999 -0.0046868 1.9074668
C -4.0608616 0.0128417 0.6631804
C -3.3407627 -0.0019209 -0.5302183
N -4.1677258 0.0274614 -1.6288352
C -5.3791670 0.0592559 -1.0991816
N -5.3753601 0.0520242 0.2686301
H -1.7428602 -0.0568025 2.8448773
H -6.2968580 0.0879838 -1.6624329
H -6.1747025 0.0724119 0.8798069
H -0.1211878 -0.0517077 -1.3879919
H -1.5677584 -0.0159058 -2.3974751
--
0 1
N 1.8123343 -0.0245408 -1.2588738
C 2.7152804 -0.0039456 -0.2161678
C 2.5630991 -0.0007999 1.1886196
N 3.6772242 0.0214724 1.9383141
C 4.8749026 0.0394397 1.3318824
N 5.1551234 0.0392981 0.0273056
C 4.0284465 0.0171443 -0.6906539
N 3.9051515 0.0090860 -2.0602851
C 2.5735625 -0.0163877 -2.3426881
N 1.3736145 -0.0168867 1.8043279
H 5.7275827 0.0569289 1.9982511
H 2.2043398 -0.0288835 -3.3545151
H 4.6669159 0.0187011 -2.7180078
H 1.3935693 -0.0168974 2.8088363
H 0.4799202 -0.0417563 1.3125324
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '30')] = qcdb.Molecule("""
0 1
N 1.9051383 -0.1221668 1.3018987
C 1.2306207 -0.0724496 2.4599521
N 1.9188011 -0.0911776 3.6114745
C 1.2468662 -0.0458415 4.7741848
N -0.0695564 0.0217460 4.9796590
C -0.7242768 0.0390915 3.8148784
C -0.1771642 -0.0054840 2.5309668
N -1.1622374 0.0280993 1.5676491
C -2.2864196 0.0935989 2.2653565
N -2.0826685 0.1041226 3.6120321
H 1.8637884 -0.0652578 5.6631315
H -3.2746103 0.1375165 1.8384429
H -2.7831685 0.1478311 4.3334811
H 2.9067312 -0.1495396 1.3694272
H 1.4544353 -0.0668897 0.3920587
--
0 1
N -1.9061558 -0.0641839 -1.3006378
C -1.2309020 -0.0492565 -2.4591003
N -1.9184496 -0.0947841 -3.6103362
C -1.2461054 -0.0720254 -4.7734664
N 0.0702056 -0.0055843 -4.9798973
C 0.7244044 0.0360165 -3.8154103
C 0.1769597 0.0162410 -2.5310350
N 1.1617828 0.0675370 -1.5681922
C 2.2861580 0.1193292 -2.2667167
N 2.0827966 0.1037780 -3.6134111
H -1.8625819 -0.1122096 -5.6620263
H 3.2742511 0.1708248 -1.8404275
H 2.7834317 0.1345490 -4.3353905
H -2.9057988 -0.1329805 -1.3671674
H -1.4533881 -0.0384046 -0.3902022
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '31')] = qcdb.Molecule("""
0 1
C -5.2998476 2.1696769 -0.1527418
N -4.0012332 2.3280352 0.0113365
C -3.5133825 1.0430020 0.0349501
N -5.6731934 0.8519995 -0.2385098
C -4.5348158 0.1058659 -0.1189924
N -2.1562547 -0.8394339 0.1532486
C -3.2316395 -1.6611847 -0.0119881
N -4.4661255 -1.2464671 -0.1599515
N -2.9527518 -3.0009490 0.0413727
H -6.0192845 2.9682219 -0.2165198
H -2.0059608 -3.2718138 -0.1741253
H -3.6766313 -3.5832294 -0.3425488
H -6.6016641 0.4841764 -0.3616632
H -1.2330572 -1.2973531 0.2284474
C -2.1744618 0.5575368 0.1742023
O -1.1240037 1.1950526 0.2956190
--
0 1
N 1.2638556 -0.1422038 0.1193086
C 1.2024891 -1.5065889 0.0798953
N 2.5150609 -1.9393418 -0.0155197
C 3.3711631 -0.8652191 -0.0318660
C 2.5823266 0.2608198 0.0542710
O 0.2004187 -2.2363595 0.1229392
C 3.1762467 1.5568980 0.0478905
N 4.5883289 1.4099390 -0.0474268
C 5.2845121 0.2404436 -0.1157151
N 4.7218815 -0.9391943 -0.1032764
O 2.6645381 2.6620253 0.1017547
N 6.6513076 0.3472133 -0.2677366
H 7.0896806 1.0869563 0.2575269
H 7.1055907 -0.5431980 -0.1372108
H 2.7741742 -2.9100741 -0.0564635
H 0.4151138 0.4528548 0.1972152
H 5.0828171 2.2880907 -0.1283728
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '32')] = qcdb.Molecule("""
0 1
S -0.2983354 -0.0000513 0.0606545
C -0.2090863 -0.0000888 1.7027085
N 0.9916329 -0.0001803 2.3727915
C 1.1063707 0.0000024 3.7325291
C 0.0163475 0.0001662 4.5304894
C -1.2953719 0.0000721 3.9249070
N -1.2941374 -0.0000268 2.5353745
O -2.3533146 0.0000820 4.5510728
H 0.0943097 0.0003348 5.6041112
H 1.8067829 -0.0002554 1.7802008
H -2.2194333 0.0000304 2.0853365
H 2.1154718 0.0000158 4.1172107
--
0 1
S -4.3480040 0.0005221 1.2455679
C -5.4129697 0.0002108 2.5234518
N -6.7626348 0.0001784 2.2970286
C -7.6987363 0.0000487 3.2957366
C -7.3354268 -0.0001000 4.5945383
C -5.9267360 -0.0001870 4.9466675
N -5.0628029 0.0000198 3.8318987
O -5.4752372 -0.0004103 6.0770807
H -8.0659143 -0.0001724 5.3856633
H -7.0337303 0.0003183 1.3267291
H -4.0549036 0.0000246 4.0487228
H -8.7287725 0.0000774 2.9713072
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '33')] = qcdb.Molecule("""
0 1
C 12.1619966 21.5469940 -0.5249999
N 12.0019966 20.1249944 -0.3349999
C 12.9959964 19.1989946 -0.1290000
N 12.5899965 17.9429950 -0.1260000
C 11.2289969 18.0629949 -0.3469999
C 10.2259971 17.0909952 -0.4599999
N 10.4079971 15.7719956 -0.3739999
N 8.9619975 17.5199951 -0.6819998
C 8.7349976 18.8509947 -0.7899998
N 9.6049973 19.8469944 -0.7019998
C 10.8559970 19.3909946 -0.4999999
H 12.8450824 21.9515608 0.2257099
H 12.5490085 21.7744749 -1.5236356
H 11.1843859 22.0177918 -0.4120399
H 14.0220821 19.5129525 0.0161520
H 11.3436468 15.4109067 -0.2800629
H 9.6382753 15.1406078 -0.5991948
H 7.6909448 19.1156876 -0.9420537
--
0 1
C 3.0629991 16.2869954 -0.5529998
N 4.3679988 15.6949956 -0.7379998
C 5.4889985 16.5069954 -0.6549998
O 5.3979985 17.7169950 -0.4679999
N 6.6749981 15.8589956 -0.7949998
C 6.8699981 14.5069959 -0.9999997
O 8.0199978 14.0679961 -1.0789997
C 5.6559984 13.7139962 -1.1019997
C 5.7709984 12.2569966 -1.4029996
C 4.4739987 14.3319960 -0.9639997
H 7.5313379 16.4637704 -0.7443448
H 6.3741672 11.7424167 -0.6472968
H 4.7881707 11.7797217 -1.4448876
H 6.2751442 12.0930036 -2.3618343
H 3.5293140 13.8026561 -1.0289747
H 2.3790703 15.9479585 -1.3364316
H 2.6423583 16.0249025 0.4245489
H 3.1730521 17.3682771 -0.6086068
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '34')] = qcdb.Molecule("""
0 1
N 10.3469971 14.4959959 8.8169975
C 11.5789968 13.8469961 8.7069976
O 11.6019967 12.6419965 8.4119976
N 12.6939964 14.5549959 8.8809975
C 12.6739964 15.9259955 9.1859974
N 13.8309961 16.5099954 9.3349974
C 11.4219968 16.5639954 9.2669974
C 10.3209971 15.8539956 9.0929975
H 9.3699974 16.4009954 9.1789974
H 11.3019968 17.6379951 9.4699973
H 14.6739959 15.9769955 9.2609974
H 13.8749961 17.4909951 9.5239973
C 9.1059774 13.7460371 8.6280336
H 9.4001314 12.7260934 8.3864956
H 8.5051816 13.7537151 9.5428113
H 8.5206636 14.1698120 7.8064238
--
0 1
C 18.8919947 9.6579973 9.7709973
N 18.5279948 11.0699969 9.5879973
C 19.3769946 12.1419966 9.6129973
N 18.7759947 13.3089963 9.4319974
C 17.4529951 12.9639964 9.3169974
C 16.2779954 13.7529961 9.1209974
O 16.2219955 14.9839958 9.0219975
N 15.1359958 13.0409963 9.0449975
C 15.0849958 11.6719967 9.1349974
N 13.8449961 11.1639969 9.0359975
N 16.1359955 10.8809970 9.3169974
C 17.2759952 11.5909968 9.3939974
H 14.2561290 13.5779002 8.9264415
H 13.0353973 11.7259537 8.7509445
H 13.7773141 10.1594092 9.0213535
H 17.9866610 9.0649795 9.6385253
H 19.2909706 9.4904943 10.7753660
H 19.6360815 9.3587324 9.0282525
H 20.4431063 12.0114766 9.7460263
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '35')] = qcdb.Molecule("""
0 1
N 10.9240000 16.7550000 5.5620000
C 11.6470000 17.8510000 5.8140000
N 12.9490000 17.6590000 5.9790000
C 13.0500000 16.2780000 5.7950000
C 14.1950000 15.4230000 5.8560000
N 15.4060000 15.8590000 6.0610000
N 13.9020000 14.1180000 5.6250000
C 12.6770000 13.6430000 5.3990000
N 11.5490000 14.4040000 5.3300000
C 11.8450000 15.6910000 5.5460000
H 11.1804230 18.8265530 5.8822870
H 12.5884030 12.5696370 5.2620740
H 16.1977530 15.2199420 5.9750360
H 15.5570940 16.8510580 6.1500010
C 9.4931860 16.6413650 5.3399050
H 9.0446590 17.6337380 5.4112840
H 9.2947180 16.2234190 4.3499330
H 9.0442270 15.9854440 6.0897950
--
0 1
N 16.2460000 9.7810000 5.9650000
C 17.5950000 10.0510000 5.9930000
C 18.0920000 11.2690000 5.9020000
C 17.1390000 12.3410000 5.7640000
O 17.4920000 13.5330000 5.6630000
N 15.8280000 12.0550000 5.7130000
C 15.3100000 10.7970000 5.7960000
O 14.1120000 10.5770000 5.7580000
H 18.2280000 9.1744860 6.1031120
C 19.5529600 11.6051630 5.9357380
H 20.1631860 10.7042230 6.0438290
H 19.7760320 12.2828240 6.7658180
H 19.8526100 12.1260780 5.0209680
H 15.1383860 12.8499570 5.6472680
C 15.7717470 8.4029560 6.0779300
H 14.6864640 8.4223240 6.0045990
H 16.1825380 7.7884380 5.2708940
H 16.0652090 7.9755790 7.0417370
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '36')] = qcdb.Molecule("""
0 1
H 0.0112670 4.2441280 0.3057270
N -0.1600000 4.2010000 1.2990000
C 0.1490000 5.1520000 2.2350000
H 0.8336150 5.9557770 2.0023890
N -0.3040000 4.9000000 3.4380000
C -1.1470000 3.7970000 3.2290000
C -2.0790000 3.1160000 4.0900000
O -2.3440000 3.3110000 5.2740000
N -2.7730000 2.0930000 3.4630000
H -3.4444620 1.6202680 4.0533010
C -2.5700000 1.7190000 2.1650000
N -3.2200000 0.6740000 1.7040000
H -3.7884800 0.1079360 2.3113460
H -3.0424470 0.3264300 0.7529310
N -1.7100000 2.3160000 1.3470000
C -1.0480000 3.3630000 1.9240000
--
0 1
H -3.4958570 -1.4150050 -3.9137580
N -3.0510000 -1.0010000 -3.1090000
C -3.5590000 -0.8800000 -1.8360000
H -4.5790060 -1.1582720 -1.6128580
N -2.7220000 -0.3740000 -0.9680000
C -1.5590000 -0.1810000 -1.7250000
C -0.2720000 0.3480000 -1.4650000
N 0.1070000 0.8840000 -0.3230000
H 1.0433330 1.2579620 -0.3065570
H -0.5751070 1.2407790 0.3499520
N 0.6670000 0.3750000 -2.4130000
C 0.3480000 -0.0810000 -3.6160000
H 1.1321870 -0.0417550 -4.3673920
N -0.8160000 -0.5790000 -4.0190000
C -1.7380000 -0.6050000 -3.0150000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '37')] = qcdb.Molecule("""
0 1
H 3.1762460 2.3738070 2.9634160
N 2.3770000 1.8470000 3.2830000
C 1.6370000 2.2160000 4.3790000
H 1.9902970 3.0843050 4.9210710
C 0.5610000 1.4930000 4.7730000
H -0.0085000 1.7736330 5.6470440
C 0.1830000 0.3990000 3.9430000
N -0.8510000 -0.3400000 4.2540000
H -1.1799330 -1.0651510 3.5908230
H -1.4362750 -0.1022370 5.0377650
N 0.8500000 0.0580000 2.8540000
C 1.9550000 0.7640000 2.4990000
O 2.5580000 0.4150000 1.4830000
--
0 1
H -1.2611710 -4.7286740 -2.6257100
N -1.6090000 -4.2940000 -1.7860000
C -2.7550000 -4.5990000 -1.0690000
H -3.5136190 -5.2427470 -1.4922410
N -2.8650000 -3.9860000 0.0730000
C -1.6740000 -3.2820000 0.1910000
C -1.1780000 -2.4570000 1.2560000
O -1.7150000 -2.1460000 2.3170000
N 0.0980000 -1.9830000 1.0200000
H 0.4562670 -1.3045040 1.7132710
C 0.8280000 -2.2730000 -0.0890000
N 2.0180000 -1.7250000 -0.1770000
H 2.3044660 -0.9690820 0.4476800
H 2.5064670 -1.8555350 -1.0472790
N 0.3920000 -3.0250000 -1.1030000
C -0.8790000 -3.5010000 -0.9150000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '38')] = qcdb.Molecule("""
0 1
H 4.0780890 0.2050200 6.5267380
N 3.3380000 -0.4520000 6.3380000
C 2.1440000 -0.6140000 7.0100000
H 1.9445960 -0.0744500 7.9251340
N 1.3390000 -1.4880000 6.4770000
C 2.0190000 -1.9110000 5.3320000
C 1.6500000 -2.8430000 4.3020000
O 0.6370000 -3.5330000 4.1980000
N 2.5960000 -2.9520000 3.3010000
H 2.3705000 -3.6388980 2.5623150
C 3.7610000 -2.2490000 3.2730000
N 4.5620000 -2.4690000 2.2580000
H 4.3528370 -3.1696290 1.5459440
H 5.4428290 -1.9835850 2.2550440
N 4.1450000 -1.3880000 4.2160000
C 3.2280000 -1.2560000 5.2240000
--
0 1
H 3.2823840 -6.1134940 -1.3105350
N 2.5530000 -6.0070000 -0.6210000
C 1.3990000 -6.7620000 -0.6490000
H 1.3017290 -7.4646550 -1.4662410
C 0.4550000 -6.5890000 0.3070000
H -0.4593850 -7.1648600 0.2947650
C 0.7210000 -5.6290000 1.3280000
N -0.1590000 -5.3940000 2.2700000
H -1.0266130 -5.9017830 2.3125200
H 0.0709100 -4.7127400 3.0149280
N 1.8460000 -4.9310000 1.3860000
C 2.7800000 -5.0940000 0.4140000
O 3.8210000 -4.4400000 0.4780000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '39')] = qcdb.Molecule("""
0 1
O 0.9601320 1.3436400 0.0000000
C 1.5166980 0.2684520 0.0000000
N 0.7573320 -0.9011610 0.0000000
C 1.2481620 -2.1702510 0.0000000
N 2.5209460 -2.4496950 0.0000000
C 3.2915230 -1.3476830 0.0000000
C 2.9121790 -0.0279190 0.0000000
N 4.0200060 0.7969640 0.0000000
C 5.0170310 0.0003310 0.0000000
N 4.6446780 -1.3255770 0.0000000
N 0.3459700 -3.1553460 0.0000000
H -0.2412520 -0.7659240 0.0000000
H 6.0483360 0.2895830 0.0000000
H 5.2362800 -2.1226110 0.0000000
H 0.6928700 -4.0838600 0.0000000
H -0.6408270 -2.9885130 0.0000000
--
0 1
C -1.5982280 -2.9490360 3.3600000
N -2.8308990 -3.5868360 3.3600000
C -4.0005400 -2.9065270 3.3600000
C -4.0107280 -1.5698660 3.3600000
C -2.7192980 -0.9187180 3.3600000
N -1.5949260 -1.5998660 3.3600000
O -0.5980710 -3.6295230 3.3600000
N -2.6531990 0.4024280 3.3600000
H -2.8066410 -4.5810390 3.3600000
H -4.8972920 -3.4971900 3.3600000
H -4.9235800 -1.0089750 3.3600000
H -3.4794940 0.9500750 3.3600000
H -1.7581040 0.8646590 3.3600000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '40')] = qcdb.Molecule("""
0 1
C -3.0263940 -1.4464050 0.0000000
N -4.3985350 -1.2378500 0.0000000
C -4.9449180 0.0000290 0.0000000
C -4.1674910 1.0873990 0.0000000
C -2.7399670 0.8551050 0.0000000
N -2.2307000 -0.3568440 0.0000000
O -2.6172300 -2.5848080 0.0000000
N -1.9099420 1.8850830 0.0000000
H -4.9632880 -2.0564360 0.0000000
H -6.0175890 0.0492700 0.0000000
H -4.5763200 2.0777300 0.0000000
H -2.2565290 2.8138220 0.0000000
H -0.9141020 1.7329110 0.0000000
--
0 1
O -0.0130090 1.6513790 3.3600000
C 1.0692420 1.1086750 3.3600000
N 1.1423840 -0.2839060 3.3600000
C 2.2854260 -1.0221180 3.3600000
N 3.4793830 -0.5000700 3.3600000
C 3.4550460 0.8444100 3.3600000
C 2.3724120 1.6891490 3.3600000
N 2.7838090 3.0076580 3.3600000
C 4.0586690 2.9492050 3.3600000
N 4.5367780 1.6576590 3.3600000
N 2.1345620 -2.3493720 3.3600000
H 0.2550220 -0.7614500 3.3600000
H 4.7229940 3.7894010 3.3600000
H 5.4838790 1.3605800 3.3600000
H 2.9609760 -2.8966530 3.3600000
H 1.2381640 -2.7944260 3.3600000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '41')] = qcdb.Molecule("""
0 1
N -1.3923840 -1.5825730 -0.2790500
C -1.8533500 -0.3518640 -0.0620430
N -0.9943890 0.6521290 0.1149880
C -1.4604570 1.8814980 0.3317590
N -2.7070820 2.2763020 0.4013740
C -3.5527210 1.2640760 0.2228910
C -3.2236500 -0.0504790 -0.0089010
N -4.3580740 -0.8272780 -0.1458710
C -5.3247240 -0.0009840 -0.0001730
N -4.9130980 1.2870000 0.2269330
H -0.7040060 2.6348130 0.4645890
H -6.3651290 -0.2529400 -0.0446000
H -5.4840420 2.0871050 0.3680130
H -0.4093220 -1.7576030 -0.3099130
H -2.0356960 -2.3259680 -0.4101310
--
0 1
O 2.4555320 -0.5209070 3.3788050
C 2.5333330 0.6704300 3.2169230
N 1.4067200 1.4246400 2.9925690
C 1.3497150 2.7756270 2.7939400
N 2.5708460 3.3948650 2.8321660
C 3.7496420 2.7230470 3.0501750
C 3.8036770 1.4072660 3.2434740
O 0.3307920 3.3742620 2.6029400
C 5.0685490 0.6342580 3.4848390
H 2.5588020 4.3783590 2.6906210
H 0.5200190 0.9342720 2.9706210
H 4.6288470 3.3398640 3.0533080
H 5.0316430 0.1233820 4.4405330
H 5.2089370 -0.1230850 2.7216660
H 5.9296670 1.2931580 3.4791940
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '42')] = qcdb.Molecule("""
0 1
O 1.6803850 -1.8647480 0.3288050
C 2.4435780 -0.9466670 0.1669230
N 1.9754430 0.3257080 -0.0574310
C 2.7234150 1.4521870 -0.2560600
N 4.0753100 1.2353980 -0.2178340
C 4.6340910 -0.0009940 0.0001750
C 3.9044100 -1.0972430 0.1934740
O 2.2509580 2.5354000 -0.4470600
C 4.4733490 -2.4660930 0.4348390
H 4.6436490 2.0381400 -0.3593790
H 0.9698550 0.4501820 -0.0793790
H 5.7079390 -0.0187620 0.0033080
H 4.1432070 -2.8577080 1.3905330
H 4.1417710 -3.1613140 -0.3283340
H 5.5573000 -2.4391850 0.4291940
--
0 1
N -0.1962490 -2.0987510 2.7709500
C -1.2925710 -1.3740360 2.9879570
N -1.1877890 -0.0569040 3.1649880
C -2.2874510 0.6637280 3.3817590
N -3.5280520 0.2503840 3.4513740
C -3.6172170 -1.0655780 3.2728910
C -2.5783160 -1.9356530 3.0410990
N -3.0394940 -3.2308940 2.9041290
C -4.3072140 -3.1305910 3.0498260
N -4.7312590 -1.8466420 3.2769330
H -2.1182570 1.7178040 3.5145890
H -5.0008230 -3.9459620 3.0054000
H -5.6634530 -1.5349360 3.4180130
H 0.7019450 -1.6625240 2.7400870
H -0.2797430 -3.0783000 2.6398690
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '43')] = qcdb.Molecule("""
0 1
C 2.4313070 1.6249990 -1.4530130
N 3.8007370 1.6249990 -1.6786800
C 4.7029040 1.6249990 -0.6702290
C 4.2995430 1.6249990 0.6041590
C 2.8701050 1.6249990 0.8243640
N 2.0112500 1.6249990 -0.1708960
O 1.6903830 1.6249990 -2.4092590
N 2.3989850 1.6249990 2.0604220
H 4.0848920 1.6249990 -2.6317200
H 5.7382910 1.6249990 -0.9548720
H 4.9943920 1.6249990 1.4196840
H 3.0156050 1.6249990 2.8366040
H 1.4048620 1.6249990 2.2234300
--
0 1
C -2.4313070 -1.6249990 -1.4530130
N -3.8007370 -1.6249990 -1.6786800
C -4.7029040 -1.6249990 -0.6702290
C -4.2995430 -1.6249990 0.6041590
C -2.8701050 -1.6249990 0.8243640
N -2.0112500 -1.6249990 -0.1708960
O -1.6903830 -1.6249990 -2.4092590
N -2.3989850 -1.6249990 2.0604220
H -4.0848920 -1.6249990 -2.6317200
H -5.7382910 -1.6249990 -0.9548720
H -4.9943920 -1.6249990 1.4196840
H -3.0156050 -1.6249990 2.8366040
H -1.4048620 -1.6249990 2.2234300
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '44')] = qcdb.Molecule("""
0 1
O -0.4979320 1.6249990 1.9422390
C -1.3595090 1.6249990 1.0916630
N -0.9987390 1.6249990 -0.2553610
C -1.8577170 1.6249990 -1.3106620
N -3.1545590 1.6249990 -1.1831180
C -3.5468800 1.6249990 0.1030790
C -2.7782730 1.6249990 1.2410250
N -3.5769760 1.6249990 2.3678730
C -4.7713760 1.6249990 1.9183280
N -4.8269760 1.6249990 0.5422510
N -1.3040920 1.6249990 -2.5263360
H -0.0072390 1.6249990 -0.4353230
H -5.6628210 1.6249990 2.5121130
H -5.6359190 1.6249990 -0.0329580
H -1.9209400 1.6249990 -3.3022070
H -0.3140390 1.6249990 -2.6726050
--
0 1
O 0.4979320 -1.6249990 1.9422390
C 1.3595090 -1.6249990 1.0916630
N 0.9987390 -1.6249990 -0.2553610
C 1.8577170 -1.6249990 -1.3106620
N 3.1545590 -1.6249990 -1.1831180
C 3.5468800 -1.6249990 0.1030790
C 2.7782730 -1.6249990 1.2410250
N 3.5769760 -1.6249990 2.3678730
C 4.7713760 -1.6249990 1.9183280
N 4.8269760 -1.6249990 0.5422510
N 1.3040920 -1.6249990 -2.5263360
H 0.0072390 -1.6249990 -0.4353230
H 5.6628210 -1.6249990 2.5121130
H 5.6359190 -1.6249990 -0.0329580
H 1.9209400 -1.6249990 -3.3022070
H 0.3140390 -1.6249990 -2.6726050
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '45')] = qcdb.Molecule("""
0 1
O 0.9601320 1.3436400 0.0000000
C 1.5166980 0.2684520 0.0000000
N 0.7573320 -0.9011610 0.0000000
C 1.2481620 -2.1702510 0.0000000
N 2.5209460 -2.4496950 0.0000000
C 3.2915230 -1.3476830 0.0000000
C 2.9121790 -0.0279190 0.0000000
N 4.0200060 0.7969640 0.0000000
C 5.0170310 0.0003310 0.0000000
N 4.6446780 -1.3255770 0.0000000
N 0.3459700 -3.1553460 0.0000000
H -0.2412520 -0.7659240 0.0000000
H 6.0483360 0.2895830 0.0000000
H 5.2362800 -2.1226110 0.0000000
H 0.6928700 -4.0838600 0.0000000
H -0.6408270 -2.9885130 0.0000000
--
0 1
O -1.5665350 0.5226760 3.1900000
C -1.3848270 -0.6743110 3.1900000
N -0.0830050 -1.1742030 3.1900000
C 0.2658570 -2.4894210 3.1900000
N -0.5995930 -3.4636200 3.1900000
C -1.8707500 -3.0250070 3.1900000
C -2.3395920 -1.7343230 3.1900000
N -3.7206970 -1.7181430 3.1900000
C -4.0590580 -2.9486690 3.1900000
N -2.9784690 -3.8024880 3.1900000
N 1.5747700 -2.7560840 3.1900000
H 0.6453760 -0.4778410 3.1900000
H -5.0634190 -3.3208450 3.1900000
H -2.9886000 -4.7950370 3.1900000
H 1.8398890 -3.7111710 3.1900000
H 2.2750440 -2.0410890 3.1900000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '46')] = qcdb.Molecule("""
0 1
C -3.0263940 -1.4464050 0.0000000
N -4.3985350 -1.2378500 0.0000000
C -4.9449180 0.0000290 0.0000000
C -4.1674910 1.0873990 0.0000000
C -2.7399670 0.8551050 0.0000000
N -2.2307000 -0.3568440 0.0000000
O -2.6172300 -2.5848080 0.0000000
N -1.9099420 1.8850830 0.0000000
H -4.9632880 -2.0564360 0.0000000
H -6.0175890 0.0492700 0.0000000
H -4.5763200 2.0777300 0.0000000
H -2.2565290 2.8138220 0.0000000
H -0.9141020 1.7329110 0.0000000
--
0 1
C 3.2985790 0.6087040 3.1900000
N 4.2860790 1.5839520 3.1900000
C 4.0005050 2.9065740 3.1900000
C 2.7324140 3.3293140 3.1900000
C 1.7140620 2.3023070 3.1900000
N 2.0144220 1.0224800 3.1900000
O 3.6366950 -0.5527840 3.1900000
N 0.4371510 2.6477000 3.1900000
H 5.2241270 1.2536560 3.1900000
H 4.8393710 3.5769110 3.1900000
H 2.4810610 4.3708130 3.1900000
H 0.1716470 3.6027840 3.1900000
H -0.2790560 1.9392500 3.1900000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '47')] = qcdb.Molecule("""
0 1
N 1.0423840 -1.6008720 0.1400580
C 1.5033500 -0.3559320 0.0311400
N 0.6443890 0.6596690 -0.0577140
C 1.1104570 1.9032530 -0.1665130
N 2.3570820 2.3026230 -0.2014530
C 3.2027210 1.2786930 -0.1118710
C 2.8736500 -0.0510630 0.0044670
N 4.0080740 -0.8368430 0.0732140
C 4.9747240 -0.0009950 0.0000870
N 4.5630980 1.3018810 -0.1139000
H 0.3540060 2.6652780 -0.2331820
H 6.0151290 -0.2558650 0.0223850
H 5.1340420 2.1112380 -0.1847090
H 0.0593220 -1.7779260 0.1555480
H 1.6856960 -2.3528630 0.2058490
--
0 1
C -1.6419140 2.9739730 -3.0239370
N -2.8741190 3.6124140 -3.0421140
C -4.0409900 2.9359160 -3.1500030
C -4.0487470 1.6026030 -3.2447730
C -2.7578360 0.9507400 -3.2245270
N -1.6361750 1.6281560 -3.1188990
O -0.6443040 3.6509540 -2.9247190
N -2.6894340 -0.3672360 -3.3142960
H -2.8516920 4.6040980 -2.9707700
H -4.9376330 3.5267310 -3.1542940
H -4.9593830 1.0447610 -3.3310860
H -3.5136510 -0.9120230 -3.3952410
H -1.7946790 -0.8299350 -3.3010330
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '48')] = qcdb.Molecule("""
0 1
O -2.0303850 -1.8863100 -0.1650310
C -2.7935780 -0.9576130 -0.0837800
N -2.3254430 0.3294740 0.0288250
C -3.0734150 1.4689780 0.1285190
N -4.4253100 1.2496820 0.1093330
C -4.9840910 -0.0010050 -0.0000880
C -4.2544100 -1.1099300 -0.0971060
O -2.6009580 2.5647160 0.2243840
C -4.8233490 -2.4946080 -0.2182500
H -4.9936490 2.0617070 0.1803760
H -1.3198550 0.4553880 0.0398410
H -6.0579390 -0.0189790 -0.0016600
H -4.4932070 -3.1202310 0.6035230
H -4.4917710 -2.9686160 -1.1353540
H -5.9073000 -2.4671550 -0.2167380
--
0 1
O -0.0504540 -1.6178530 -3.0328940
C 1.0293920 -1.0784590 -3.1266030
N 1.0999170 0.3105210 -3.2285410
C 2.2401210 1.0448270 -3.3391500
N 3.4334530 0.5219170 -3.3635050
C 3.4115810 -0.8191700 -3.2674580
C 2.3318990 -1.6598460 -3.1524330
N 2.7451410 -2.9758150 -3.0805400
C 4.0182190 -2.9198150 -3.1499710
N 4.4933620 -1.6323510 -3.2655320
N 2.0870530 2.3690480 -3.4250070
H 0.2128580 0.7884810 -3.2167550
H 4.6831900 -3.7591200 -3.1247610
H 5.4386800 -1.3377250 -3.3349980
H 2.9113910 2.9134700 -3.5059320
H 1.1910290 2.8146150 -3.4104660
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '49')] = qcdb.Molecule("""
0 1
O 1.5241600 -0.5494170 3.3837280
C 1.3439910 0.6454500 3.3087260
N 0.0438450 1.1430380 3.2271380
C -0.3032010 2.4557550 3.1386110
N 0.5626500 3.4294030 3.1191180
C 1.8322280 2.9929620 3.1959900
C 2.2991810 1.7048790 3.2880520
N 3.6791050 1.6903250 3.3455930
C 4.0186060 2.9192810 3.2900230
N 2.9399160 3.7704860 3.1975320
N -1.6107030 2.7204770 3.0698940
H -0.6847300 0.4469420 3.2365720
H 5.0225530 3.2920270 3.3102000
H 2.9511880 4.7614640 3.1419340
H -1.8744930 3.6737330 3.0051240
H -2.3112160 2.0058100 3.0815320
--
0 1
O -2.0303850 -1.8889030 -0.1320850
C -2.7935780 -0.9589290 -0.0670550
N -2.3254430 0.3299270 0.0230710
C -3.0734150 1.4709970 0.1028620
N -4.4253100 1.2514000 0.0875060
C -4.9840910 -0.0010070 -0.0000700
C -4.2544100 -1.1114560 -0.0777210
O -2.6009580 2.5682420 0.1795890
C -4.8233490 -2.4980370 -0.1746800
H -4.9936490 2.0645410 0.1443670
H -1.3198550 0.4560130 0.0318880
H -6.0579390 -0.0190050 -0.0013290
H -4.4932070 -3.1092230 0.6578860
H -4.4917710 -2.9879780 -1.0833720
H -5.9073000 -2.4705620 -0.1736470
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '50')] = qcdb.Molecule("""
0 1
N 1.0423840 -1.6030730 0.1120980
C 1.5033500 -0.3564220 0.0249230
N 0.6443890 0.6605760 -0.0461920
C 1.1104570 1.9058690 -0.1332710
N 2.3570820 2.3057880 -0.1612360
C 3.2027210 1.2804500 -0.0895380
C 2.8736500 -0.0511330 0.0035760
N 4.0080740 -0.8379940 0.0585980
C 4.9747240 -0.0009970 0.0000700
N 4.5630980 1.3036710 -0.0911620
H 0.3540060 2.6689420 -0.1866310
H 6.0151290 -0.2562160 0.0179160
H 5.1340420 2.1141400 -0.1478350
H 0.0593220 -1.7803700 0.1244960
H 1.6856960 -2.3560970 0.1647540
--
0 1
C -3.3369590 -0.6409430 3.3908960
N -4.3247580 -1.6157810 3.3763480
C -4.0409560 -2.9359630 3.2899980
C -2.7744220 -3.3565600 3.2141470
C -1.7557370 -2.3300110 3.2303510
N -2.0543620 -1.0525720 3.3148920
O -3.6734450 0.5183010 3.4703070
N -0.4803010 -2.6733740 3.1585030
H -5.2616330 -1.2870980 3.4334500
H -4.8798930 -3.6062030 3.2865630
H -2.5244870 -4.3961080 3.1450650
H -0.2161270 -3.6266280 3.0937180
H 0.2361240 -1.9652240 3.1691180
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '51')] = qcdb.Molecule("""
0 1
N -1.0423840 -1.6069870 0.0000000
C -1.5033500 -0.3572920 0.0000000
N -0.6443890 0.6621890 0.0000000
C -1.1104570 1.9105230 0.0000000
N -2.3570820 2.3114180 0.0000000
C -3.2027210 1.2835770 0.0000000
C -2.8736500 -0.0512580 0.0000000
N -4.0080740 -0.8400400 0.0000000
C -4.9747240 -0.0009990 0.0000000
N -4.5630980 1.3068540 0.0000000
H -0.3540060 2.6754590 0.0000000
H -6.0151290 -0.2568420 0.0000000
H -5.1340420 2.1193020 0.0000000
H -0.0593220 -1.7847170 0.0000000
H -1.6856960 -2.3618500 0.0000000
--
0 1
O 1.5260840 -0.5520650 3.1800000
C 1.3443760 0.6449210 3.1800000
N 0.0425540 1.1448140 3.1800000
C -0.3063080 2.4600320 3.1800000
N 0.5591430 3.4342300 3.1800000
C 1.8302990 2.9956180 3.1800000
C 2.2991410 1.7049340 3.1800000
N 3.6802460 1.6887540 3.1800000
C 4.0186070 2.9192800 3.1800000
N 2.9380180 3.7730980 3.1800000
N -1.6152210 2.7266950 3.1800000
H -0.6858270 0.4484520 3.1800000
H 5.0229680 3.2914560 3.1800000
H 2.9481490 4.7656470 3.1800000
H -1.8803400 3.6817820 3.1800000
H -2.3154950 2.0117000 3.1800000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '52')] = qcdb.Molecule("""
0 1
O 2.0303850 -1.8935150 0.0000000
C 2.7935780 -0.9612710 0.0000000
N 2.3254430 0.3307330 0.0000000
C 3.0734150 1.4745890 0.0000000
N 4.4253100 1.2544560 0.0000000
C 4.9840910 -0.0010090 0.0000000
C 4.2544100 -1.1141700 0.0000000
O 2.6009580 2.5745130 0.0000000
C 4.8233490 -2.5041370 0.0000000
H 4.9936490 2.0695820 0.0000000
H 1.3198550 0.4571270 0.0000000
H 6.0579390 -0.0190510 0.0000000
H 4.4932070 -3.0557570 0.8731720
H 4.4917710 -3.0562720 -0.8723020
H 5.9073000 -2.4766570 -0.0008860
--
0 1
C -3.3390300 -0.6380930 3.1800000
N -4.3265300 -1.6133420 3.1800000
C -4.0409560 -2.9359630 3.1800000
C -2.7728650 -3.3587040 3.1800000
C -1.7545120 -2.3316960 3.1800000
N -2.0548730 -1.0518690 3.1800000
O -3.6771460 0.5233950 3.1800000
N -0.4776020 -2.6770890 3.1800000
H -5.2645780 -1.2830450 3.1800000
H -4.8798220 -3.6063000 3.1800000
H -2.5215120 -4.4002020 3.1800000
H -0.2120980 -3.6321740 3.1800000
H 0.2386050 -1.9686390 3.1800000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '53')] = qcdb.Molecule("""
0 1
O 2.0303850 -1.8863100 0.1650310
C 2.7935780 -0.9576130 0.0837800
N 2.3254430 0.3294740 -0.0288250
C 3.0734150 1.4689780 -0.1285190
N 4.4253100 1.2496820 -0.1093330
C 4.9840910 -0.0010050 0.0000880
C 4.2544100 -1.1099300 0.0971060
O 2.6009580 2.5647160 -0.2243840
C 4.8233490 -2.4946080 0.2182500
H 4.9936490 2.0617070 -0.1803760
H 1.3198550 0.4553880 -0.0398410
H 6.0579390 -0.0189790 0.0016600
H 4.4932070 -2.9680270 1.1361760
H 4.4917710 -3.1206680 -0.6026110
H 5.9073000 -2.4673100 0.2149720
--
0 1
C -1.6419140 2.9739730 -3.0239370
N -2.8741190 3.6124140 -3.0421140
C -4.0409900 2.9359160 -3.1500030
C -4.0487470 1.6026030 -3.2447730
C -2.7578360 0.9507400 -3.2245270
N -1.6361750 1.6281560 -3.1188990
O -0.6443040 3.6509540 -2.9247190
N -2.6894340 -0.3672360 -3.3142960
H -2.8516920 4.6040980 -2.9707700
H -4.9376330 3.5267310 -3.1542940
H -4.9593830 1.0447610 -3.3310860
H -3.5136510 -0.9120230 -3.3952410
H -1.7946790 -0.8299350 -3.3010330
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '54')] = qcdb.Molecule("""
0 1
N -1.0423840 -1.6008720 -0.1400580
C -1.5033500 -0.3559320 -0.0311400
N -0.6443890 0.6596690 0.0577140
C -1.1104570 1.9032530 0.1665130
N -2.3570820 2.3026230 0.2014530
C -3.2027210 1.2786930 0.1118710
C -2.8736500 -0.0510630 -0.0044670
N -4.0080740 -0.8368430 -0.0732140
C -4.9747240 -0.0009950 -0.0000870
N -4.5630980 1.3018810 0.1139000
H -0.3540060 2.6652780 0.2331820
H -6.0151290 -0.2558650 -0.0223850
H -5.1340420 2.1112380 0.1847090
H -0.0593220 -1.7779260 -0.1555480
H -1.6856960 -2.3528630 -0.2058490
--
0 1
O -0.0504540 -1.6178530 -3.0328940
C 1.0293920 -1.0784590 -3.1266030
N 1.0999170 0.3105210 -3.2285410
C 2.2401210 1.0448270 -3.3391500
N 3.4334530 0.5219170 -3.3635050
C 3.4115810 -0.8191700 -3.2674580
C 2.3318990 -1.6598460 -3.1524330
N 2.7451410 -2.9758150 -3.0805400
C 4.0182190 -2.9198150 -3.1499710
N 4.4933620 -1.6323510 -3.2655320
N 2.0870530 2.3690480 -3.4250070
H 0.2128580 0.7884810 -3.2167550
H 4.6831900 -3.7591200 -3.1247610
H 5.4386800 -1.3377250 -3.3349980
H 2.9113910 2.9134700 -3.5059320
H 1.1910290 2.8146150 -3.4104660
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '55')] = qcdb.Molecule("""
0 1
O -1.6803850 -1.8863100 -0.1650310
C -2.4435780 -0.9576130 -0.0837800
N -1.9754430 0.3294740 0.0288250
C -2.7234150 1.4689780 0.1285190
N -4.0753100 1.2496820 0.1093330
C -4.6340910 -0.0010050 -0.0000880
C -3.9044100 -1.1099300 -0.0971060
O -2.2509580 2.5647160 0.2243840
C -4.4733490 -2.4946080 -0.2182500
H -4.6436490 2.0617070 0.1803760
H -0.9698550 0.4553880 0.0398410
H -5.7079390 -0.0189790 -0.0016600
H -4.1432070 -3.1202310 0.6035230
H -4.1417710 -2.9686160 -1.1353540
H -5.5573000 -2.4671550 -0.2167380
--
0 1
O 2.4682050 -0.5383510 3.4250310
C 2.5397670 0.6615740 3.3437800
N 1.4045070 1.4276870 3.2311750
C 1.3398450 2.7892110 3.1314810
N 2.5624500 3.4064220 3.1506670
C 3.7496490 2.7230370 3.2600880
C 3.8111350 1.3970020 3.3571060
O 0.3135610 3.3979790 3.0356160
C 5.0853090 0.6111890 3.4782500
H 2.5449500 4.3974240 3.0796240
H 0.5169590 0.9384830 3.2201590
H 4.6289750 3.3396890 3.2616600
H 5.0964880 0.0341320 4.3961760
H 5.1850460 -0.0902010 2.6573890
H 5.9461980 1.2704040 3.4749720
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '56')] = qcdb.Molecule("""
0 1
N 1.3923840 -1.6008720 0.1400580
C 1.8533500 -0.3559320 0.0311400
N 0.9943890 0.6596690 -0.0577140
C 1.4604570 1.9032530 -0.1665130
N 2.7070820 2.3026230 -0.2014530
C 3.5527210 1.2786930 -0.1118710
C 3.2236500 -0.0510630 0.0044670
N 4.3580740 -0.8368430 0.0732140
C 5.3247240 -0.0009950 0.0000870
N 4.9130980 1.3018810 -0.1139000
H 0.7040060 2.6652780 -0.2331820
H 6.3651290 -0.2558650 0.0223850
H 5.4840420 2.1112380 -0.1847090
H 0.4093220 -1.7779260 0.1555480
H 2.0356960 -2.3528630 0.2058490
--
0 1
N -0.1854930 -2.1135550 3.1199420
C -1.2901800 -1.3773270 3.2288600
N -1.1922210 -0.0508040 3.3177130
C -2.3002380 0.6813290 3.4265130
N -3.5435230 0.2716780 3.4614530
C -3.6258080 -1.0537530 3.3718710
C -2.5779730 -1.9361250 3.2555330
N -3.0338720 -3.2386320 3.1867860
C -4.3072070 -3.1306000 3.2599130
N -4.7400060 -1.8346030 3.3739000
H -2.1361640 1.7424510 3.4931820
H -4.9991040 -3.9483280 3.2376150
H -5.6776380 -1.5154120 3.4447090
H 0.7138900 -1.6789650 3.1044520
H -0.2639350 -3.1000580 3.0541510
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '57')] = qcdb.Molecule("""
0 1
N -1.4867430 1.6920980 -2.3336600
C -1.5399110 1.6049230 -1.0055780
N -0.4087210 1.5338080 -0.3037890
C -0.4671620 1.4467290 1.0245780
N -1.5291910 1.4187640 1.7901520
C -2.6502880 1.4904620 1.0763140
C -2.7488050 1.5835760 -0.2917850
N -4.0708590 1.6385980 -0.6895780
C -4.7315520 1.5800700 0.4051650
N -3.9369080 1.4888380 1.5187780
H 0.4880690 1.3933690 1.5165470
H -5.7999030 1.5979160 0.4839400
H -4.2294590 1.4321650 2.4660110
H -0.6065830 1.7044960 -2.8060620
H -2.3312660 1.7447540 -2.8510340
--
0 1
N 1.4867430 -1.6920980 -2.3336600
C 1.5399110 -1.6049230 -1.0055780
N 0.4087210 -1.5338080 -0.3037890
C 0.4671620 -1.4467290 1.0245780
N 1.5291910 -1.4187640 1.7901520
C 2.6502880 -1.4904620 1.0763140
C 2.7488050 -1.5835760 -0.2917850
N 4.0708590 -1.6385980 -0.6895780
C 4.7315520 -1.5800700 0.4051650
N 3.9369080 -1.4888380 1.5187780
H -0.4880690 -1.3933690 1.5165470
H 5.7999030 -1.5979160 0.4839400
H 4.2294590 -1.4321650 2.4660110
H 0.6065830 -1.7044960 -2.8060620
H 2.3312660 -1.7447540 -2.8510340
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '58')] = qcdb.Molecule("""
0 1
O 1.3473090 1.4479140 -0.7794320
C 2.3605260 1.5129430 -0.1308130
N 2.3135810 1.6030670 1.2396240
C 3.3775550 1.6828570 2.0937100
N 4.5954240 1.6675010 1.4671030
C 4.7398420 1.5799270 0.1033200
C 3.7027260 1.5022770 -0.7272960
O 3.2672880 1.7595820 3.2832490
C 3.8153430 1.4053200 -2.2218250
H 5.3872210 1.7243610 2.0648200
H 1.3961730 1.6118840 1.6702820
H 5.7555700 1.5786680 -0.2456340
H 3.3501360 0.4957970 -2.5852230
H 3.3109870 2.2369830 -2.7010640
H 4.8546930 1.4081210 -2.5307720
--
0 1
O -1.3473090 -1.4479140 -0.7794320
C -2.3605260 -1.5129430 -0.1308130
N -2.3135810 -1.6030670 1.2396240
C -3.3775550 -1.6828570 2.0937100
N -4.5954240 -1.6675010 1.4671030
C -4.7398420 -1.5799270 0.1033200
C -3.7027260 -1.5022770 -0.7272960
O -3.2672880 -1.7595820 3.2832490
C -3.8153430 -1.4053200 -2.2218250
H -5.3872210 -1.7243610 2.0648200
H -1.3961730 -1.6118840 1.6702820
H -5.7555700 -1.5786680 -0.2456340
H -3.3109870 -2.2369830 -2.7010640
H -3.3501360 -0.4957970 -2.5852230
H -4.8546930 -1.4081210 -2.5307720
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '59')] = qcdb.Molecule("""
0 1
N -1.3923840 -1.6069870 0.0000000
C -1.8533500 -0.3572920 0.0000000
N -0.9943890 0.6621890 0.0000000
C -1.4604570 1.9105230 0.0000000
N -2.7070820 2.3114180 0.0000000
C -3.5527210 1.2835770 0.0000000
C -3.2236500 -0.0512580 0.0000000
N -4.3580740 -0.8400400 0.0000000
C -5.3247240 -0.0009990 0.0000000
N -4.9130980 1.3068540 0.0000000
H -0.7040060 2.6754590 0.0000000
H -6.3651290 -0.2568420 0.0000000
H -5.4840420 2.1193020 0.0000000
H -0.4093220 -1.7847170 0.0000000
H -2.0356960 -2.3618500 0.0000000
--
0 1
O 2.4724400 -0.5441800 3.2400000
C 2.5419170 0.6586150 3.2400000
N 1.4037670 1.4287050 3.2400000
C 1.3365470 2.7937510 3.2400000
N 2.5596440 3.4102840 3.2400000
C 3.7496510 2.7230340 3.2400000
C 3.8136270 1.3935720 3.2400000
O 0.3078020 3.4059050 3.2400000
C 5.0909100 0.6034800 3.2400000
H 2.5403210 4.4037960 3.2400000
H 0.5159370 0.9398900 3.2400000
H 4.6290170 3.3396300 3.2400000
H 5.1480540 -0.0368430 4.1131720
H 5.1471940 -0.0381040 2.3676980
H 5.9516930 1.2628420 3.2391140
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '60')] = qcdb.Molecule("""
0 1
N -0.1818990 -2.1185030 3.2400000
C -1.2893810 -1.3784270 3.2400000
N -1.1937020 -0.0487650 3.2400000
C -2.3045120 0.6872100 3.2400000
N -3.5486930 0.2787930 3.2400000
C -3.6286790 -1.0498020 3.2400000
C -2.5778590 -1.9362830 3.2400000
N -3.0319930 -3.2412190 3.2400000
C -4.3072050 -3.1306030 3.2400000
N -4.7429290 -1.8305800 3.2400000
H -2.1421480 1.7506870 3.2400000
H -4.9985290 -3.9491190 3.2400000
H -5.6823780 -1.5088880 3.2400000
H 0.7178820 -1.6844600 3.2400000
H -0.2586520 -3.1073290 3.2400000
--
0 1
O 1.6803850 -1.8935150 0.0000000
C 2.4435780 -0.9612710 0.0000000
N 1.9754430 0.3307330 0.0000000
C 2.7234150 1.4745890 0.0000000
N 4.0753100 1.2544560 0.0000000
C 4.6340910 -0.0010090 0.0000000
C 3.9044100 -1.1141700 0.0000000
O 2.2509580 2.5745130 0.0000000
C 4.4733490 -2.5041370 0.0000000
H 4.6436490 2.0695820 0.0000000
H 0.9698550 0.4571270 0.0000000
H 5.7079390 -0.0190510 0.0000000
H 4.1432070 -3.0557570 0.8731720
H 4.1417710 -3.0562720 -0.8723020
H 5.5573000 -2.4766570 -0.0008860
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '61')] = qcdb.Molecule("""
0 1
C 12.1619966 21.5469940 -0.5249999
N 12.0019966 20.1249944 -0.3349999
C 12.9959964 19.1989946 -0.1290000
N 12.5899965 17.9429950 -0.1260000
C 11.2289969 18.0629949 -0.3469999
C 10.2259971 17.0909952 -0.4599999
N 10.4079971 15.7719956 -0.3739999
N 8.9619975 17.5199951 -0.6819998
C 8.7349976 18.8509947 -0.7899998
N 9.6049973 19.8469944 -0.7019998
C 10.8559970 19.3909946 -0.4999999
H 12.8450824 21.9515608 0.2257099
H 12.5490085 21.7744749 -1.5236356
H 11.1843859 22.0177918 -0.4120399
H 14.0220821 19.5129525 0.0161520
H 11.3436468 15.4109067 -0.2800629
H 9.6382753 15.1406078 -0.5991948
H 7.6909448 19.1156876 -0.9420537
--
0 1
C 3.5239990 12.7489964 2.4389993
N 4.9449986 12.8539964 2.2449994
C 5.8529984 11.8509967 2.0569994
N 7.1019980 12.2539966 2.0409994
C 6.9979980 13.6219962 2.2459994
C 7.9829978 14.6269959 2.3449993
N 9.3019974 14.3749960 2.2649994
N 7.5379979 15.8889955 2.5409993
C 6.2229983 16.1279955 2.6329993
N 5.2169985 15.2499957 2.5399993
C 5.6739984 14.0109961 2.3699993
H 9.6079353 13.4170922 2.2138804
H 9.9620862 15.1183578 2.4869203
H 5.5326604 10.8241690 1.9326585
H 5.9571083 17.1738952 2.7655592
H 3.0968081 13.7487911 2.3499173
H 3.0789261 12.1004316 1.6796125
H 3.2840151 12.3521085 3.4311880
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '62')] = qcdb.Molecule("""
0 1
C 3.0629991 16.2869954 -0.5529998
N 4.3679988 15.6949956 -0.7379998
C 5.4889985 16.5069954 -0.6549998
O 5.3979985 17.7169950 -0.4679999
N 6.6749981 15.8589956 -0.7949998
C 6.8699981 14.5069959 -0.9999997
O 8.0199978 14.0679961 -1.0789997
C 5.6559984 13.7139962 -1.1019997
C 5.7709984 12.2569966 -1.4029996
C 4.4739987 14.3319960 -0.9639997
H 7.5313379 16.4637704 -0.7443448
H 6.3741672 11.7424167 -0.6472968
H 4.7881707 11.7797217 -1.4448876
H 6.2751442 12.0930036 -2.3618343
H 3.5293140 13.8026561 -1.0289747
H 2.3790703 15.9479585 -1.3364316
H 2.6423583 16.0249025 0.4245489
H 3.1730521 17.3682771 -0.6086068
--
0 1
C 8.5479976 21.7979939 2.3959993
N 9.1919974 20.5259942 2.6589993
C 8.4229976 19.3799946 2.5429993
O 7.2269980 19.3959946 2.3429993
N 9.0979975 18.2049949 2.7069992
C 10.4579971 18.0869949 2.9379992
O 10.9519969 16.9699952 3.0289992
C 11.2079969 19.3189946 3.0599991
C 12.6759964 19.2659946 3.3619991
C 10.5419970 20.4719943 2.8979992
H 7.4741299 21.6651819 2.5133333
H 8.9049615 22.5495287 3.1049871
H 8.7503455 22.1445498 1.3760436
H 11.0339909 21.4374260 2.9618352
H 13.2133913 18.6878638 2.6029743
H 13.1061963 20.2701373 3.4050200
H 12.8619664 18.7673097 4.3193848
H 8.5371916 17.3217571 2.6353613
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '63')] = qcdb.Molecule("""
0 1
C 10.7049970 9.6579973 11.8009967
N 11.0689969 11.0699969 11.9839966
C 10.2199971 12.1419966 11.9589966
N 10.8209970 13.3089963 12.1399966
C 12.1439966 12.9639964 12.2549966
C 13.3189963 13.7529961 12.4509965
O 13.3749963 14.9839958 12.5499965
N 14.4609959 13.0409963 12.5269965
C 14.5119959 11.6719967 12.4369965
N 15.7519956 11.1639969 12.5359965
N 13.4609962 10.8809970 12.2549966
C 12.3209965 11.5909968 12.1779966
H 11.6087247 9.0642815 11.9411017
H 10.3130781 9.4887283 10.7941210
H 9.9552752 9.3611644 12.5389945
H 15.3408647 13.5779012 12.6455145
H 9.1538724 12.0114576 11.8260867
H 15.8197976 10.1594152 12.5501065
H 16.5616854 11.7259467 12.8207994
--
0 1
C 18.8919947 9.6579973 9.7709973
N 18.5279948 11.0699969 9.5879973
C 19.3769946 12.1419966 9.6129973
N 18.7759947 13.3089963 9.4319974
C 17.4529951 12.9639964 9.3169974
C 16.2779954 13.7529961 9.1209974
O 16.2219955 14.9839958 9.0219975
N 15.1359958 13.0409963 9.0449975
C 15.0849958 11.6719967 9.1349974
N 13.8449961 11.1639969 9.0359975
N 16.1359955 10.8809970 9.3169974
C 17.2759952 11.5909968 9.3939974
H 14.2561290 13.5779002 8.9264415
H 13.0353973 11.7259537 8.7509445
H 13.7773141 10.1594092 9.0213535
H 17.9866610 9.0649795 9.6385253
H 19.2909706 9.4904943 10.7753660
H 19.6360815 9.3587324 9.0282525
H 20.4431063 12.0114766 9.7460263
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '64')] = qcdb.Molecule("""
0 1
N 10.3469971 14.4959959 8.8169975
C 11.5789968 13.8469961 8.7069976
O 11.6019967 12.6419965 8.4119976
N 12.6939964 14.5549959 8.8809975
C 12.6739964 15.9259955 9.1859974
N 13.8309961 16.5099954 9.3349974
C 11.4219968 16.5639954 9.2669974
C 10.3209971 15.8539956 9.0929975
H 9.3699974 16.4009954 9.1789974
H 11.3019968 17.6379951 9.4699973
H 14.6739959 15.9769955 9.2609974
H 13.8749961 17.4909951 9.5239973
C 9.1059774 13.7460371 8.6280336
H 9.4001314 12.7260934 8.3864956
H 8.5051816 13.7537151 9.5428113
H 8.5206636 14.1698120 7.8064238
--
0 1
N 19.2499946 14.4959959 12.7549964
C 18.0179950 13.8469961 12.8649964
O 17.9949950 12.6419965 13.1599963
N 16.9029953 14.5549959 12.6909964
C 16.9229953 15.9259955 12.3859965
N 15.7659956 16.5099954 12.2369966
C 18.1749949 16.5639954 12.3049966
C 19.2759946 15.8539956 12.4789965
H 20.2269943 16.4009954 12.3929965
H 18.2949949 17.6379951 12.1019966
H 14.9229958 15.9769955 12.3109965
H 15.7219956 17.4909951 12.0479966
C 20.4910143 13.7460371 12.9439604
H 20.1968603 12.7260934 13.1854983
H 21.0918101 13.7537151 12.0291826
H 21.0763281 14.1698120 13.7655701
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '65')] = qcdb.Molecule("""
0 1
N 10.9240000 16.7550000 5.5620000
C 11.6470000 17.8510000 5.8140000
N 12.9490000 17.6590000 5.9790000
C 13.0500000 16.2780000 5.7950000
C 14.1950000 15.4230000 5.8560000
N 15.4060000 15.8590000 6.0610000
N 13.9020000 14.1180000 5.6250000
C 12.6770000 13.6430000 5.3990000
N 11.5490000 14.4040000 5.3300000
C 11.8450000 15.6910000 5.5460000
H 11.1804230 18.8265530 5.8822870
H 12.5884030 12.5696370 5.2620740
H 16.1977530 15.2199420 5.9750360
H 15.5570940 16.8510580 6.1500010
C 9.4931860 16.6413650 5.3399050
H 9.0446590 17.6337380 5.4112840
H 9.2947180 16.2234190 4.3499330
H 9.0442270 15.9854440 6.0897950
--
0 1
C 18.8920000 9.6580000 9.7710000
N 18.5280000 11.0700000 9.5880000
C 19.3770000 12.1420000 9.6130000
N 18.7760000 13.3090000 9.4320000
C 17.4530000 12.9640000 9.3170000
C 16.2780000 13.7530000 9.1210000
O 16.2220000 14.9840000 9.0220000
N 15.1360000 13.0410000 9.0450000
C 15.0850000 11.6720000 9.1350000
N 13.8450000 11.1640000 9.0360000
N 16.1360000 10.8810000 9.3170000
C 17.2760000 11.5910000 9.3940000
H 14.2561290 13.5779040 8.9264920
H 13.0354310 11.7259420 8.7508330
H 13.7773690 10.1594100 9.0211800
H 17.9880060 9.0643740 9.6322420
H 19.2851540 9.4890700 10.7774400
H 19.6407390 9.3607520 9.0321720
H 20.4431070 12.0114850 9.7460660
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '66')] = qcdb.Molecule("""
0 1
C 9.1690000 13.6920000 8.6010000
N 10.3470000 14.4960000 8.8170000
C 11.5790000 13.8470000 8.7070000
O 11.6020000 12.6420000 8.4120000
N 12.6940000 14.5550000 8.8810000
C 12.6740000 15.9260000 9.1860000
N 13.8310000 16.5100000 9.3350000
C 11.4220000 16.5640000 9.2670000
C 10.3210000 15.8540000 9.0930000
H 9.1403680 12.8642760 9.3131620
H 8.2785600 14.3117950 8.7260530
H 9.1795130 13.2651190 7.5953140
H 11.3501160 17.6252970 9.4808030
H 9.3300790 16.2918180 9.1491660
H 14.7113690 15.9651740 9.2135180
H 13.8876420 17.4962710 9.5342540
--
0 1
N 16.2460000 9.7810000 5.9650000
C 17.5950000 10.0510000 5.9930000
C 18.0920000 11.2690000 5.9020000
C 17.1390000 12.3410000 5.7640000
O 17.4920000 13.5330000 5.6630000
N 15.8280000 12.0550000 5.7130000
C 15.3100000 10.7970000 5.7960000
O 14.1120000 10.5770000 5.7580000
H 18.2280000 9.1744860 6.1031120
C 19.5529600 11.6051630 5.9357380
H 20.1631860 10.7042230 6.0438290
H 19.7760320 12.2828240 6.7658180
H 19.8526100 12.1260780 5.0209680
H 15.1383860 12.8499570 5.6472680
C 15.7717470 8.4029560 6.0779300
H 14.6864640 8.4223240 6.0045990
H 16.1825380 7.7884380 5.2708940
H 16.0652090 7.9755790 7.0417370
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '67')] = qcdb.Molecule("""
0 1
H 3.1762460 2.3738070 2.9634160
N 2.3770000 1.8470000 3.2830000
C 1.6370000 2.2160000 4.3790000
H 1.9902970 3.0843050 4.9210710
C 0.5610000 1.4930000 4.7730000
H -0.0085000 1.7736330 5.6470440
C 0.1830000 0.3990000 3.9430000
N -0.8510000 -0.3400000 4.2540000
H -1.1799330 -1.0651510 3.5908230
H -1.4362750 -0.1022370 5.0377650
N 0.8500000 0.0580000 2.8540000
C 1.9550000 0.7640000 2.4990000
O 2.5580000 0.4150000 1.4830000
--
0 1
H -3.4958570 -1.4150050 -3.9137580
N -3.0510000 -1.0010000 -3.1090000
C -3.5590000 -0.8800000 -1.8360000
H -4.5790060 -1.1582720 -1.6128580
N -2.7220000 -0.3740000 -0.9680000
C -1.5590000 -0.1810000 -1.7250000
C -0.2720000 0.3480000 -1.4650000
N 0.1070000 0.8840000 -0.3230000
H 1.0433330 1.2579620 -0.3065570
H -0.5751070 1.2407790 0.3499520
N 0.6670000 0.3750000 -2.4130000
C 0.3480000 -0.0810000 -3.6160000
H 1.1321870 -0.0417550 -4.3673920
N -0.8160000 -0.5790000 -4.0190000
C -1.7380000 -0.6050000 -3.0150000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '68')] = qcdb.Molecule("""
0 1
H 0.0112670 4.2441280 0.3057270
N -0.1600000 4.2010000 1.2990000
C 0.1490000 5.1520000 2.2350000
H 0.8336150 5.9557770 2.0023890
N -0.3040000 4.9000000 3.4380000
C -1.1470000 3.7970000 3.2290000
C -2.0790000 3.1160000 4.0900000
O -2.3440000 3.3110000 5.2740000
N -2.7730000 2.0930000 3.4630000
H -3.4444620 1.6202680 4.0533010
C -2.5700000 1.7190000 2.1650000
N -3.2200000 0.6740000 1.7040000
H -3.7884800 0.1079360 2.3113460
H -3.0424470 0.3264300 0.7529310
N -1.7100000 2.3160000 1.3470000
C -1.0480000 3.3630000 1.9240000
--
0 1
H -1.2611710 -4.7286740 -2.6257100
N -1.6090000 -4.2940000 -1.7860000
C -2.7550000 -4.5990000 -1.0690000
H -3.5136190 -5.2427470 -1.4922410
N -2.8650000 -3.9860000 0.0730000
C -1.6740000 -3.2820000 0.1910000
C -1.1780000 -2.4570000 1.2560000
O -1.7150000 -2.1460000 2.3170000
N 0.0980000 -1.9830000 1.0200000
H 0.4562670 -1.3045040 1.7132710
C 0.8280000 -2.2730000 -0.0890000
N 2.0180000 -1.7250000 -0.1770000
H 2.3044660 -0.9690820 0.4476800
H 2.5064670 -1.8555350 -1.0472790
N 0.3920000 -3.0250000 -1.1030000
C -0.8790000 -3.5010000 -0.9150000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '69')] = qcdb.Molecule("""
0 1
H 4.0780890 0.2050200 6.5267380
N 3.3380000 -0.4520000 6.3380000
C 2.1440000 -0.6140000 7.0100000
H 1.9445960 -0.0744500 7.9251340
N 1.3390000 -1.4880000 6.4770000
C 2.0190000 -1.9110000 5.3320000
C 1.6500000 -2.8430000 4.3020000
O 0.6370000 -3.5330000 4.1980000
N 2.5960000 -2.9520000 3.3010000
H 2.3705000 -3.6388980 2.5623150
C 3.7610000 -2.2490000 3.2730000
N 4.5620000 -2.4690000 2.2580000
H 4.3528370 -3.1696290 1.5459440
H 5.4428290 -1.9835850 2.2550440
N 4.1450000 -1.3880000 4.2160000
C 3.2280000 -1.2560000 5.2240000
--
0 1
H -1.2611710 -4.7286740 -2.6257100
N -1.6090000 -4.2940000 -1.7860000
C -2.7550000 -4.5990000 -1.0690000
H -3.5136190 -5.2427470 -1.4922410
N -2.8650000 -3.9860000 0.0730000
C -1.6740000 -3.2820000 0.1910000
C -1.1780000 -2.4570000 1.2560000
O -1.7150000 -2.1460000 2.3170000
N 0.0980000 -1.9830000 1.0200000
H 0.4562670 -1.3045040 1.7132710
C 0.8280000 -2.2730000 -0.0890000
N 2.0180000 -1.7250000 -0.1770000
H 2.3044660 -0.9690820 0.4476800
H 2.5064670 -1.8555350 -1.0472790
N 0.3920000 -3.0250000 -1.1030000
C -0.8790000 -3.5010000 -0.9150000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '70')] = qcdb.Molecule("""
0 1
H 3.1762460 2.3738070 2.9634160
N 2.3770000 1.8470000 3.2830000
C 1.6370000 2.2160000 4.3790000
H 1.9902970 3.0843050 4.9210710
C 0.5610000 1.4930000 4.7730000
H -0.0085000 1.7736330 5.6470440
C 0.1830000 0.3990000 3.9430000
N -0.8510000 -0.3400000 4.2540000
H -1.1799330 -1.0651510 3.5908230
H -1.4362750 -0.1022370 5.0377650
N 0.8500000 0.0580000 2.8540000
C 1.9550000 0.7640000 2.4990000
O 2.5580000 0.4150000 1.4830000
--
0 1
H 3.2823840 -6.1134940 -1.3105350
N 2.5530000 -6.0070000 -0.6210000
C 1.3990000 -6.7620000 -0.6490000
H 1.3017290 -7.4646550 -1.4662410
C 0.4550000 -6.5890000 0.3070000
H -0.4593850 -7.1648600 0.2947650
C 0.7210000 -5.6290000 1.3280000
N -0.1590000 -5.3940000 2.2700000
H -1.0266130 -5.9017830 2.3125200
H 0.0709100 -4.7127400 3.0149280
N 1.8460000 -4.9310000 1.3860000
C 2.7800000 -5.0940000 0.4140000
O 3.8210000 -4.4400000 0.4780000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '71')] = qcdb.Molecule("""
0 1
O -1.2390176 -2.5490521 0.6548924
C -1.0284571 -1.3714583 0.9008651
N -0.0318511 -0.9949528 1.8248233
C 0.3841646 0.2706806 2.1182164
N -0.1910285 1.3513281 1.6527710
C -1.2092305 1.0513624 0.8089237
C -1.6565083 -0.1915101 0.3706051
N -2.6541580 -0.0639048 -0.5661534
C -2.8177333 1.2431899 -0.6803818
N -1.9753657 1.9574414 0.1290579
N 1.4525454 0.3558875 2.9872621
H 0.4866119 -1.7695272 2.2174674
H -3.5338415 1.7253425 -1.3240899
H -1.9138820 2.9580997 0.2181746
H 1.7298659 1.3225221 3.0797421
H 2.2376547 -0.1901480 2.6476325
--
0 1
C 2.2123373 -0.0590839 -0.4645529
N 2.1205577 1.1822577 -1.1169007
C 1.2003987 1.4553092 -2.0711004
C 0.3300220 0.4917324 -2.4615962
C 0.4626198 -0.7818118 -1.8195186
N 1.3658705 -1.0412675 -0.8919664
O 3.0203933 -0.1851683 0.4516286
N -0.3645719 -1.7922584 -2.1870353
H 2.7522574 1.8928231 -0.7832658
H 1.2077958 2.4531651 -2.4849122
H -0.4090987 0.6756472 -3.2236499
H -1.2619684 -1.5144746 -2.5505972
H -0.4171470 -2.5417743 -1.5096178
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '72')] = qcdb.Molecule("""
0 1
O -1.6144948 -2.7570519 -0.2060980
C -1.1842160 -1.7852952 0.3968248
N -0.0652525 -1.9057030 1.2481671
C 0.5967440 -0.9002665 1.8887926
N 0.1973220 0.3471640 1.8866901
C -0.9263931 0.5128083 1.1439041
C -1.6366545 -0.4213094 0.3921297
N -2.6684418 0.1720265 -0.2870095
C -2.5827038 1.4477282 0.0619406
N -1.5537529 1.7066351 0.9265244
N 1.7211367 -1.2836170 2.5904485
H 0.3310406 -2.8355291 1.2850314
H -3.2456406 2.2307336 -0.2693001
C -1.1552104 2.9806914 1.4836593
H 2.1791458 -0.4655292 2.9651066
H 2.3669253 -1.7840635 1.9899163
H -0.1579041 3.2445863 1.1378850
H -1.8652414 3.7371235 1.1620610
H -1.1470859 2.9258678 2.5690805
--
0 1
C 1.9196368 -0.2893692 -0.7963336
N 1.8412129 1.1192423 -0.8402021
C 0.8544988 1.7477483 -1.5184980
C -0.1003666 1.0437295 -2.1815479
C -0.0071603 -0.3764662 -2.1036673
N 0.9713285 -0.9972682 -1.4703627
O 2.8208711 -0.8014248 -0.1303355
N -0.9502154 -1.1428384 -2.7108142
C 2.8076706 1.8576425 -0.0455040
H 0.8766575 2.8292157 -1.5014613
H -0.8834995 1.5417865 -2.7295416
H -1.8580411 -0.7155517 -2.8051204
H -0.9899772 -2.0895195 -2.3567972
H 3.8087243 1.5028551 -0.2699237
H 2.6081184 1.6978177 1.0127736
H 2.7199672 2.9140199 -0.2854865
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '73')] = qcdb.Molecule("""
0 1
N 0.2793014 2.4068393 -0.6057517
C -1.0848570 2.4457461 -0.5511608
H -1.6594403 3.0230294 -1.2560905
N -1.5977117 1.7179877 0.4287543
C -0.4897255 1.1714358 1.0301910
C -0.3461366 0.2914710 2.1172343
N -1.4187090 -0.1677767 2.8101441
H -1.2388750 -0.9594802 3.4047578
H -2.2918734 -0.1788223 2.3073619
N 0.8857630 -0.0700763 2.4919494
C 1.9352348 0.4072878 1.7968022
H 2.9060330 0.0788414 2.1458181
N 1.9409775 1.2242019 0.7402202
C 0.6952186 1.5779858 0.4063984
H 0.8610073 2.8298045 -1.3104502
--
0 1
N 1.2754606 -0.6478993 -1.9779104
C 1.4130533 -1.5536850 -0.9550667
H 2.4258769 -1.8670780 -0.7468778
C 0.3575976 -2.0239499 -0.2530575
C 0.4821292 -3.0179494 0.8521221
H 0.1757705 -2.5756065 1.7986281
H -0.1601691 -3.8770412 0.6639498
H 1.5112443 -3.3572767 0.9513659
C -0.9684711 -1.5298112 -0.5939792
O -2.0029280 -1.8396957 -0.0199453
N -0.9956916 -0.6383870 -1.6720420
H -1.9014057 -0.2501720 -1.8985760
C 0.0684702 -0.1191762 -2.3763759
O -0.0397875 0.7227006 -3.2531083
H 2.0853289 -0.2760176 -2.4454577
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '74')] = qcdb.Molecule("""
0 1
N -0.3455004 1.7703632 1.4950792
C -1.6474050 1.3634505 1.5386766
H -2.4523693 2.0803127 1.5703490
N -1.8053639 0.0450392 1.5375118
C -0.5193842 -0.4240596 1.4834056
C 0.0152186 -1.7249725 1.4821754
N -0.7782381 -2.8218524 1.5417158
H -0.3281681 -3.6995564 1.3432557
H -1.7192874 -2.7111068 1.1983318
N 1.3452903 -1.8718583 1.4651757
C 2.1159101 -0.7701212 1.4213994
H 3.1830548 -0.9527061 1.4028830
N 1.7419114 0.5131994 1.4043323
C 0.4096081 0.6245403 1.4501833
C 0.1512980 3.1326941 1.4689984
H -0.0424219 3.5749692 0.4946347
H -0.3347704 3.7141185 2.2479916
H 1.2201020 3.0964449 1.6609900
--
0 1
N 0.8076098 1.0547322 -1.6591556
C 1.2548662 -0.2426109 -1.7103022
H 2.3275169 -0.3452707 -1.8079765
C 0.4450062 -1.3265062 -1.6516166
C 0.9521849 -2.7269269 -1.7314581
H 0.7400336 -3.2617591 -0.8079097
H 0.4633129 -3.2624261 -2.5442266
H 2.0282910 -2.7371647 -1.8922437
C -0.9813923 -1.1031917 -1.5070970
O -1.8286775 -1.9792278 -1.3834794
N -1.3482304 0.2425066 -1.5277277
H -2.3301840 0.4328271 -1.3774912
C -0.5338001 1.3531361 -1.5364698
O -0.9719491 2.4936714 -1.4469221
C 1.7769330 2.1404377 -1.6201082
H 2.2553696 2.1734198 -0.6418794
H 2.5269559 1.9765736 -2.3901046
H 1.2518503 3.0690500 -1.8139519
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '75')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 0.9181960 -0.9215090 3.4000000
N -0.3693690 -1.5141310 3.4000000
C -1.5252510 -0.8082010 3.4000000
C -1.4858600 0.5568310 3.4000000
C -0.1723650 1.1455600 3.4000000
N 0.9526960 0.4540270 3.4000000
O 1.9020460 -1.6508420 3.4000000
N -0.0596430 2.5018840 3.4000000
H -0.3693760 -2.5309310 3.4000000
H -2.4533460 -1.3813360 3.4000000
H -2.3977890 1.1506030 3.4000000
H -0.8684590 3.1052900 3.4000000
H 0.8696630 2.9040660 3.4000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '76')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 1.2571480 0.3344260 3.3000000
N 1.1265920 -1.0769480 3.3000000
C -0.0627020 -1.7250070 3.3000000
C -1.2251600 -1.0083780 3.3000000
C -1.0782670 0.4235070 3.3000000
N 0.0831490 1.0520730 3.3000000
O 2.3806940 0.8218000 3.3000000
N -2.1965170 1.1992900 3.3000000
H 2.0071630 -1.5853550 3.3000000
H -0.0304010 -2.8153280 3.3000000
H -2.1953460 -1.5012440 3.3000000
H -3.1234900 0.8005370 3.3000000
H -2.0801630 2.2051830 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '77')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 0.3389520 1.2559350 3.3000000
N 1.4959600 0.4371830 3.3000000
C 1.4625490 -0.9168050 3.3000000
C 0.2607010 -1.5652080 3.3000000
C -0.9059010 -0.7220530 3.3000000
N -0.8695470 0.5980460 3.3000000
O 0.4786470 2.4726420 3.3000000
N -2.1368730 -1.3025950 3.3000000
H 2.3765390 0.9455770 3.3000000
H 2.4229450 -1.4339920 3.3000000
H 0.2024430 -2.6518470 3.3000000
H -2.2550290 -2.3047530 3.3000000
H -2.9498260 -0.6988840 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '78')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C -0.9181960 0.9215090 3.3000000
N 0.3693690 1.5141310 3.3000000
C 1.5252510 0.8082010 3.3000000
C 1.4858600 -0.5568310 3.3000000
C 0.1723650 -1.1455600 3.3000000
N -0.9526960 -0.4540270 3.3000000
O -1.9020460 1.6508420 3.3000000
N 0.0596430 -2.5018840 3.3000000
H 0.3693760 2.5309310 3.3000000
H 2.4533460 1.3813360 3.3000000
H 2.3977890 -1.1506030 3.3000000
H 0.8684590 -3.1052900 3.3000000
H -0.8696630 -2.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '79')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 1.9181950 -0.9215090 3.3000000
N 0.6306310 -1.5141310 3.3000000
C -0.5252510 -0.8082010 3.3000000
C -0.4858600 0.5568310 3.3000000
C 0.8276350 1.1455600 3.3000000
N 1.9526960 0.4540270 3.3000000
O 2.9020460 -1.6508420 3.3000000
N 0.9403570 2.5018840 3.3000000
H 0.6306240 -2.5309310 3.3000000
H -1.4533460 -1.3813360 3.3000000
H -1.3977890 1.1506030 3.3000000
H 0.1315410 3.1052900 3.3000000
H 1.8696630 2.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '80')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 0.9181960 0.0784910 3.3000000
N -0.3693690 -0.5141310 3.3000000
C -1.5252510 0.1917990 3.3000000
C -1.4858600 1.5568310 3.3000000
C -0.1723650 2.1455600 3.3000000
N 0.9526960 1.4540270 3.3000000
O 1.9020460 -0.6508420 3.3000000
N -0.0596430 3.5018840 3.3000000
H -0.3693760 -1.5309310 3.3000000
H -2.4533460 -0.3813360 3.3000000
H -2.3977890 2.1506030 3.3000000
H -0.8684590 4.1052890 3.3000000
H 0.8696630 3.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '81')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 2.9181950 -2.9215090 3.3000000
N 1.6306310 -3.5141310 3.3000000
C 0.4747490 -2.8082010 3.3000000
C 0.5141400 -1.4431690 3.3000000
C 1.8276350 -0.8544400 3.3000000
N 2.9526960 -1.5459730 3.3000000
O 3.9020460 -3.6508420 3.3000000
N 1.9403570 0.5018840 3.3000000
H 1.6306240 -4.5309310 3.3000000
H -0.4533460 -3.3813360 3.3000000
H -0.3977890 -0.8493970 3.3000000
H 1.1315410 1.1052900 3.3000000
H 2.8696630 0.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '82')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 0.0818040 0.9215090 3.3000000
N 1.3693690 1.5141310 3.3000000
C 2.5252510 0.8082010 3.3000000
C 2.4858600 -0.5568310 3.3000000
C 1.1723650 -1.1455600 3.3000000
N 0.0473040 -0.4540270 3.3000000
O -0.9020460 1.6508420 3.3000000
N 1.0596430 -2.5018840 3.3000000
H 1.3693760 2.5309310 3.3000000
H 3.4533460 1.3813360 3.3000000
H 3.3977890 -1.1506030 3.3000000
H 1.8684590 -3.1052900 3.3000000
H 0.1303370 -2.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '83')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C -1.9181960 0.9215090 3.3000000
N -0.6306310 1.5141310 3.3000000
C 0.5252510 0.8082010 3.3000000
C 0.4858600 -0.5568310 3.3000000
C -0.8276350 -1.1455600 3.3000000
N -1.9526960 -0.4540270 3.3000000
O -2.9020460 1.6508420 3.3000000
N -0.9403570 -2.5018840 3.3000000
H -0.6306240 2.5309320 3.3000000
H 1.4533460 1.3813360 3.3000000
H 1.3977890 -1.1506030 3.3000000
H -0.1315410 -3.1052900 3.3000000
H -1.8696630 -2.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '84')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C -0.9181960 1.9215090 3.3000000
N 0.3693690 2.5141310 3.3000000
C 1.5252510 1.8082010 3.3000000
C 1.4858600 0.4431690 3.3000000
C 0.1723650 -0.1455600 3.3000000
N -0.9526960 0.5459730 3.3000000
O -1.9020460 2.6508420 3.3000000
N 0.0596430 -1.5018840 3.3000000
H 0.3693760 3.5309310 3.3000000
H 2.4533460 2.3813360 3.3000000
H 2.3977890 -0.1506030 3.3000000
H 0.8684590 -2.1052900 3.3000000
H -0.8696630 -1.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '85')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C -0.9181960 -0.0784910 3.3000000
N 0.3693690 0.5141310 3.3000000
C 1.5252510 -0.1917990 3.3000000
C 1.4858600 -1.5568310 3.3000000
C 0.1723650 -2.1455600 3.3000000
N -0.9526960 -1.4540270 3.3000000
O -1.9020460 0.6508420 3.3000000
N 0.0596430 -3.5018840 3.3000000
H 0.3693760 1.5309310 3.3000000
H 2.4533460 0.3813360 3.3000000
H 2.3977890 -2.1506030 3.3000000
H 0.8684590 -4.1052900 3.3000000
H -0.8696630 -3.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '86')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 1.0818040 -1.0784910 3.3000000
N 2.3693690 -0.4858690 3.3000000
C 3.5252510 -1.1917990 3.3000000
C 3.4858600 -2.5568310 3.3000000
C 2.1723650 -3.1455600 3.3000000
N 1.0473050 -2.4540270 3.3000000
O 0.0979540 -0.3491580 3.3000000
N 2.0596430 -4.5018840 3.3000000
H 2.3693760 0.5309310 3.3000000
H 4.4533460 -0.6186640 3.3000000
H 4.3977890 -3.1506030 3.3000000
H 2.8684590 -5.1052900 3.3000000
H 1.1303370 -4.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '87')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C -0.9181960 2.9215090 3.3000000
N 0.3693690 3.5141310 3.3000000
C 1.5252510 2.8082010 3.3000000
C 1.4858600 1.4431690 3.3000000
C 0.1723660 0.8544400 3.3000000
N -0.9526960 1.5459730 3.3000000
O -1.9020460 3.6508420 3.3000000
N 0.0596430 -0.5018840 3.3000000
H 0.3693760 4.5309310 3.3000000
H 2.4533460 3.3813360 3.3000000
H 2.3977890 0.8493970 3.3000000
H 0.8684590 -1.1052900 3.3000000
H -0.8696630 -0.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '88')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C -0.9181960 -0.9215090 3.3000000
N 0.3693690 -1.5141310 3.3000000
C 1.5252510 -0.8082010 3.3000000
C 1.4858600 0.5568310 3.3000000
C 0.1723650 1.1455600 3.3000000
N -0.9526960 0.4540270 3.3000000
O -1.9020460 -1.6508420 3.3000000
N 0.0596430 2.5018840 3.3000000
H 0.3693760 -2.5309310 3.3000000
H 2.4533460 -1.3813360 3.3000000
H 2.3977890 1.1506030 3.3000000
H 0.8684590 3.1052900 3.3000000
H -0.8696630 2.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '89')] = qcdb.Molecule("""
0 1
N -1.9000000 -0.3579200 0.0000000
C -1.9000000 0.9808800 0.0000000
N -0.8497640 1.8329300 0.0000000
C 0.3886960 1.3139590 0.0000000
C 0.5403660 -0.0879600 0.0000000
C -0.6427490 -0.8323270 0.0000000
N -0.2184070 -2.1434690 0.0000000
C 1.1532860 -2.1196680 0.0000000
N 1.6612330 -0.8947060 0.0000000
N 1.4545080 2.1469300 0.0000000
H -2.8785740 1.4564190 0.0000000
H 1.7387770 -3.0306410 0.0000000
H -0.8201870 -2.9587230 0.0000000
H 1.2990760 3.1441900 0.0000000
H 2.3918120 1.7733760 0.0000000
--
0 1
N 2.0400330 1.0244080 3.3000000
C 3.1994670 0.3550080 3.3000000
N 3.4122460 -0.9805480 3.3000000
C 2.3435740 -1.7936000 3.3000000
C 1.0536410 -1.2239910 3.3000000
C 1.0005580 0.1728010 3.3000000
N -0.3470950 0.4608810 3.3000000
C -1.0123290 -0.7389400 3.3000000
N -0.2054550 -1.7913170 3.3000000
N 2.5320410 -3.1331060 3.3000000
H 4.1005830 0.9647080 3.3000000
H -2.0940000 -0.7905040 3.3000000
H -0.7522350 1.3896650 3.3000000
H 3.4734100 -3.4971280 3.3000000
H 1.7398820 -3.7580580 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '90')] = qcdb.Molecule("""
0 1
O 0.2392880 -2.6920590 0.0000000
C 0.2392880 -1.4664590 0.0000000
N 1.4831650 -0.7585720 0.0000000
C 1.6585390 0.6049970 0.0000000
N 0.6694090 1.4698410 0.0000000
C -0.5424070 0.8439610 0.0000000
C -0.8433090 -0.5171760 0.0000000
N -2.2044110 -0.7367510 0.0000000
C -2.7203200 0.4816210 0.0000000
N -1.7621040 1.4687240 0.0000000
N 2.9429720 1.0619610 0.0000000
H 2.2894380 -1.3782560 0.0000000
H -3.7780480 0.7114080 0.0000000
H -1.9055140 2.4718250 0.0000000
H 3.0816010 2.0608880 0.0000000
H 3.7442410 0.4525240 0.0000000
--
0 1
O -3.4927120 1.0318190 3.3000000
C -2.2857320 0.8189950 3.3000000
N -1.8045960 -0.5289080 3.3000000
C -0.4921960 -0.9383990 3.3000000
N 0.5312690 -0.1144750 3.3000000
C 0.1253280 1.1876140 3.3000000
C -1.1628790 1.7203040 3.3000000
N -1.1427660 3.0988560 3.3000000
C 0.1466840 3.3953590 3.3000000
N 0.9523970 2.2802920 3.3000000
N -0.2652140 -2.2826700 3.3000000
H -2.5548740 -1.2153250 3.3000000
H 0.5566520 4.3971160 3.3000000
H 1.9651610 2.2473370 3.3000000
H 0.6944640 -2.5926540 3.3000000
H -1.0045320 -2.9659380 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '91')] = qcdb.Molecule("""
0 1
N -1.9000000 -0.3579200 0.0000000
C -1.9000000 0.9808800 0.0000000
N -0.8497640 1.8329300 0.0000000
C 0.3886960 1.3139590 0.0000000
C 0.5403660 -0.0879600 0.0000000
C -0.6427490 -0.8323270 0.0000000
N -0.2184070 -2.1434690 0.0000000
C 1.1532860 -2.1196680 0.0000000
N 1.6612330 -0.8947060 0.0000000
N 1.4545080 2.1469300 0.0000000
H -2.8785740 1.4564190 0.0000000
H 1.7387770 -3.0306410 0.0000000
H -0.8201870 -2.9587230 0.0000000
H 1.2990760 3.1441900 0.0000000
H 2.3918120 1.7733760 0.0000000
--
0 1
C 0.2481770 -2.1847000 3.3000000
N 0.9375100 -0.9462160 3.3000000
C 0.3220220 0.2602560 3.3000000
C -1.0420250 0.3253500 3.3000000
C -1.7294600 -0.9392870 3.3000000
N -1.1259700 -2.1139300 3.3000000
O 0.9001510 -3.2214350 3.3000000
N -3.0904320 -0.9479780 3.3000000
H 1.9513330 -1.0239520 3.3000000
H 0.9644400 1.1418140 3.3000000
H -1.5643350 1.2800070 3.3000000
H -3.6302290 -0.0953930 3.3000000
H -3.5624890 -1.8438140 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '92')] = qcdb.Molecule("""
0 1
O 0.2392880 -2.6920580 0.0000000
C 0.2392880 -1.4664580 0.0000000
N 1.4831650 -0.7585710 0.0000000
C 1.6585390 0.6049980 0.0000000
N 0.6694090 1.4698420 0.0000000
C -0.5424070 0.8439620 0.0000000
C -0.8433090 -0.5171750 0.0000000
N -2.2044110 -0.7367500 0.0000000
C -2.7203200 0.4816220 0.0000000
N -1.7621040 1.4687250 0.0000000
N 2.9429720 1.0619620 0.0000000
H 2.2894380 -1.3782550 0.0000000
H -3.7780480 0.7114090 0.0000000
H -1.9055140 2.4718250 0.0000000
H 3.0816010 2.0608890 0.0000000
H 3.7442410 0.4525250 0.0000000
--
0 1
N -2.9334410 0.9976940 -3.3000000
C -2.6871670 2.3136480 -3.3000000
N -1.4981170 2.9579650 -3.3000000
C -0.3762570 2.2200360 -3.3000000
C -0.4850590 0.8141390 -3.3000000
C -1.7849120 0.3001090 -3.3000000
N -1.6089980 -1.0667170 -3.3000000
C -0.2563330 -1.2956460 -3.3000000
N 0.4682790 -0.1850240 -3.3000000
N 0.8245940 2.8427340 -3.3000000
H -3.5615660 2.9610820 -3.3000000
H 0.1515920 -2.2987750 -3.3000000
H -2.3504750 -1.7573600 -3.3000000
H 0.8552610 3.8515680 -3.3000000
H 1.6771880 2.3031370 -3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '93')] = qcdb.Molecule("""
0 1
C -1.2210000 -0.4488000 0.0000000
N -1.2210000 0.9686000 0.0000000
C -0.0964540 1.7234490 0.0000000
C 1.1270700 1.1169400 0.0000000
C 1.1126920 -0.3223880 0.0000000
N 0.0141100 -1.0552590 0.0000000
O -2.2948780 -1.0375910 0.0000000
N 2.2976450 -0.9918710 0.0000000
H -2.1446570 1.3937360 0.0000000
H -0.2290470 2.8061600 0.0000000
H 2.0477340 1.6970750 0.0000000
H 3.1839480 -0.5094300 0.0000000
H 2.2744390 -2.0042050 0.0000000
--
0 1
C 0.8210000 1.4488000 3.3000000
N 0.8210000 0.0314000 3.3000000
C -0.3035460 -0.7234490 3.3000000
C -1.5270700 -0.1169400 3.3000000
C -1.5126920 1.3223880 3.3000000
N -0.4141100 2.0552590 3.3000000
O 1.8948780 2.0375920 3.3000000
N -2.6976450 1.9918700 3.3000000
H 1.7446570 -0.3937350 3.3000000
H -0.1709520 -1.8061600 3.3000000
H -2.4477340 -0.6970760 3.3000000
H -3.5839490 1.5094290 3.3000000
H -2.6744400 3.0042040 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '94')] = qcdb.Molecule("""
0 1
N -1.9000000 -0.3579200 0.0000000
C -1.9000000 0.9808800 0.0000000
N -0.8497640 1.8329300 0.0000000
C 0.3886960 1.3139590 0.0000000
C 0.5403660 -0.0879600 0.0000000
C -0.6427490 -0.8323270 0.0000000
N -0.2184070 -2.1434690 0.0000000
C 1.1532860 -2.1196680 0.0000000
N 1.6612330 -0.8947060 0.0000000
N 1.4545080 2.1469300 0.0000000
H -2.8785740 1.4564190 0.0000000
H 1.7387770 -3.0306410 0.0000000
H -0.8201870 -2.9587230 0.0000000
H 1.2990760 3.1441900 0.0000000
H 2.3918120 1.7733760 0.0000000
--
0 1
O 0.8290540 -2.7349420 3.3000000
C 0.6180080 -1.5257000 3.3000000
N 1.6882380 -0.6117020 3.3000000
C 1.6473780 0.7731900 3.3000000
N 0.3483000 1.2653660 3.3000000
C -0.7746550 0.4682380 3.3000000
C -0.6885800 -0.8807150 3.3000000
O 2.6370680 1.4932450 3.3000000
H -1.5708660 -1.5073280 3.3000000
H 2.6203010 -1.0186580 3.3000000
H 0.2745460 2.2757330 3.3000000
H -1.7216600 0.9981100 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '95')] = qcdb.Molecule("""
0 1
O 0.2392880 -2.6920580 0.0000000
C 0.2392880 -1.4664580 0.0000000
N 1.4831650 -0.7585710 0.0000000
C 1.6585390 0.6049980 0.0000000
N 0.6694090 1.4698420 0.0000000
C -0.5424070 0.8439620 0.0000000
C -0.8433090 -0.5171750 0.0000000
N -2.2044110 -0.7367500 0.0000000
C -2.7203200 0.4816220 0.0000000
N -1.7621040 1.4687250 0.0000000
N 2.9429720 1.0619620 0.0000000
H 2.2894380 -1.3782550 0.0000000
H -3.7780480 0.7114090 0.0000000
H -1.9055140 2.4718250 0.0000000
H 3.0816010 2.0608890 0.0000000
H 3.7442410 0.4525250 0.0000000
--
0 1
C 2.5140740 0.5212730 -3.3000000
N 1.2263530 1.1135940 -3.3000000
C 0.0706410 0.4073870 -3.3000000
C 0.1103570 -0.9576360 -3.3000000
C 1.4239940 -1.5460500 -3.3000000
N 2.5488900 -0.8542470 -3.3000000
O 3.4977500 1.2508430 -3.3000000
N 1.5370420 -2.9023480 -3.3000000
H 1.2261020 2.1303940 -3.3000000
H -0.8575930 0.9802990 -3.3000000
H -0.8014280 -1.5516270 -3.3000000
H 0.7283690 -3.5059470 -3.3000000
H 2.4664440 -3.3043070 -3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '96')] = qcdb.Molecule("""
0 1
C -1.2210000 -0.4488000 0.0000000
N -1.2210000 0.9686000 0.0000000
C -0.0964540 1.7234490 0.0000000
C 1.1270700 1.1169400 0.0000000
C 1.1126920 -0.3223880 0.0000000
N 0.0141100 -1.0552590 0.0000000
O -2.2948780 -1.0375910 0.0000000
N 2.2976450 -0.9918710 0.0000000
H -2.1446570 1.3937360 0.0000000
H -0.2290470 2.8061600 0.0000000
H 2.0477340 1.6970750 0.0000000
H 3.1839480 -0.5094300 0.0000000
H 2.2744390 -2.0042050 0.0000000
--
0 1
O -0.2290330 2.1280320 3.3000000
C -0.3146620 0.9035020 3.3000000
N 0.8438480 0.1043590 3.3000000
C 0.9455600 -1.2773930 3.3000000
N -0.2960490 -1.9004980 3.3000000
C -1.4949900 -1.2230250 3.3000000
C -1.5480330 0.1276340 3.3000000
O 2.0040190 -1.8919000 3.3000000
H -2.4900550 0.6602360 3.3000000
H 1.7291390 0.6049670 3.3000000
H -0.2655530 -2.9130890 3.3000000
H -2.3825080 -1.8474300 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '97')] = qcdb.Molecule("""
0 1
O -0.4072070 -2.5021900 0.0000000
C -0.4072070 -1.2746690 0.0000000
N 0.8042290 -0.5582850 0.0000000
C 1.0020800 0.8130100 0.0000000
N -0.1930340 1.5212070 0.0000000
C -1.4363170 0.9290170 0.0000000
C -1.5834480 -0.4146490 0.0000000
O 2.1008310 1.3521860 0.0000000
H -2.5603280 -0.8802410 0.0000000
H 1.6524450 -1.1194300 0.0000000
H -0.0919780 2.5292090 0.0000000
H -2.2781210 1.6138160 0.0000000
--
0 1
O -1.1927920 2.1021900 3.3000000
C -1.1927930 0.8746690 3.3000000
N -2.4042290 0.1582850 3.3000000
C -2.6020800 -1.2130100 3.3000000
N -1.4069670 -1.9212070 3.3000000
C -0.1636830 -1.3290170 3.3000000
C -0.0165520 0.0146480 3.3000000
O -3.7008310 -1.7521850 3.3000000
H 0.9603280 0.4802400 3.3000000
H -3.2524450 0.7194310 3.3000000
H -1.5080230 -2.9292090 3.3000000
H 0.6781200 -2.0138170 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '98')] = qcdb.Molecule("""
0 1
O 0.2392880 -2.6920580 0.0000000
C 0.2392880 -1.4664580 0.0000000
N 1.4831650 -0.7585710 0.0000000
C 1.6585390 0.6049980 0.0000000
N 0.6694090 1.4698420 0.0000000
C -0.5424070 0.8439620 0.0000000
C -0.8433090 -0.5171750 0.0000000
N -2.2044110 -0.7367500 0.0000000
C -2.7203200 0.4816220 0.0000000
N -1.7621040 1.4687250 0.0000000
N 2.9429720 1.0619620 0.0000000
H 2.2894380 -1.3782550 0.0000000
H -3.7780480 0.7114090 0.0000000
H -1.9055140 2.4718250 0.0000000
H 3.0816010 2.0608890 0.0000000
H 3.7442410 0.4525250 0.0000000
--
0 1
O 2.7274930 0.0284280 -3.3000000
C 1.5380200 -0.2749280 -3.3000000
N 0.5444810 0.7218990 -3.3000000
C -0.8331760 0.5747330 -3.3000000
N -1.2240650 -0.7583250 -3.3000000
C -0.3429990 -1.8167020 -3.3000000
C 0.9953510 -1.6272180 -3.3000000
O -1.6271560 1.5061620 -3.3000000
H 1.6879130 -2.4587410 -3.3000000
H 0.8786070 1.6824790 -3.3000000
H -2.2257760 -0.9095050 -3.3000000
H -0.7985270 -2.8016270 -3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '99')] = qcdb.Molecule("""
0 1
O 0.9601320 1.3436400 0.0000000
C 1.5166980 0.2684520 0.0000000
N 0.7573320 -0.9011610 0.0000000
C 1.2481620 -2.1702510 0.0000000
N 2.5209460 -2.4496950 0.0000000
C 3.2915230 -1.3476830 0.0000000
C 2.9121790 -0.0279190 0.0000000
N 4.0200060 0.7969640 0.0000000
C 5.0170310 0.0003310 0.0000000
N 4.6446780 -1.3255770 0.0000000
N 0.3459700 -3.1553460 0.0000000
H -0.2412520 -0.7659240 0.0000000
H 6.0483360 0.2895830 0.0000000
H 5.2362800 -2.1226110 0.0000000
H 0.6928700 -4.0838600 0.0000000
H -0.6408270 -2.9885130 0.0000000
--
0 1
O -0.0130090 1.6513790 3.3600000
C 1.0692420 1.1086750 3.3600000
N 1.1423840 -0.2839060 3.3600000
C 2.2854260 -1.0221180 3.3600000
N 3.4793830 -0.5000700 3.3600000
C 3.4550460 0.8444100 3.3600000
C 2.3724120 1.6891490 3.3600000
N 2.7838090 3.0076580 3.3600000
C 4.0586690 2.9492050 3.3600000
N 4.5367780 1.6576590 3.3600000
N 2.1345620 -2.3493720 3.3600000
H 0.2550220 -0.7614500 3.3600000
H 4.7229940 3.7894010 3.3600000
H 5.4838790 1.3605800 3.3600000
H 2.9609760 -2.8966530 3.3600000
H 1.2381640 -2.7944260 3.3600000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '100')] = qcdb.Molecule("""
0 1
C -3.0263940 -1.4464050 0.0000000
N -4.3985350 -1.2378500 0.0000000
C -4.9449180 0.0000290 0.0000000
C -4.1674910 1.0873990 0.0000000
C -2.7399670 0.8551050 0.0000000
N -2.2307000 -0.3568440 0.0000000
O -2.6172300 -2.5848080 0.0000000
N -1.9099420 1.8850830 0.0000000
H -4.9632880 -2.0564360 0.0000000
H -6.0175890 0.0492700 0.0000000
H -4.5763200 2.0777300 0.0000000
H -2.2565290 2.8138220 0.0000000
H -0.9141020 1.7329110 0.0000000
--
0 1
C -1.5982280 -2.9490360 3.3600000
N -2.8308990 -3.5868360 3.3600000
C -4.0005400 -2.9065270 3.3600000
C -4.0107280 -1.5698660 3.3600000
C -2.7192980 -0.9187180 3.3600000
N -1.5949260 -1.5998660 3.3600000
O -0.5980710 -3.6295230 3.3600000
N -2.6531990 0.4024280 3.3600000
H -2.8066410 -4.5810390 3.3600000
H -4.8972920 -3.4971900 3.3600000
H -4.9235800 -1.0089750 3.3600000
H -3.4794940 0.9500750 3.3600000
H -1.7581040 0.8646590 3.3600000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '101')] = qcdb.Molecule("""
0 1
N -1.3923840 -1.5825730 -0.2790500
C -1.8533500 -0.3518640 -0.0620430
N -0.9943890 0.6521290 0.1149880
C -1.4604570 1.8814980 0.3317590
N -2.7070820 2.2763020 0.4013740
C -3.5527210 1.2640760 0.2228910
C -3.2236500 -0.0504790 -0.0089010
N -4.3580740 -0.8272780 -0.1458710
C -5.3247240 -0.0009840 -0.0001730
N -4.9130980 1.2870000 0.2269330
H -0.7040060 2.6348130 0.4645890
H -6.3651290 -0.2529400 -0.0446000
H -5.4840420 2.0871050 0.3680130
H -0.4093220 -1.7576030 -0.3099130
H -2.0356960 -2.3259680 -0.4101310
--
0 1
N -0.1962490 -2.0987510 2.7709500
C -1.2925710 -1.3740360 2.9879570
N -1.1877890 -0.0569040 3.1649880
C -2.2874510 0.6637280 3.3817590
N -3.5280520 0.2503840 3.4513740
C -3.6172170 -1.0655780 3.2728910
C -2.5783160 -1.9356530 3.0410990
N -3.0394940 -3.2308940 2.9041290
C -4.3072140 -3.1305910 3.0498260
N -4.7312590 -1.8466420 3.2769330
H -2.1182570 1.7178040 3.5145890
H -5.0008230 -3.9459620 3.0054000
H -5.6634530 -1.5349360 3.4180130
H 0.7019450 -1.6625240 2.7400870
H -0.2797430 -3.0783000 2.6398690
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '102')] = qcdb.Molecule("""
0 1
O 1.6803850 -1.8647480 0.3288050
C 2.4435780 -0.9466670 0.1669230
N 1.9754430 0.3257080 -0.0574310
C 2.7234150 1.4521870 -0.2560600
N 4.0753100 1.2353980 -0.2178340
C 4.6340910 -0.0009940 0.0001750
C 3.9044100 -1.0972430 0.1934740
O 2.2509580 2.5354000 -0.4470600
C 4.4733490 -2.4660930 0.4348390
H 4.6436490 2.0381400 -0.3593790
H 0.9698550 0.4501820 -0.0793790
H 5.7079390 -0.0187620 0.0033080
H 4.1432070 -2.8577080 1.3905330
H 4.1417710 -3.1613140 -0.3283340
H 5.5573000 -2.4391850 0.4291940
--
0 1
O 2.4555320 -0.5209070 3.3788050
C 2.5333330 0.6704300 3.2169230
N 1.4067200 1.4246400 2.9925690
C 1.3497150 2.7756270 2.7939400
N 2.5708460 3.3948650 2.8321660
C 3.7496420 2.7230470 3.0501750
C 3.8036770 1.4072660 3.2434740
O 0.3307920 3.3742620 2.6029400
C 5.0685490 0.6342580 3.4848390
H 2.5588020 4.3783590 2.6906210
H 0.5200190 0.9342720 2.9706210
H 4.6288470 3.3398640 3.0533080
H 5.0316430 0.1233820 4.4405330
H 5.2089370 -0.1230850 2.7216660
H 5.9296670 1.2931580 3.4791940
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '103')] = qcdb.Molecule("""
0 1
C 2.4313070 1.6249990 -1.4530130
N 3.8007370 1.6249990 -1.6786800
C 4.7029040 1.6249990 -0.6702290
C 4.2995430 1.6249990 0.6041590
C 2.8701050 1.6249990 0.8243640
N 2.0112500 1.6249990 -0.1708960
O 1.6903830 1.6249990 -2.4092590
N 2.3989850 1.6249990 2.0604220
H 4.0848920 1.6249990 -2.6317200
H 5.7382910 1.6249990 -0.9548720
H 4.9943920 1.6249990 1.4196840
H 3.0156050 1.6249990 2.8366040
H 1.4048620 1.6249990 2.2234300
--
0 1
O 0.4979320 -1.6249990 1.9422390
C 1.3595090 -1.6249990 1.0916630
N 0.9987390 -1.6249990 -0.2553610
C 1.8577170 -1.6249990 -1.3106620
N 3.1545590 -1.6249990 -1.1831180
C 3.5468800 -1.6249990 0.1030790
C 2.7782730 -1.6249990 1.2410250
N 3.5769760 -1.6249990 2.3678730
C 4.7713760 -1.6249990 1.9183280
N 4.8269760 -1.6249990 0.5422510
N 1.3040920 -1.6249990 -2.5263360
H 0.0072390 -1.6249990 -0.4353230
H 5.6628210 -1.6249990 2.5121130
H 5.6359190 -1.6249990 -0.0329580
H 1.9209400 -1.6249990 -3.3022070
H 0.3140390 -1.6249990 -2.6726050
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '104')] = qcdb.Molecule("""
0 1
C -3.0263940 -1.4464050 0.0000000
N -4.3985350 -1.2378500 0.0000000
C -4.9449180 0.0000290 0.0000000
C -4.1674910 1.0873990 0.0000000
C -2.7399670 0.8551050 0.0000000
N -2.2307000 -0.3568440 0.0000000
O -2.6172300 -2.5848080 0.0000000
N -1.9099420 1.8850830 0.0000000
H -4.9632880 -2.0564360 0.0000000
H -6.0175890 0.0492700 0.0000000
H -4.5763200 2.0777300 0.0000000
H -2.2565290 2.8138220 0.0000000
H -0.9141020 1.7329110 0.0000000
--
0 1
O -1.5665350 0.5226760 3.1900000
C -1.3848270 -0.6743110 3.1900000
N -0.0830050 -1.1742030 3.1900000
C 0.2658570 -2.4894210 3.1900000
N -0.5995930 -3.4636200 3.1900000
C -1.8707500 -3.0250070 3.1900000
C -2.3395920 -1.7343230 3.1900000
N -3.7206970 -1.7181430 3.1900000
C -4.0590580 -2.9486690 3.1900000
N -2.9784690 -3.8024880 3.1900000
N 1.5747700 -2.7560840 3.1900000
H 0.6453760 -0.4778410 3.1900000
H -5.0634190 -3.3208450 3.1900000
H -2.9886000 -4.7950370 3.1900000
H 1.8398890 -3.7111710 3.1900000
H 2.2750440 -2.0410890 3.1900000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '105')] = qcdb.Molecule("""
0 1
N 1.0423840 -1.6008720 0.1400580
C 1.5033500 -0.3559320 0.0311400
N 0.6443890 0.6596690 -0.0577140
C 1.1104570 1.9032530 -0.1665130
N 2.3570820 2.3026230 -0.2014530
C 3.2027210 1.2786930 -0.1118710
C 2.8736500 -0.0510630 0.0044670
N 4.0080740 -0.8368430 0.0732140
C 4.9747240 -0.0009950 0.0000870
N 4.5630980 1.3018810 -0.1139000
H 0.3540060 2.6652780 -0.2331820
H 6.0151290 -0.2558650 0.0223850
H 5.1340420 2.1112380 -0.1847090
H 0.0593220 -1.7779260 0.1555480
H 1.6856960 -2.3528630 0.2058490
--
0 1
O -0.0504540 -1.6178530 -3.0328940
C 1.0293920 -1.0784590 -3.1266030
N 1.0999170 0.3105210 -3.2285410
C 2.2401210 1.0448270 -3.3391500
N 3.4334530 0.5219170 -3.3635050
C 3.4115810 -0.8191700 -3.2674580
C 2.3318990 -1.6598460 -3.1524330
N 2.7451410 -2.9758150 -3.0805400
C 4.0182190 -2.9198150 -3.1499710
N 4.4933620 -1.6323510 -3.2655320
N 2.0870530 2.3690480 -3.4250070
H 0.2128580 0.7884810 -3.2167550
H 4.6831900 -3.7591200 -3.1247610
H 5.4386800 -1.3377250 -3.3349980
H 2.9113910 2.9134700 -3.5059320
H 1.1910290 2.8146150 -3.4104660
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '106')] = qcdb.Molecule("""
0 1
O -2.0303850 -1.8863100 -0.1650310
C -2.7935780 -0.9576130 -0.0837800
N -2.3254430 0.3294740 0.0288250
C -3.0734150 1.4689780 0.1285190
N -4.4253100 1.2496820 0.1093330
C -4.9840910 -0.0010050 -0.0000880
C -4.2544100 -1.1099300 -0.0971060
O -2.6009580 2.5647160 0.2243840
C -4.8233490 -2.4946080 -0.2182500
H -4.9936490 2.0617070 0.1803760
H -1.3198550 0.4553880 0.0398410
H -6.0579390 -0.0189790 -0.0016600
H -4.4932070 -3.1202310 0.6035230
H -4.4917710 -2.9686160 -1.1353540
H -5.9073000 -2.4671550 -0.2167380
--
0 1
C -1.6419140 2.9739730 -3.0239370
N -2.8741190 3.6124140 -3.0421140
C -4.0409900 2.9359160 -3.1500030
C -4.0487470 1.6026030 -3.2447730
C -2.7578360 0.9507400 -3.2245270
N -1.6361750 1.6281560 -3.1188990
O -0.6443040 3.6509540 -2.9247190
N -2.6894340 -0.3672360 -3.3142960
H -2.8516920 4.6040980 -2.9707700
H -4.9376330 3.5267310 -3.1542940
H -4.9593830 1.0447610 -3.3310860
H -3.5136510 -0.9120230 -3.3952410
H -1.7946790 -0.8299350 -3.3010330
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '107')] = qcdb.Molecule("""
0 1
N 1.0423840 -1.6030730 0.1120980
C 1.5033500 -0.3564220 0.0249230
N 0.6443890 0.6605760 -0.0461920
C 1.1104570 1.9058690 -0.1332710
N 2.3570820 2.3057880 -0.1612360
C 3.2027210 1.2804500 -0.0895380
C 2.8736500 -0.0511330 0.0035760
N 4.0080740 -0.8379940 0.0585980
C 4.9747240 -0.0009970 0.0000700
N 4.5630980 1.3036710 -0.0911620
H 0.3540060 2.6689420 -0.1866310
H 6.0151290 -0.2562160 0.0179160
H 5.1340420 2.1141400 -0.1478350
H 0.0593220 -1.7803700 0.1244960
H 1.6856960 -2.3560970 0.1647540
--
0 1
O 1.5241600 -0.5494170 3.3837280
C 1.3439910 0.6454500 3.3087260
N 0.0438450 1.1430380 3.2271380
C -0.3032010 2.4557550 3.1386110
N 0.5626500 3.4294030 3.1191180
C 1.8322280 2.9929620 3.1959900
C 2.2991810 1.7048790 3.2880520
N 3.6791050 1.6903250 3.3455930
C 4.0186060 2.9192810 3.2900230
N 2.9399160 3.7704860 3.1975320
N -1.6107030 2.7204770 3.0698940
H -0.6847300 0.4469420 3.2365720
H 5.0225530 3.2920270 3.3102000
H 2.9511880 4.7614640 3.1419340
H -1.8744930 3.6737330 3.0051240
H -2.3112160 2.0058100 3.0815320
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '108')] = qcdb.Molecule("""
0 1
O -2.0303850 -1.8889030 -0.1320850
C -2.7935780 -0.9589290 -0.0670550
N -2.3254430 0.3299270 0.0230710
C -3.0734150 1.4709970 0.1028620
N -4.4253100 1.2514000 0.0875060
C -4.9840910 -0.0010070 -0.0000700
C -4.2544100 -1.1114560 -0.0777210
O -2.6009580 2.5682420 0.1795890
C -4.8233490 -2.4980370 -0.1746800
H -4.9936490 2.0645410 0.1443670
H -1.3198550 0.4560130 0.0318880
H -6.0579390 -0.0190050 -0.0013290
H -4.4932070 -3.1092230 0.6578860
H -4.4917710 -2.9879780 -1.0833720
H -5.9073000 -2.4705620 -0.1736470
--
0 1
C -3.3369590 -0.6409430 3.3908960
N -4.3247580 -1.6157810 3.3763480
C -4.0409560 -2.9359630 3.2899980
C -2.7744220 -3.3565600 3.2141470
C -1.7557370 -2.3300110 3.2303510
N -2.0543620 -1.0525720 3.3148920
O -3.6734450 0.5183010 3.4703070
N -0.4803010 -2.6733740 3.1585030
H -5.2616330 -1.2870980 3.4334500
H -4.8798930 -3.6062030 3.2865630
H -2.5244870 -4.3961080 3.1450650
H -0.2161270 -3.6266280 3.0937180
H 0.2361240 -1.9652240 3.1691180
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '109')] = qcdb.Molecule("""
0 1
O 2.0303850 -1.8935150 0.0000000
C 2.7935780 -0.9612710 0.0000000
N 2.3254430 0.3307330 0.0000000
C 3.0734150 1.4745890 0.0000000
N 4.4253100 1.2544560 0.0000000
C 4.9840910 -0.0010090 0.0000000
C 4.2544100 -1.1141700 0.0000000
O 2.6009580 2.5745130 0.0000000
C 4.8233490 -2.5041370 0.0000000
H 4.9936490 2.0695820 0.0000000
H 1.3198550 0.4571270 0.0000000
H 6.0579390 -0.0190510 0.0000000
H 4.4932070 -3.0557570 0.8731720
H 4.4917710 -3.0562720 -0.8723020
H 5.9073000 -2.4766570 -0.0008860
--
0 1
O 1.5260840 -0.5520650 3.1800000
C 1.3443760 0.6449210 3.1800000
N 0.0425540 1.1448140 3.1800000
C -0.3063080 2.4600320 3.1800000
N 0.5591430 3.4342300 3.1800000
C 1.8302990 2.9956180 3.1800000
C 2.2991410 1.7049340 3.1800000
N 3.6802460 1.6887540 3.1800000
C 4.0186070 2.9192800 3.1800000
N 2.9380180 3.7730980 3.1800000
N -1.6152210 2.7266950 3.1800000
H -0.6858270 0.4484520 3.1800000
H 5.0229680 3.2914560 3.1800000
H 2.9481490 4.7656470 3.1800000
H -1.8803400 3.6817820 3.1800000
H -2.3154950 2.0117000 3.1800000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '110')] = qcdb.Molecule("""
0 1
N -1.0423840 -1.6069870 0.0000000
C -1.5033500 -0.3572920 0.0000000
N -0.6443890 0.6621890 0.0000000
C -1.1104570 1.9105230 0.0000000
N -2.3570820 2.3114180 0.0000000
C -3.2027210 1.2835770 0.0000000
C -2.8736500 -0.0512580 0.0000000
N -4.0080740 -0.8400400 0.0000000
C -4.9747240 -0.0009990 0.0000000
N -4.5630980 1.3068540 0.0000000
H -0.3540060 2.6754590 0.0000000
H -6.0151290 -0.2568420 0.0000000
H -5.1340420 2.1193020 0.0000000
H -0.0593220 -1.7847170 0.0000000
H -1.6856960 -2.3618500 0.0000000
--
0 1
C -3.3390300 -0.6380930 3.1800000
N -4.3265300 -1.6133420 3.1800000
C -4.0409560 -2.9359630 3.1800000
C -2.7728650 -3.3587040 3.1800000
C -1.7545120 -2.3316960 3.1800000
N -2.0548730 -1.0518690 3.1800000
O -3.6771460 0.5233950 3.1800000
N -0.4776020 -2.6770890 3.1800000
H -5.2645780 -1.2830450 3.1800000
H -4.8798220 -3.6063000 3.1800000
H -2.5215120 -4.4002020 3.1800000
H -0.2120980 -3.6321740 3.1800000
H 0.2386050 -1.9686390 3.1800000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '111')] = qcdb.Molecule("""
0 1
O 2.0303850 -1.8863100 0.1650310
C 2.7935780 -0.9576130 0.0837800
N 2.3254430 0.3294740 -0.0288250
C 3.0734150 1.4689780 -0.1285190
N 4.4253100 1.2496820 -0.1093330
C 4.9840910 -0.0010050 0.0000880
C 4.2544100 -1.1099300 0.0971060
O 2.6009580 2.5647160 -0.2243840
C 4.8233490 -2.4946080 0.2182500
H 4.9936490 2.0617070 -0.1803760
H 1.3198550 0.4553880 -0.0398410
H 6.0579390 -0.0189790 0.0016600
H 4.4932070 -2.9680270 1.1361760
H 4.4917710 -3.1206680 -0.6026110
H 5.9073000 -2.4673100 0.2149720
--
0 1
O -0.0504540 -1.6178530 -3.0328940
C 1.0293920 -1.0784590 -3.1266030
N 1.0999170 0.3105210 -3.2285410
C 2.2401210 1.0448270 -3.3391500
N 3.4334530 0.5219170 -3.3635050
C 3.4115810 -0.8191700 -3.2674580
C 2.3318990 -1.6598460 -3.1524330
N 2.7451410 -2.9758150 -3.0805400
C 4.0182190 -2.9198150 -3.1499710
N 4.4933620 -1.6323510 -3.2655320
N 2.0870530 2.3690480 -3.4250070
H 0.2128580 0.7884810 -3.2167550
H 4.6831900 -3.7591200 -3.1247610
H 5.4386800 -1.3377250 -3.3349980
H 2.9113910 2.9134700 -3.5059320
H 1.1910290 2.8146150 -3.4104660
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '112')] = qcdb.Molecule("""
0 1
N -1.0423840 -1.6008720 -0.1400580
C -1.5033500 -0.3559320 -0.0311400
N -0.6443890 0.6596690 0.0577140
C -1.1104570 1.9032530 0.1665130
N -2.3570820 2.3026230 0.2014530
C -3.2027210 1.2786930 0.1118710
C -2.8736500 -0.0510630 -0.0044670
N -4.0080740 -0.8368430 -0.0732140
C -4.9747240 -0.0009950 -0.0000870
N -4.5630980 1.3018810 0.1139000
H -0.3540060 2.6652780 0.2331820
H -6.0151290 -0.2558650 -0.0223850
H -5.1340420 2.1112380 0.1847090
H -0.0593220 -1.7779260 -0.1555480
H -1.6856960 -2.3528630 -0.2058490
--
0 1
C -1.6419140 2.9739730 -3.0239370
N -2.8741190 3.6124140 -3.0421140
C -4.0409900 2.9359160 -3.1500030
C -4.0487470 1.6026030 -3.2447730
C -2.7578360 0.9507400 -3.2245270
N -1.6361750 1.6281560 -3.1188990
O -0.6443040 3.6509540 -2.9247190
N -2.6894340 -0.3672360 -3.3142960
H -2.8516920 4.6040980 -2.9707700
H -4.9376330 3.5267310 -3.1542940
H -4.9593830 1.0447610 -3.3310860
H -3.5136510 -0.9120230 -3.3952410
H -1.7946790 -0.8299350 -3.3010330
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '113')] = qcdb.Molecule("""
0 1
N 1.3923840 -1.6008720 0.1400580
C 1.8533500 -0.3559320 0.0311400
N 0.9943890 0.6596690 -0.0577140
C 1.4604570 1.9032530 -0.1665130
N 2.7070820 2.3026230 -0.2014530
C 3.5527210 1.2786930 -0.1118710
C 3.2236500 -0.0510630 0.0044670
N 4.3580740 -0.8368430 0.0732140
C 5.3247240 -0.0009950 0.0000870
N 4.9130980 1.3018810 -0.1139000
H 0.7040060 2.6652780 -0.2331820
H 6.3651290 -0.2558650 0.0223850
H 5.4840420 2.1112380 -0.1847090
H 0.4093220 -1.7779260 0.1555480
H 2.0356960 -2.3528630 0.2058490
--
0 1
O 2.4682050 -0.5383510 3.4250310
C 2.5397670 0.6615740 3.3437800
N 1.4045070 1.4276870 3.2311750
C 1.3398450 2.7892110 3.1314810
N 2.5624500 3.4064220 3.1506670
C 3.7496490 2.7230370 3.2600880
C 3.8111350 1.3970020 3.3571060
O 0.3135610 3.3979790 3.0356160
C 5.0853090 0.6111890 3.4782500
H 2.5449500 4.3974240 3.0796240
H 0.5169590 0.9384830 3.2201590
H 4.6289750 3.3396890 3.2616600
H 5.0964880 0.0341320 4.3961760
H 5.1850460 -0.0902010 2.6573890
H 5.9461980 1.2704040 3.4749720
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '114')] = qcdb.Molecule("""
0 1
N -1.4867430 1.6920980 -2.3336600
C -1.5399110 1.6049230 -1.0055780
N -0.4087210 1.5338080 -0.3037890
C -0.4671620 1.4467290 1.0245780
N -1.5291910 1.4187640 1.7901520
C -2.6502880 1.4904620 1.0763140
C -2.7488050 1.5835760 -0.2917850
N -4.0708590 1.6385980 -0.6895780
C -4.7315520 1.5800700 0.4051650
N -3.9369080 1.4888380 1.5187780
H 0.4880690 1.3933690 1.5165470
H -5.7999030 1.5979160 0.4839400
H -4.2294590 1.4321650 2.4660110
H -0.6065830 1.7044960 -2.8060620
H -2.3312660 1.7447540 -2.8510340
--
0 1
O -1.3473090 -1.4479140 -0.7794320
C -2.3605260 -1.5129430 -0.1308130
N -2.3135810 -1.6030670 1.2396240
C -3.3775550 -1.6828570 2.0937100
N -4.5954240 -1.6675010 1.4671030
C -4.7398420 -1.5799270 0.1033200
C -3.7027260 -1.5022770 -0.7272960
O -3.2672880 -1.7595820 3.2832490
C -3.8153430 -1.4053200 -2.2218250
H -5.3872210 -1.7243610 2.0648200
H -1.3961730 -1.6118840 1.6702820
H -5.7555700 -1.5786680 -0.2456340
H -3.3109870 -2.2369830 -2.7010640
H -3.3501360 -0.4957970 -2.5852230
H -4.8546930 -1.4081210 -2.5307720
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '115')] = qcdb.Molecule("""
0 1
N -1.3923840 -1.6069870 0.0000000
C -1.8533500 -0.3572920 0.0000000
N -0.9943890 0.6621890 0.0000000
C -1.4604570 1.9105230 0.0000000
N -2.7070820 2.3114180 0.0000000
C -3.5527210 1.2835770 0.0000000
C -3.2236500 -0.0512580 0.0000000
N -4.3580740 -0.8400400 0.0000000
C -5.3247240 -0.0009990 0.0000000
N -4.9130980 1.3068540 0.0000000
H -0.7040060 2.6754590 0.0000000
H -6.3651290 -0.2568420 0.0000000
H -5.4840420 2.1193020 0.0000000
H -0.4093220 -1.7847170 0.0000000
H -2.0356960 -2.3618500 0.0000000
--
0 1
N -0.1818990 -2.1185030 3.2400000
C -1.2893810 -1.3784270 3.2400000
N -1.1937020 -0.0487650 3.2400000
C -2.3045120 0.6872100 3.2400000
N -3.5486930 0.2787930 3.2400000
C -3.6286790 -1.0498020 3.2400000
C -2.5778590 -1.9362830 3.2400000
N -3.0319930 -3.2412190 3.2400000
C -4.3072050 -3.1306030 3.2400000
N -4.7429290 -1.8305800 3.2400000
H -2.1421480 1.7506870 3.2400000
H -4.9985290 -3.9491190 3.2400000
H -5.6823780 -1.5088880 3.2400000
H 0.7178820 -1.6844600 3.2400000
H -0.2586520 -3.1073290 3.2400000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '116')] = qcdb.Molecule("""
0 1
O 1.6803850 -1.8935150 0.0000000
C 2.4435780 -0.9612710 0.0000000
N 1.9754430 0.3307330 0.0000000
C 2.7234150 1.4745890 0.0000000
N 4.0753100 1.2544560 0.0000000
C 4.6340910 -0.0010090 0.0000000
C 3.9044100 -1.1141700 0.0000000
O 2.2509580 2.5745130 0.0000000
C 4.4733490 -2.5041370 0.0000000
H 4.6436490 2.0695820 0.0000000
H 0.9698550 0.4571270 0.0000000
H 5.7079390 -0.0190510 0.0000000
H 4.1432070 -3.0557570 0.8731720
H 4.1417710 -3.0562720 -0.8723020
H 5.5573000 -2.4766570 -0.0008860
--
0 1
O 2.4724400 -0.5441800 3.2400000
C 2.5419170 0.6586150 3.2400000
N 1.4037670 1.4287050 3.2400000
C 1.3365470 2.7937510 3.2400000
N 2.5596440 3.4102840 3.2400000
C 3.7496510 2.7230340 3.2400000
C 3.8136270 1.3935720 3.2400000
O 0.3078020 3.4059050 3.2400000
C 5.0909100 0.6034800 3.2400000
H 2.5403210 4.4037960 3.2400000
H 0.5159370 0.9398900 3.2400000
H 4.6290170 3.3396300 3.2400000
H 5.1480540 -0.0368430 4.1131720
H 5.1471940 -0.0381040 2.3676980
H 5.9516930 1.2628420 3.2391140
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '117')] = qcdb.Molecule("""
0 1
C 12.1619966 21.5469940 -0.5249999
N 12.0019966 20.1249944 -0.3349999
C 12.9959964 19.1989946 -0.1290000
N 12.5899965 17.9429950 -0.1260000
C 11.2289969 18.0629949 -0.3469999
C 10.2259971 17.0909952 -0.4599999
N 10.4079971 15.7719956 -0.3739999
N 8.9619975 17.5199951 -0.6819998
C 8.7349976 18.8509947 -0.7899998
N 9.6049973 19.8469944 -0.7019998
C 10.8559970 19.3909946 -0.4999999
H 12.8450824 21.9515608 0.2257099
H 12.5490085 21.7744749 -1.5236356
H 11.1843859 22.0177918 -0.4120399
H 14.0220821 19.5129525 0.0161520
H 11.3436468 15.4109067 -0.2800629
H 9.6382753 15.1406078 -0.5991948
H 7.6909448 19.1156876 -0.9420537
--
0 1
C 8.5479976 21.7979939 2.3959993
N 9.1919974 20.5259942 2.6589993
C 8.4229976 19.3799946 2.5429993
O 7.2269980 19.3959946 2.3429993
N 9.0979975 18.2049949 2.7069992
C 10.4579971 18.0869949 2.9379992
O 10.9519969 16.9699952 3.0289992
C 11.2079969 19.3189946 3.0599991
C 12.6759964 19.2659946 3.3619991
C 10.5419970 20.4719943 2.8979992
H 7.4741299 21.6651819 2.5133333
H 8.9049615 22.5495287 3.1049871
H 8.7503455 22.1445498 1.3760436
H 11.0339909 21.4374260 2.9618352
H 13.2133913 18.6878638 2.6029743
H 13.1061963 20.2701373 3.4050200
H 12.8619664 18.7673097 4.3193848
H 8.5371916 17.3217571 2.6353613
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '118')] = qcdb.Molecule("""
0 1
N 10.3469971 14.4959959 8.8169975
C 11.5789968 13.8469961 8.7069976
O 11.6019967 12.6419965 8.4119976
N 12.6939964 14.5549959 8.8809975
C 12.6739964 15.9259955 9.1859974
N 13.8309961 16.5099954 9.3349974
C 11.4219968 16.5639954 9.2669974
C 10.3209971 15.8539956 9.0929975
H 9.3699974 16.4009954 9.1789974
H 11.3019968 17.6379951 9.4699973
H 14.6739959 15.9769955 9.2609974
H 13.8749961 17.4909951 9.5239973
C 9.1059774 13.7460371 8.6280336
H 9.4001314 12.7260934 8.3864956
H 8.5051816 13.7537151 9.5428113
H 8.5206636 14.1698120 7.8064238
--
0 1
C 10.7049970 9.6579973 11.8009967
N 11.0689969 11.0699969 11.9839966
C 10.2199971 12.1419966 11.9589966
N 10.8209970 13.3089963 12.1399966
C 12.1439966 12.9639964 12.2549966
C 13.3189963 13.7529961 12.4509965
O 13.3749963 14.9839958 12.5499965
N 14.4609959 13.0409963 12.5269965
C 14.5119959 11.6719967 12.4369965
N 15.7519956 11.1639969 12.5359965
N 13.4609962 10.8809970 12.2549966
C 12.3209965 11.5909968 12.1779966
H 11.6087247 9.0642815 11.9411017
H 10.3130781 9.4887283 10.7941210
H 9.9552752 9.3611644 12.5389945
H 15.3408647 13.5779012 12.6455145
H 9.1538724 12.0114576 11.8260867
H 15.8197976 10.1594152 12.5501065
H 16.5616854 11.7259467 12.8207994
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '119')] = qcdb.Molecule("""
0 1
N 10.9240000 16.7550000 5.5620000
C 11.6470000 17.8510000 5.8140000
N 12.9490000 17.6590000 5.9790000
C 13.0500000 16.2780000 5.7950000
C 14.1950000 15.4230000 5.8560000
N 15.4060000 15.8590000 6.0610000
N 13.9020000 14.1180000 5.6250000
C 12.6770000 13.6430000 5.3990000
N 11.5490000 14.4040000 5.3300000
C 11.8450000 15.6910000 5.5460000
H 11.1804230 18.8265530 5.8822870
H 12.5884030 12.5696370 5.2620740
H 16.1977530 15.2199420 5.9750360
H 15.5570940 16.8510580 6.1500010
C 9.4931860 16.6413650 5.3399050
H 9.0446590 17.6337380 5.4112840
H 9.2947180 16.2234190 4.3499330
H 9.0442270 15.9854440 6.0897950
--
0 1
C 9.1690000 13.6920000 8.6010000
N 10.3470000 14.4960000 8.8170000
C 11.5790000 13.8470000 8.7070000
O 11.6020000 12.6420000 8.4120000
N 12.6940000 14.5550000 8.8810000
C 12.6740000 15.9260000 9.1860000
N 13.8310000 16.5100000 9.3350000
C 11.4220000 16.5640000 9.2670000
C 10.3210000 15.8540000 9.0930000
H 9.1403680 12.8642760 9.3131620
H 8.2785600 14.3117950 8.7260530
H 9.1795130 13.2651190 7.5953140
H 11.3501160 17.6252970 9.4808030
H 9.3300790 16.2918180 9.1491660
H 14.7113690 15.9651740 9.2135180
H 13.8876420 17.4962710 9.5342540
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '120')] = qcdb.Molecule("""
0 1
N 16.2460000 9.7810000 5.9650000
C 17.5950000 10.0510000 5.9930000
C 18.0920000 11.2690000 5.9020000
C 17.1390000 12.3410000 5.7640000
O 17.4920000 13.5330000 5.6630000
N 15.8280000 12.0550000 5.7130000
C 15.3100000 10.7970000 5.7960000
O 14.1120000 10.5770000 5.7580000
H 18.2280000 9.1744860 6.1031120
C 19.5529600 11.6051630 5.9357380
H 20.1631860 10.7042230 6.0438290
H 19.7760320 12.2828240 6.7658180
H 19.8526100 12.1260780 5.0209680
H 15.1383860 12.8499570 5.6472680
C 15.7717470 8.4029560 6.0779300
H 14.6864640 8.4223240 6.0045990
H 16.1825380 7.7884380 5.2708940
H 16.0652090 7.9755790 7.0417370
--
0 1
C 18.8920000 9.6580000 9.7710000
N 18.5280000 11.0700000 9.5880000
C 19.3770000 12.1420000 9.6130000
N 18.7760000 13.3090000 9.4320000
C 17.4530000 12.9640000 9.3170000
C 16.2780000 13.7530000 9.1210000
O 16.2220000 14.9840000 9.0220000
N 15.1360000 13.0410000 9.0450000
C 15.0850000 11.6720000 9.1350000
N 13.8450000 11.1640000 9.0360000
N 16.1360000 10.8810000 9.3170000
C 17.2760000 11.5910000 9.3940000
H 14.2561290 13.5779040 8.9264920
H 13.0354310 11.7259420 8.7508330
H 13.7773690 10.1594100 9.0211800
H 17.9880060 9.0643740 9.6322420
H 19.2851540 9.4890700 10.7774400
H 19.6407390 9.3607520 9.0321720
H 20.4431070 12.0114850 9.7460660
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '121')] = qcdb.Molecule("""
0 1
H 3.1762460 2.3738070 2.9634160
N 2.3770000 1.8470000 3.2830000
C 1.6370000 2.2160000 4.3790000
H 1.9902970 3.0843050 4.9210710
C 0.5610000 1.4930000 4.7730000
H -0.0085000 1.7736330 5.6470440
C 0.1830000 0.3990000 3.9430000
N -0.8510000 -0.3400000 4.2540000
H -1.1799330 -1.0651510 3.5908230
H -1.4362750 -0.1022370 5.0377650
N 0.8500000 0.0580000 2.8540000
C 1.9550000 0.7640000 2.4990000
O 2.5580000 0.4150000 1.4830000
--
0 1
H 0.0112670 4.2441280 0.3057270
N -0.1600000 4.2010000 1.2990000
C 0.1490000 5.1520000 2.2350000
H 0.8336150 5.9557770 2.0023890
N -0.3040000 4.9000000 3.4380000
C -1.1470000 3.7970000 3.2290000
C -2.0790000 3.1160000 4.0900000
O -2.3440000 3.3110000 5.2740000
N -2.7730000 2.0930000 3.4630000
H -3.4444620 1.6202680 4.0533010
C -2.5700000 1.7190000 2.1650000
N -3.2200000 0.6740000 1.7040000
H -3.7884800 0.1079360 2.3113460
H -3.0424470 0.3264300 0.7529310
N -1.7100000 2.3160000 1.3470000
C -1.0480000 3.3630000 1.9240000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '122')] = qcdb.Molecule("""
0 1
H -3.4958570 -1.4150050 -3.9137580
N -3.0510000 -1.0010000 -3.1090000
C -3.5590000 -0.8800000 -1.8360000
H -4.5790060 -1.1582720 -1.6128580
N -2.7220000 -0.3740000 -0.9680000
C -1.5590000 -0.1810000 -1.7250000
C -0.2720000 0.3480000 -1.4650000
N 0.1070000 0.8840000 -0.3230000
H 1.0433330 1.2579620 -0.3065570
H -0.5751070 1.2407790 0.3499520
N 0.6670000 0.3750000 -2.4130000
C 0.3480000 -0.0810000 -3.6160000
H 1.1321870 -0.0417550 -4.3673920
N -0.8160000 -0.5790000 -4.0190000
C -1.7380000 -0.6050000 -3.0150000
--
0 1
H -1.2611710 -4.7286740 -2.6257100
N -1.6090000 -4.2940000 -1.7860000
C -2.7550000 -4.5990000 -1.0690000
H -3.5136190 -5.2427470 -1.4922410
N -2.8650000 -3.9860000 0.0730000
C -1.6740000 -3.2820000 0.1910000
C -1.1780000 -2.4570000 1.2560000
O -1.7150000 -2.1460000 2.3170000
N 0.0980000 -1.9830000 1.0200000
H 0.4562670 -1.3045040 1.7132710
C 0.8280000 -2.2730000 -0.0890000
N 2.0180000 -1.7250000 -0.1770000
H 2.3044660 -0.9690820 0.4476800
H 2.5064670 -1.8555350 -1.0472790
N 0.3920000 -3.0250000 -1.1030000
C -0.8790000 -3.5010000 -0.9150000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '123')] = qcdb.Molecule("""
0 1
H 4.0780890 0.2050200 6.5267380
N 3.3380000 -0.4520000 6.3380000
C 2.1440000 -0.6140000 7.0100000
H 1.9445960 -0.0744500 7.9251340
N 1.3390000 -1.4880000 6.4770000
C 2.0190000 -1.9110000 5.3320000
C 1.6500000 -2.8430000 4.3020000
O 0.6370000 -3.5330000 4.1980000
N 2.5960000 -2.9520000 3.3010000
H 2.3705000 -3.6388980 2.5623150
C 3.7610000 -2.2490000 3.2730000
N 4.5620000 -2.4690000 2.2580000
H 4.3528370 -3.1696290 1.5459440
H 5.4428290 -1.9835850 2.2550440
N 4.1450000 -1.3880000 4.2160000
C 3.2280000 -1.2560000 5.2240000
--
0 1
H 3.1762460 2.3738070 2.9634160
N 2.3770000 1.8470000 3.2830000
C 1.6370000 2.2160000 4.3790000
H 1.9902970 3.0843050 4.9210710
C 0.5610000 1.4930000 4.7730000
H -0.0085000 1.7736330 5.6470440
C 0.1830000 0.3990000 3.9430000
N -0.8510000 -0.3400000 4.2540000
H -1.1799330 -1.0651510 3.5908230
H -1.4362750 -0.1022370 5.0377650
N 0.8500000 0.0580000 2.8540000
C 1.9550000 0.7640000 2.4990000
O 2.5580000 0.4150000 1.4830000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '124')] = qcdb.Molecule("""
0 1
H -1.2611710 -4.7286740 -2.6257100
N -1.6090000 -4.2940000 -1.7860000
C -2.7550000 -4.5990000 -1.0690000
H -3.5136190 -5.2427470 -1.4922410
N -2.8650000 -3.9860000 0.0730000
C -1.6740000 -3.2820000 0.1910000
C -1.1780000 -2.4570000 1.2560000
O -1.7150000 -2.1460000 2.3170000
N 0.0980000 -1.9830000 1.0200000
H 0.4562670 -1.3045040 1.7132710
C 0.8280000 -2.2730000 -0.0890000
N 2.0180000 -1.7250000 -0.1770000
H 2.3044660 -0.9690820 0.4476800
H 2.5064670 -1.8555350 -1.0472790
N 0.3920000 -3.0250000 -1.1030000
C -0.8790000 -3.5010000 -0.9150000
--
0 1
H 3.2823840 -6.1134940 -1.3105350
N 2.5530000 -6.0070000 -0.6210000
C 1.3990000 -6.7620000 -0.6490000
H 1.3017290 -7.4646550 -1.4662410
C 0.4550000 -6.5890000 0.3070000
H -0.4593850 -7.1648600 0.2947650
C 0.7210000 -5.6290000 1.3280000
N -0.1590000 -5.3940000 2.2700000
H -1.0266130 -5.9017830 2.3125200
H 0.0709100 -4.7127400 3.0149280
N 1.8460000 -4.9310000 1.3860000
C 2.7800000 -5.0940000 0.4140000
O 3.8210000 -4.4400000 0.4780000
units angstrom
""")
# <<< Derived Geometry Strings >>>
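# For each reaction, four monomer geometries are derived from the stored dimer:
# the -unCP pair are the bare fragments in their own basis sets, while the -CP
# pair keep the partner fragment's atoms as ghost centers, so that
# counterpoise-corrected (BSSE-free) monomer energies can be evaluated.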
for rxn in HRXN:
GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
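# A minimal sketch (illustrative only, not part of the database) of how the
# derived keys are conventionally combined into a counterpoise-corrected
# interaction energy; `energy` is a hypothetical callable standing in for
# whatever electronic-structure driver consumes these qcdb.Molecule objects.
def _cp_interaction_energy(energy, rxn):
    e_dim = energy(GEOS['%s-%s-dimer' % (dbse, rxn)])
    e_a = energy(GEOS['%s-%s-monoA-CP' % (dbse, rxn)])  # monomer A with ghost B
    e_b = energy(GEOS['%s-%s-monoB-CP' % (dbse, rxn)])  # monomer B with ghost A
    return e_dim - e_a - e_b  # negative for bound dimers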
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
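# The values below are in hartree.  As a consistency check, each one can be
# recomputed directly from the matching geometry above via
# E_NN = sum_{i<j} Z_i * Z_j / r_ij in atomic units.  A minimal sketch
# (illustrative only; `geom` is a hypothetical list of (Z, x, y, z) tuples
# with Cartesian coordinates in angstrom, as in the blocks above):
def _nuclear_repulsion(geom):
    ang2bohr = 1.0 / 0.52917721067  # approximate bohr radius in angstrom
    enuc = 0.0
    for i, (qi, xi, yi, zi) in enumerate(geom):
        for qj, xj, yj, zj in geom[i + 1:]:
            r = ((xi - xj) ** 2 + (yi - yj) ** 2 + (zi - zj) ** 2) ** 0.5
            enuc += qi * qj / (r * ang2bohr)  # pair repulsion in hartree
    return enuc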
DATA['NUCLEAR REPULSION ENERGY']['JSCH-1-dimer' ] = 1391.98129069
DATA['NUCLEAR REPULSION ENERGY']['JSCH-1-monoA-unCP' ] = 357.13933560
DATA['NUCLEAR REPULSION ENERGY']['JSCH-1-monoB-unCP' ] = 596.62760720
DATA['NUCLEAR REPULSION ENERGY']['JSCH-2-dimer' ] = 1654.40527853
DATA['NUCLEAR REPULSION ENERGY']['JSCH-2-monoA-unCP' ] = 443.56399475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-2-monoB-unCP' ] = 696.60732032
DATA['NUCLEAR REPULSION ENERGY']['JSCH-3-dimer' ] = 1365.23227533
DATA['NUCLEAR REPULSION ENERGY']['JSCH-3-monoA-unCP' ] = 503.39630679
DATA['NUCLEAR REPULSION ENERGY']['JSCH-3-monoB-unCP' ] = 440.30156925
DATA['NUCLEAR REPULSION ENERGY']['JSCH-4-dimer' ] = 1645.63864536
DATA['NUCLEAR REPULSION ENERGY']['JSCH-4-monoA-unCP' ] = 596.45767348
DATA['NUCLEAR REPULSION ENERGY']['JSCH-4-monoB-unCP' ] = 533.27333592
DATA['NUCLEAR REPULSION ENERGY']['JSCH-5-dimer' ] = 1519.08619634
DATA['NUCLEAR REPULSION ENERGY']['JSCH-5-monoA-unCP' ] = 694.08169190
DATA['NUCLEAR REPULSION ENERGY']['JSCH-5-monoB-unCP' ] = 357.17481831
DATA['NUCLEAR REPULSION ENERGY']['JSCH-6-dimer' ] = 1250.60241408
DATA['NUCLEAR REPULSION ENERGY']['JSCH-6-monoA-unCP' ] = 357.05937707
DATA['NUCLEAR REPULSION ENERGY']['JSCH-6-monoB-unCP' ] = 502.93669666
DATA['NUCLEAR REPULSION ENERGY']['JSCH-7-dimer' ] = 1377.89785724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-7-monoA-unCP' ] = 596.76364865
DATA['NUCLEAR REPULSION ENERGY']['JSCH-7-monoB-unCP' ] = 357.05278633
DATA['NUCLEAR REPULSION ENERGY']['JSCH-8-dimer' ] = 1101.46127813
DATA['NUCLEAR REPULSION ENERGY']['JSCH-8-monoA-unCP' ] = 357.43034135
DATA['NUCLEAR REPULSION ENERGY']['JSCH-8-monoB-unCP' ] = 369.97349400
DATA['NUCLEAR REPULSION ENERGY']['JSCH-9-dimer' ] = 1026.69630020
DATA['NUCLEAR REPULSION ENERGY']['JSCH-9-monoA-unCP' ] = 357.07506993
DATA['NUCLEAR REPULSION ENERGY']['JSCH-9-monoB-unCP' ] = 357.22791266
DATA['NUCLEAR REPULSION ENERGY']['JSCH-10-dimer' ] = 1049.26311591
DATA['NUCLEAR REPULSION ENERGY']['JSCH-10-monoA-unCP' ] = 357.30966824
DATA['NUCLEAR REPULSION ENERGY']['JSCH-10-monoB-unCP' ] = 357.25457437
DATA['NUCLEAR REPULSION ENERGY']['JSCH-11-dimer' ] = 1501.52577097
DATA['NUCLEAR REPULSION ENERGY']['JSCH-11-monoA-unCP' ] = 357.30771904
DATA['NUCLEAR REPULSION ENERGY']['JSCH-11-monoB-unCP' ] = 670.49331720
DATA['NUCLEAR REPULSION ENERGY']['JSCH-12-dimer' ] = 1338.80888094
DATA['NUCLEAR REPULSION ENERGY']['JSCH-12-monoA-unCP' ] = 502.97292629
DATA['NUCLEAR REPULSION ENERGY']['JSCH-12-monoB-unCP' ] = 412.74720533
DATA['NUCLEAR REPULSION ENERGY']['JSCH-13-dimer' ] = 1521.60537748
DATA['NUCLEAR REPULSION ENERGY']['JSCH-13-monoA-unCP' ] = 596.65701652
DATA['NUCLEAR REPULSION ENERGY']['JSCH-13-monoB-unCP' ] = 440.44274318
DATA['NUCLEAR REPULSION ENERGY']['JSCH-14-dimer' ] = 1516.62359887
DATA['NUCLEAR REPULSION ENERGY']['JSCH-14-monoA-unCP' ] = 596.92255465
DATA['NUCLEAR REPULSION ENERGY']['JSCH-14-monoB-unCP' ] = 440.54554467
DATA['NUCLEAR REPULSION ENERGY']['JSCH-15-dimer' ] = 1318.42675206
DATA['NUCLEAR REPULSION ENERGY']['JSCH-15-monoA-unCP' ] = 503.53728859
DATA['NUCLEAR REPULSION ENERGY']['JSCH-15-monoB-unCP' ] = 425.75653587
DATA['NUCLEAR REPULSION ENERGY']['JSCH-16-dimer' ] = 1478.61731319
DATA['NUCLEAR REPULSION ENERGY']['JSCH-16-monoA-unCP' ] = 596.66795120
DATA['NUCLEAR REPULSION ENERGY']['JSCH-16-monoB-unCP' ] = 413.04224329
DATA['NUCLEAR REPULSION ENERGY']['JSCH-17-dimer' ] = 1487.72900733
DATA['NUCLEAR REPULSION ENERGY']['JSCH-17-monoA-unCP' ] = 596.75974596
DATA['NUCLEAR REPULSION ENERGY']['JSCH-17-monoB-unCP' ] = 412.84579804
DATA['NUCLEAR REPULSION ENERGY']['JSCH-18-dimer' ] = 1229.51638352
DATA['NUCLEAR REPULSION ENERGY']['JSCH-18-monoA-unCP' ] = 356.91023176
DATA['NUCLEAR REPULSION ENERGY']['JSCH-18-monoB-unCP' ] = 503.30931271
DATA['NUCLEAR REPULSION ENERGY']['JSCH-19-dimer' ] = 1706.17310708
DATA['NUCLEAR REPULSION ENERGY']['JSCH-19-monoA-unCP' ] = 596.15051246
DATA['NUCLEAR REPULSION ENERGY']['JSCH-19-monoB-unCP' ] = 596.63218121
DATA['NUCLEAR REPULSION ENERGY']['JSCH-20-dimer' ] = 1830.02648907
DATA['NUCLEAR REPULSION ENERGY']['JSCH-20-monoA-unCP' ] = 596.58545524
DATA['NUCLEAR REPULSION ENERGY']['JSCH-20-monoB-unCP' ] = 670.44234386
DATA['NUCLEAR REPULSION ENERGY']['JSCH-21-dimer' ] = 1835.32380783
DATA['NUCLEAR REPULSION ENERGY']['JSCH-21-monoA-unCP' ] = 670.04325278
DATA['NUCLEAR REPULSION ENERGY']['JSCH-21-monoB-unCP' ] = 596.84640861
DATA['NUCLEAR REPULSION ENERGY']['JSCH-22-dimer' ] = 1578.33475973
DATA['NUCLEAR REPULSION ENERGY']['JSCH-22-monoA-unCP' ] = 596.48090327
DATA['NUCLEAR REPULSION ENERGY']['JSCH-22-monoB-unCP' ] = 503.33140569
DATA['NUCLEAR REPULSION ENERGY']['JSCH-23-dimer' ] = 1570.60868318
DATA['NUCLEAR REPULSION ENERGY']['JSCH-23-monoA-unCP' ] = 503.54456755
DATA['NUCLEAR REPULSION ENERGY']['JSCH-23-monoB-unCP' ] = 596.89708469
DATA['NUCLEAR REPULSION ENERGY']['JSCH-24-dimer' ] = 1563.56410044
DATA['NUCLEAR REPULSION ENERGY']['JSCH-24-monoA-unCP' ] = 593.67756289
DATA['NUCLEAR REPULSION ENERGY']['JSCH-24-monoB-unCP' ] = 501.45867869
DATA['NUCLEAR REPULSION ENERGY']['JSCH-25-dimer' ] = 1563.69890911
DATA['NUCLEAR REPULSION ENERGY']['JSCH-25-monoA-unCP' ] = 595.94249141
DATA['NUCLEAR REPULSION ENERGY']['JSCH-25-monoB-unCP' ] = 503.12213297
DATA['NUCLEAR REPULSION ENERGY']['JSCH-26-dimer' ] = 1590.81054033
DATA['NUCLEAR REPULSION ENERGY']['JSCH-26-monoA-unCP' ] = 596.44241276
DATA['NUCLEAR REPULSION ENERGY']['JSCH-26-monoB-unCP' ] = 502.87235332
DATA['NUCLEAR REPULSION ENERGY']['JSCH-27-dimer' ] = 1551.55026390
DATA['NUCLEAR REPULSION ENERGY']['JSCH-27-monoA-unCP' ] = 595.72714752
DATA['NUCLEAR REPULSION ENERGY']['JSCH-27-monoB-unCP' ] = 503.45401843
DATA['NUCLEAR REPULSION ENERGY']['JSCH-28-dimer' ] = 1411.30275525
DATA['NUCLEAR REPULSION ENERGY']['JSCH-28-monoA-unCP' ] = 503.40799836
DATA['NUCLEAR REPULSION ENERGY']['JSCH-28-monoB-unCP' ] = 503.40916818
DATA['NUCLEAR REPULSION ENERGY']['JSCH-29-dimer' ] = 1424.44630670
DATA['NUCLEAR REPULSION ENERGY']['JSCH-29-monoA-unCP' ] = 503.49043267
DATA['NUCLEAR REPULSION ENERGY']['JSCH-29-monoB-unCP' ] = 502.94567640
DATA['NUCLEAR REPULSION ENERGY']['JSCH-30-dimer' ] = 1435.87093606
DATA['NUCLEAR REPULSION ENERGY']['JSCH-30-monoA-unCP' ] = 503.11592074
DATA['NUCLEAR REPULSION ENERGY']['JSCH-30-monoB-unCP' ] = 503.11223193
DATA['NUCLEAR REPULSION ENERGY']['JSCH-31-dimer' ] = 1849.09724927
DATA['NUCLEAR REPULSION ENERGY']['JSCH-31-monoA-unCP' ] = 596.75580700
DATA['NUCLEAR REPULSION ENERGY']['JSCH-31-monoB-unCP' ] = 693.16448502
DATA['NUCLEAR REPULSION ENERGY']['JSCH-32-dimer' ] = 1250.78225068
DATA['NUCLEAR REPULSION ENERGY']['JSCH-32-monoA-unCP' ] = 413.69053788
DATA['NUCLEAR REPULSION ENERGY']['JSCH-32-monoB-unCP' ] = 413.05557496
DATA['NUCLEAR REPULSION ENERGY']['JSCH-33-dimer' ] = 1622.96228374
DATA['NUCLEAR REPULSION ENERGY']['JSCH-33-monoA-unCP' ] = 595.94046611
DATA['NUCLEAR REPULSION ENERGY']['JSCH-33-monoB-unCP' ] = 533.43445531
DATA['NUCLEAR REPULSION ENERGY']['JSCH-34-dimer' ] = 1657.51101967
DATA['NUCLEAR REPULSION ENERGY']['JSCH-34-monoA-unCP' ] = 442.44825872
DATA['NUCLEAR REPULSION ENERGY']['JSCH-34-monoB-unCP' ] = 697.73092506
DATA['NUCLEAR REPULSION ENERGY']['JSCH-35-dimer' ] = 1626.09750599
DATA['NUCLEAR REPULSION ENERGY']['JSCH-35-monoA-unCP' ] = 595.84177555
DATA['NUCLEAR REPULSION ENERGY']['JSCH-35-monoB-unCP' ] = 535.63812262
DATA['NUCLEAR REPULSION ENERGY']['JSCH-36-dimer' ] = 1590.75136012
DATA['NUCLEAR REPULSION ENERGY']['JSCH-36-monoA-unCP' ] = 596.77416801
DATA['NUCLEAR REPULSION ENERGY']['JSCH-36-monoB-unCP' ] = 503.84093948
DATA['NUCLEAR REPULSION ENERGY']['JSCH-37-dimer' ] = 1401.39568382
DATA['NUCLEAR REPULSION ENERGY']['JSCH-37-monoA-unCP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-37-monoB-unCP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-38-dimer' ] = 1399.57843792
DATA['NUCLEAR REPULSION ENERGY']['JSCH-38-monoA-unCP' ] = 596.88729965
DATA['NUCLEAR REPULSION ENERGY']['JSCH-38-monoB-unCP' ] = 357.96626427
DATA['NUCLEAR REPULSION ENERGY']['JSCH-39-dimer' ] = 1349.21626455
DATA['NUCLEAR REPULSION ENERGY']['JSCH-39-monoA-unCP' ] = 601.53395829
DATA['NUCLEAR REPULSION ENERGY']['JSCH-39-monoB-unCP' ] = 359.95486861
DATA['NUCLEAR REPULSION ENERGY']['JSCH-40-dimer' ] = 1314.37579643
DATA['NUCLEAR REPULSION ENERGY']['JSCH-40-monoA-unCP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-40-monoB-unCP' ] = 601.53394221
DATA['NUCLEAR REPULSION ENERGY']['JSCH-41-dimer' ] = 1334.39438102
DATA['NUCLEAR REPULSION ENERGY']['JSCH-41-monoA-unCP' ] = 507.48990840
DATA['NUCLEAR REPULSION ENERGY']['JSCH-41-monoB-unCP' ] = 443.36744333
DATA['NUCLEAR REPULSION ENERGY']['JSCH-42-dimer' ] = 1319.28084098
DATA['NUCLEAR REPULSION ENERGY']['JSCH-42-monoA-unCP' ] = 443.36745667
DATA['NUCLEAR REPULSION ENERGY']['JSCH-42-monoB-unCP' ] = 507.48988528
DATA['NUCLEAR REPULSION ENERGY']['JSCH-43-dimer' ] = 973.06142895
DATA['NUCLEAR REPULSION ENERGY']['JSCH-43-monoA-unCP' ] = 359.95499475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-43-monoB-unCP' ] = 359.95499475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-44-dimer' ] = 1724.48127327
DATA['NUCLEAR REPULSION ENERGY']['JSCH-44-monoA-unCP' ] = 601.53410726
DATA['NUCLEAR REPULSION ENERGY']['JSCH-44-monoB-unCP' ] = 601.53410726
DATA['NUCLEAR REPULSION ENERGY']['JSCH-45-dimer' ] = 1806.98548380
DATA['NUCLEAR REPULSION ENERGY']['JSCH-45-monoA-unCP' ] = 601.53395829
DATA['NUCLEAR REPULSION ENERGY']['JSCH-45-monoB-unCP' ] = 601.53394398
DATA['NUCLEAR REPULSION ENERGY']['JSCH-46-dimer' ] = 971.53306922
DATA['NUCLEAR REPULSION ENERGY']['JSCH-46-monoA-unCP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-46-monoB-unCP' ] = 359.95491240
DATA['NUCLEAR REPULSION ENERGY']['JSCH-47-dimer' ] = 1208.85975313
DATA['NUCLEAR REPULSION ENERGY']['JSCH-47-monoA-unCP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-47-monoB-unCP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-48-dimer' ] = 1443.15679999
DATA['NUCLEAR REPULSION ENERGY']['JSCH-48-monoA-unCP' ] = 443.36742635
DATA['NUCLEAR REPULSION ENERGY']['JSCH-48-monoB-unCP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-49-dimer' ] = 1481.99538897
DATA['NUCLEAR REPULSION ENERGY']['JSCH-49-monoA-unCP' ] = 601.53400057
DATA['NUCLEAR REPULSION ENERGY']['JSCH-49-monoB-unCP' ] = 443.36740384
DATA['NUCLEAR REPULSION ENERGY']['JSCH-50-dimer' ] = 1189.16944115
DATA['NUCLEAR REPULSION ENERGY']['JSCH-50-monoA-unCP' ] = 507.48984219
DATA['NUCLEAR REPULSION ENERGY']['JSCH-50-monoB-unCP' ] = 359.95489222
DATA['NUCLEAR REPULSION ENERGY']['JSCH-51-dimer' ] = 1634.77658138
DATA['NUCLEAR REPULSION ENERGY']['JSCH-51-monoA-unCP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-51-monoB-unCP' ] = 601.53400527
DATA['NUCLEAR REPULSION ENERGY']['JSCH-52-dimer' ] = 1081.19928424
DATA['NUCLEAR REPULSION ENERGY']['JSCH-52-monoA-unCP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-52-monoB-unCP' ] = 359.95488319
DATA['NUCLEAR REPULSION ENERGY']['JSCH-53-dimer' ] = 1083.47826689
DATA['NUCLEAR REPULSION ENERGY']['JSCH-53-monoA-unCP' ] = 443.36742741
DATA['NUCLEAR REPULSION ENERGY']['JSCH-53-monoB-unCP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-54-dimer' ] = 1581.29557886
DATA['NUCLEAR REPULSION ENERGY']['JSCH-54-monoA-unCP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-54-monoB-unCP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-55-dimer' ] = 1219.56453397
DATA['NUCLEAR REPULSION ENERGY']['JSCH-55-monoA-unCP' ] = 443.36742635
DATA['NUCLEAR REPULSION ENERGY']['JSCH-55-monoB-unCP' ] = 443.36742934
DATA['NUCLEAR REPULSION ENERGY']['JSCH-56-dimer' ] = 1405.28947121
DATA['NUCLEAR REPULSION ENERGY']['JSCH-56-monoA-unCP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-56-monoB-unCP' ] = 507.48987021
DATA['NUCLEAR REPULSION ENERGY']['JSCH-57-dimer' ] = 1474.05108110
DATA['NUCLEAR REPULSION ENERGY']['JSCH-57-monoA-unCP' ] = 507.48984783
DATA['NUCLEAR REPULSION ENERGY']['JSCH-57-monoB-unCP' ] = 507.48984783
DATA['NUCLEAR REPULSION ENERGY']['JSCH-58-dimer' ] = 1198.83425089
DATA['NUCLEAR REPULSION ENERGY']['JSCH-58-monoA-unCP' ] = 443.36742068
DATA['NUCLEAR REPULSION ENERGY']['JSCH-58-monoB-unCP' ] = 443.36742068
DATA['NUCLEAR REPULSION ENERGY']['JSCH-59-dimer' ] = 1321.59763058
DATA['NUCLEAR REPULSION ENERGY']['JSCH-59-monoA-unCP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-59-monoB-unCP' ] = 443.36741425
DATA['NUCLEAR REPULSION ENERGY']['JSCH-60-dimer' ] = 1311.14164882
DATA['NUCLEAR REPULSION ENERGY']['JSCH-60-monoA-unCP' ] = 507.48987743
DATA['NUCLEAR REPULSION ENERGY']['JSCH-60-monoB-unCP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-61-dimer' ] = 1662.05565427
DATA['NUCLEAR REPULSION ENERGY']['JSCH-61-monoA-unCP' ] = 595.94046611
DATA['NUCLEAR REPULSION ENERGY']['JSCH-61-monoB-unCP' ] = 596.81166025
DATA['NUCLEAR REPULSION ENERGY']['JSCH-62-dimer' ] = 1471.46519284
DATA['NUCLEAR REPULSION ENERGY']['JSCH-62-monoA-unCP' ] = 533.43445531
DATA['NUCLEAR REPULSION ENERGY']['JSCH-62-monoB-unCP' ] = 534.51236588
DATA['NUCLEAR REPULSION ENERGY']['JSCH-63-dimer' ] = 2118.75518694
DATA['NUCLEAR REPULSION ENERGY']['JSCH-63-monoA-unCP' ] = 697.73026630
DATA['NUCLEAR REPULSION ENERGY']['JSCH-63-monoB-unCP' ] = 697.73092506
DATA['NUCLEAR REPULSION ENERGY']['JSCH-64-dimer' ] = 1195.41740656
DATA['NUCLEAR REPULSION ENERGY']['JSCH-64-monoA-unCP' ] = 442.44825872
DATA['NUCLEAR REPULSION ENERGY']['JSCH-64-monoB-unCP' ] = 442.44825964
DATA['NUCLEAR REPULSION ENERGY']['JSCH-65-dimer' ] = 1827.15190604
DATA['NUCLEAR REPULSION ENERGY']['JSCH-65-monoA-unCP' ] = 595.84177555
DATA['NUCLEAR REPULSION ENERGY']['JSCH-65-monoB-unCP' ] = 697.73051558
DATA['NUCLEAR REPULSION ENERGY']['JSCH-66-dimer' ] = 1330.55895484
DATA['NUCLEAR REPULSION ENERGY']['JSCH-66-monoA-unCP' ] = 443.14986171
DATA['NUCLEAR REPULSION ENERGY']['JSCH-66-monoB-unCP' ] = 535.63812262
DATA['NUCLEAR REPULSION ENERGY']['JSCH-67-dimer' ] = 1207.89930362
DATA['NUCLEAR REPULSION ENERGY']['JSCH-67-monoA-unCP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-67-monoB-unCP' ] = 503.84093948
DATA['NUCLEAR REPULSION ENERGY']['JSCH-68-dimer' ] = 1669.65398984
DATA['NUCLEAR REPULSION ENERGY']['JSCH-68-monoA-unCP' ] = 596.77416801
DATA['NUCLEAR REPULSION ENERGY']['JSCH-68-monoB-unCP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-69-dimer' ] = 1734.47387907
DATA['NUCLEAR REPULSION ENERGY']['JSCH-69-monoA-unCP' ] = 596.88729965
DATA['NUCLEAR REPULSION ENERGY']['JSCH-69-monoB-unCP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-70-dimer' ] = 963.62312494
DATA['NUCLEAR REPULSION ENERGY']['JSCH-70-monoA-unCP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-70-monoB-unCP' ] = 357.96626427
DATA['NUCLEAR REPULSION ENERGY']['JSCH-71-dimer' ] = 1537.57227681
DATA['NUCLEAR REPULSION ENERGY']['JSCH-71-monoA-unCP' ] = 596.44964921
DATA['NUCLEAR REPULSION ENERGY']['JSCH-71-monoB-unCP' ] = 357.10169648
DATA['NUCLEAR REPULSION ENERGY']['JSCH-72-dimer' ] = 1870.03529750
DATA['NUCLEAR REPULSION ENERGY']['JSCH-72-monoA-unCP' ] = 696.44803543
DATA['NUCLEAR REPULSION ENERGY']['JSCH-72-monoB-unCP' ] = 443.64584898
DATA['NUCLEAR REPULSION ENERGY']['JSCH-73-dimer' ] = 1542.14304870
DATA['NUCLEAR REPULSION ENERGY']['JSCH-73-monoA-unCP' ] = 503.36564485
DATA['NUCLEAR REPULSION ENERGY']['JSCH-73-monoB-unCP' ] = 440.14700689
DATA['NUCLEAR REPULSION ENERGY']['JSCH-74-dimer' ] = 1873.30862324
DATA['NUCLEAR REPULSION ENERGY']['JSCH-74-monoA-unCP' ] = 596.40342598
DATA['NUCLEAR REPULSION ENERGY']['JSCH-74-monoB-unCP' ] = 532.86039581
DATA['NUCLEAR REPULSION ENERGY']['JSCH-75-dimer' ] = 1136.50020569
DATA['NUCLEAR REPULSION ENERGY']['JSCH-75-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-75-monoB-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-76-dimer' ] = 1143.60873849
DATA['NUCLEAR REPULSION ENERGY']['JSCH-76-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-76-monoB-unCP' ] = 355.44451746
DATA['NUCLEAR REPULSION ENERGY']['JSCH-77-dimer' ] = 1144.33569661
DATA['NUCLEAR REPULSION ENERGY']['JSCH-77-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-77-monoB-unCP' ] = 355.44455365
DATA['NUCLEAR REPULSION ENERGY']['JSCH-78-dimer' ] = 1144.53152982
DATA['NUCLEAR REPULSION ENERGY']['JSCH-78-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-78-monoB-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-79-dimer' ] = 1136.39531003
DATA['NUCLEAR REPULSION ENERGY']['JSCH-79-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-79-monoB-unCP' ] = 355.44458170
DATA['NUCLEAR REPULSION ENERGY']['JSCH-80-dimer' ] = 1137.56590421
DATA['NUCLEAR REPULSION ENERGY']['JSCH-80-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-80-monoB-unCP' ] = 355.44458162
DATA['NUCLEAR REPULSION ENERGY']['JSCH-81-dimer' ] = 1089.71176518
DATA['NUCLEAR REPULSION ENERGY']['JSCH-81-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-81-monoB-unCP' ] = 355.44458170
DATA['NUCLEAR REPULSION ENERGY']['JSCH-82-dimer' ] = 1135.52588803
DATA['NUCLEAR REPULSION ENERGY']['JSCH-82-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-82-monoB-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-83-dimer' ] = 1135.89252554
DATA['NUCLEAR REPULSION ENERGY']['JSCH-83-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-83-monoB-unCP' ] = 355.44457113
DATA['NUCLEAR REPULSION ENERGY']['JSCH-84-dimer' ] = 1136.27990430
DATA['NUCLEAR REPULSION ENERGY']['JSCH-84-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-84-monoB-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-85-dimer' ] = 1137.68428928
DATA['NUCLEAR REPULSION ENERGY']['JSCH-85-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-85-monoB-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-86-dimer' ] = 1091.48755032
DATA['NUCLEAR REPULSION ENERGY']['JSCH-86-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-86-monoB-unCP' ] = 355.44459806
DATA['NUCLEAR REPULSION ENERGY']['JSCH-87-dimer' ] = 1114.79473660
DATA['NUCLEAR REPULSION ENERGY']['JSCH-87-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-87-monoB-unCP' ] = 355.44457375
DATA['NUCLEAR REPULSION ENERGY']['JSCH-88-dimer' ] = 1144.74104397
DATA['NUCLEAR REPULSION ENERGY']['JSCH-88-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-88-monoB-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-89-dimer' ] = 1593.04361768
DATA['NUCLEAR REPULSION ENERGY']['JSCH-89-monoA-unCP' ] = 501.81461749
DATA['NUCLEAR REPULSION ENERGY']['JSCH-89-monoB-unCP' ] = 501.81461592
DATA['NUCLEAR REPULSION ENERGY']['JSCH-90-dimer' ] = 1914.59068159
DATA['NUCLEAR REPULSION ENERGY']['JSCH-90-monoA-unCP' ] = 593.90346744
DATA['NUCLEAR REPULSION ENERGY']['JSCH-90-monoB-unCP' ] = 593.90347753
DATA['NUCLEAR REPULSION ENERGY']['JSCH-91-dimer' ] = 1358.00357589
DATA['NUCLEAR REPULSION ENERGY']['JSCH-91-monoA-unCP' ] = 501.81461749
DATA['NUCLEAR REPULSION ENERGY']['JSCH-91-monoB-unCP' ] = 355.44452826
DATA['NUCLEAR REPULSION ENERGY']['JSCH-92-dimer' ] = 1749.63836451
DATA['NUCLEAR REPULSION ENERGY']['JSCH-92-monoA-unCP' ] = 593.90347360
DATA['NUCLEAR REPULSION ENERGY']['JSCH-92-monoB-unCP' ] = 501.81458877
DATA['NUCLEAR REPULSION ENERGY']['JSCH-93-dimer' ] = 1135.19068685
DATA['NUCLEAR REPULSION ENERGY']['JSCH-93-monoA-unCP' ] = 355.44454853
DATA['NUCLEAR REPULSION ENERGY']['JSCH-93-monoB-unCP' ] = 355.44453848
DATA['NUCLEAR REPULSION ENERGY']['JSCH-94-dimer' ] = 1368.42192946
DATA['NUCLEAR REPULSION ENERGY']['JSCH-94-monoA-unCP' ] = 501.81461749
DATA['NUCLEAR REPULSION ENERGY']['JSCH-94-monoB-unCP' ] = 355.38546038
DATA['NUCLEAR REPULSION ENERGY']['JSCH-95-dimer' ] = 1491.03516654
DATA['NUCLEAR REPULSION ENERGY']['JSCH-95-monoA-unCP' ] = 593.90347360
DATA['NUCLEAR REPULSION ENERGY']['JSCH-95-monoB-unCP' ] = 355.44418383
DATA['NUCLEAR REPULSION ENERGY']['JSCH-96-dimer' ] = 1143.55810352
DATA['NUCLEAR REPULSION ENERGY']['JSCH-96-monoA-unCP' ] = 355.44454853
DATA['NUCLEAR REPULSION ENERGY']['JSCH-96-monoB-unCP' ] = 355.38590060
DATA['NUCLEAR REPULSION ENERGY']['JSCH-97-dimer' ] = 1124.41284995
DATA['NUCLEAR REPULSION ENERGY']['JSCH-97-monoA-unCP' ] = 355.38547127
DATA['NUCLEAR REPULSION ENERGY']['JSCH-97-monoB-unCP' ] = 355.38549385
DATA['NUCLEAR REPULSION ENERGY']['JSCH-98-dimer' ] = 1517.60433270
DATA['NUCLEAR REPULSION ENERGY']['JSCH-98-monoA-unCP' ] = 593.90347360
DATA['NUCLEAR REPULSION ENERGY']['JSCH-98-monoB-unCP' ] = 355.38464230
DATA['NUCLEAR REPULSION ENERGY']['JSCH-99-dimer' ] = 1912.03719777
DATA['NUCLEAR REPULSION ENERGY']['JSCH-99-monoA-unCP' ] = 601.53395829
DATA['NUCLEAR REPULSION ENERGY']['JSCH-99-monoB-unCP' ] = 601.53394221
DATA['NUCLEAR REPULSION ENERGY']['JSCH-100-dimer' ] = 1120.88525374
DATA['NUCLEAR REPULSION ENERGY']['JSCH-100-monoA-unCP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-100-monoB-unCP' ] = 359.95486861
DATA['NUCLEAR REPULSION ENERGY']['JSCH-101-dimer' ] = 1612.73592913
DATA['NUCLEAR REPULSION ENERGY']['JSCH-101-monoA-unCP' ] = 507.48990840
DATA['NUCLEAR REPULSION ENERGY']['JSCH-101-monoB-unCP' ] = 507.48988528
DATA['NUCLEAR REPULSION ENERGY']['JSCH-102-dimer' ] = 1415.77211916
DATA['NUCLEAR REPULSION ENERGY']['JSCH-102-monoA-unCP' ] = 443.36745667
DATA['NUCLEAR REPULSION ENERGY']['JSCH-102-monoB-unCP' ] = 443.36744333
DATA['NUCLEAR REPULSION ENERGY']['JSCH-103-dimer' ] = 1529.52830806
DATA['NUCLEAR REPULSION ENERGY']['JSCH-103-monoA-unCP' ] = 359.95499475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-103-monoB-unCP' ] = 601.53410726
DATA['NUCLEAR REPULSION ENERGY']['JSCH-104-dimer' ] = 1475.57522935
DATA['NUCLEAR REPULSION ENERGY']['JSCH-104-monoA-unCP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-104-monoB-unCP' ] = 601.53394398
DATA['NUCLEAR REPULSION ENERGY']['JSCH-105-dimer' ] = 1782.23519943
DATA['NUCLEAR REPULSION ENERGY']['JSCH-105-monoA-unCP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-105-monoB-unCP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-106-dimer' ] = 1257.02669139
DATA['NUCLEAR REPULSION ENERGY']['JSCH-106-monoA-unCP' ] = 443.36742635
DATA['NUCLEAR REPULSION ENERGY']['JSCH-106-monoB-unCP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-107-dimer' ] = 1740.95680727
DATA['NUCLEAR REPULSION ENERGY']['JSCH-107-monoA-unCP' ] = 507.48984219
DATA['NUCLEAR REPULSION ENERGY']['JSCH-107-monoB-unCP' ] = 601.53400057
DATA['NUCLEAR REPULSION ENERGY']['JSCH-108-dimer' ] = 1260.01230981
DATA['NUCLEAR REPULSION ENERGY']['JSCH-108-monoA-unCP' ] = 443.36740384
DATA['NUCLEAR REPULSION ENERGY']['JSCH-108-monoB-unCP' ] = 359.95489222
DATA['NUCLEAR REPULSION ENERGY']['JSCH-109-dimer' ] = 1609.15794755
DATA['NUCLEAR REPULSION ENERGY']['JSCH-109-monoA-unCP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-109-monoB-unCP' ] = 601.53400527
DATA['NUCLEAR REPULSION ENERGY']['JSCH-110-dimer' ] = 1349.63628460
DATA['NUCLEAR REPULSION ENERGY']['JSCH-110-monoA-unCP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-110-monoB-unCP' ] = 359.95488319
DATA['NUCLEAR REPULSION ENERGY']['JSCH-111-dimer' ] = 1673.67295485
DATA['NUCLEAR REPULSION ENERGY']['JSCH-111-monoA-unCP' ] = 443.36742741
DATA['NUCLEAR REPULSION ENERGY']['JSCH-111-monoB-unCP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-112-dimer' ] = 1367.26317388
DATA['NUCLEAR REPULSION ENERGY']['JSCH-112-monoA-unCP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-112-monoB-unCP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-113-dimer' ] = 1509.79318924
DATA['NUCLEAR REPULSION ENERGY']['JSCH-113-monoA-unCP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-113-monoB-unCP' ] = 443.36742934
DATA['NUCLEAR REPULSION ENERGY']['JSCH-114-dimer' ] = 1545.03032944
DATA['NUCLEAR REPULSION ENERGY']['JSCH-114-monoA-unCP' ] = 507.48984783
DATA['NUCLEAR REPULSION ENERGY']['JSCH-114-monoB-unCP' ] = 443.36742068
DATA['NUCLEAR REPULSION ENERGY']['JSCH-115-dimer' ] = 1601.56827337
DATA['NUCLEAR REPULSION ENERGY']['JSCH-115-monoA-unCP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-115-monoB-unCP' ] = 507.48987743
DATA['NUCLEAR REPULSION ENERGY']['JSCH-116-dimer' ] = 1410.31245614
DATA['NUCLEAR REPULSION ENERGY']['JSCH-116-monoA-unCP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-116-monoB-unCP' ] = 443.36741425
DATA['NUCLEAR REPULSION ENERGY']['JSCH-117-dimer' ] = 1816.15304322
DATA['NUCLEAR REPULSION ENERGY']['JSCH-117-monoA-unCP' ] = 595.94046611
DATA['NUCLEAR REPULSION ENERGY']['JSCH-117-monoB-unCP' ] = 534.51236588
DATA['NUCLEAR REPULSION ENERGY']['JSCH-118-dimer' ] = 1727.56215886
DATA['NUCLEAR REPULSION ENERGY']['JSCH-118-monoA-unCP' ] = 442.44825872
DATA['NUCLEAR REPULSION ENERGY']['JSCH-118-monoB-unCP' ] = 697.73026630
DATA['NUCLEAR REPULSION ENERGY']['JSCH-119-dimer' ] = 1650.54443625
DATA['NUCLEAR REPULSION ENERGY']['JSCH-119-monoA-unCP' ] = 595.84177555
DATA['NUCLEAR REPULSION ENERGY']['JSCH-119-monoB-unCP' ] = 443.14986171
DATA['NUCLEAR REPULSION ENERGY']['JSCH-120-dimer' ] = 1964.24212034
DATA['NUCLEAR REPULSION ENERGY']['JSCH-120-monoA-unCP' ] = 535.63812262
DATA['NUCLEAR REPULSION ENERGY']['JSCH-120-monoB-unCP' ] = 697.73051558
DATA['NUCLEAR REPULSION ENERGY']['JSCH-121-dimer' ] = 1496.57764615
DATA['NUCLEAR REPULSION ENERGY']['JSCH-121-monoA-unCP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-121-monoB-unCP' ] = 596.77416801
DATA['NUCLEAR REPULSION ENERGY']['JSCH-122-dimer' ] = 1752.69730428
DATA['NUCLEAR REPULSION ENERGY']['JSCH-122-monoA-unCP' ] = 503.84093948
DATA['NUCLEAR REPULSION ENERGY']['JSCH-122-monoB-unCP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-123-dimer' ] = 1512.39205830
DATA['NUCLEAR REPULSION ENERGY']['JSCH-123-monoA-unCP' ] = 596.88729965
DATA['NUCLEAR REPULSION ENERGY']['JSCH-123-monoB-unCP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-124-dimer' ] = 1498.52644117
DATA['NUCLEAR REPULSION ENERGY']['JSCH-124-monoA-unCP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-124-monoB-unCP' ] = 357.96626427
DATA['NUCLEAR REPULSION ENERGY']['JSCH-1-monoA-CP' ] = 357.13933560
DATA['NUCLEAR REPULSION ENERGY']['JSCH-1-monoB-CP' ] = 596.62760720
DATA['NUCLEAR REPULSION ENERGY']['JSCH-2-monoA-CP' ] = 443.56399475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-2-monoB-CP' ] = 696.60732032
DATA['NUCLEAR REPULSION ENERGY']['JSCH-3-monoA-CP' ] = 503.39630679
DATA['NUCLEAR REPULSION ENERGY']['JSCH-3-monoB-CP' ] = 440.30156925
DATA['NUCLEAR REPULSION ENERGY']['JSCH-4-monoA-CP' ] = 596.45767348
DATA['NUCLEAR REPULSION ENERGY']['JSCH-4-monoB-CP' ] = 533.27333592
DATA['NUCLEAR REPULSION ENERGY']['JSCH-5-monoA-CP' ] = 694.08169190
DATA['NUCLEAR REPULSION ENERGY']['JSCH-5-monoB-CP' ] = 357.17481831
DATA['NUCLEAR REPULSION ENERGY']['JSCH-6-monoA-CP' ] = 357.05937707
DATA['NUCLEAR REPULSION ENERGY']['JSCH-6-monoB-CP' ] = 502.93669666
DATA['NUCLEAR REPULSION ENERGY']['JSCH-7-monoA-CP' ] = 596.76364865
DATA['NUCLEAR REPULSION ENERGY']['JSCH-7-monoB-CP' ] = 357.05278633
DATA['NUCLEAR REPULSION ENERGY']['JSCH-8-monoA-CP' ] = 357.43034135
DATA['NUCLEAR REPULSION ENERGY']['JSCH-8-monoB-CP' ] = 369.97349400
DATA['NUCLEAR REPULSION ENERGY']['JSCH-9-monoA-CP' ] = 357.07506993
DATA['NUCLEAR REPULSION ENERGY']['JSCH-9-monoB-CP' ] = 357.22791266
DATA['NUCLEAR REPULSION ENERGY']['JSCH-10-monoA-CP' ] = 357.30966824
DATA['NUCLEAR REPULSION ENERGY']['JSCH-10-monoB-CP' ] = 357.25457437
DATA['NUCLEAR REPULSION ENERGY']['JSCH-11-monoA-CP' ] = 357.30771904
DATA['NUCLEAR REPULSION ENERGY']['JSCH-11-monoB-CP' ] = 670.49331720
DATA['NUCLEAR REPULSION ENERGY']['JSCH-12-monoA-CP' ] = 502.97292629
DATA['NUCLEAR REPULSION ENERGY']['JSCH-12-monoB-CP' ] = 412.74720533
DATA['NUCLEAR REPULSION ENERGY']['JSCH-13-monoA-CP' ] = 596.65701652
DATA['NUCLEAR REPULSION ENERGY']['JSCH-13-monoB-CP' ] = 440.44274318
DATA['NUCLEAR REPULSION ENERGY']['JSCH-14-monoA-CP' ] = 596.92255465
DATA['NUCLEAR REPULSION ENERGY']['JSCH-14-monoB-CP' ] = 440.54554467
DATA['NUCLEAR REPULSION ENERGY']['JSCH-15-monoA-CP' ] = 503.53728859
DATA['NUCLEAR REPULSION ENERGY']['JSCH-15-monoB-CP' ] = 425.75653587
DATA['NUCLEAR REPULSION ENERGY']['JSCH-16-monoA-CP' ] = 596.66795120
DATA['NUCLEAR REPULSION ENERGY']['JSCH-16-monoB-CP' ] = 413.04224329
DATA['NUCLEAR REPULSION ENERGY']['JSCH-17-monoA-CP' ] = 596.75974596
DATA['NUCLEAR REPULSION ENERGY']['JSCH-17-monoB-CP' ] = 412.84579804
DATA['NUCLEAR REPULSION ENERGY']['JSCH-18-monoA-CP' ] = 356.91023176
DATA['NUCLEAR REPULSION ENERGY']['JSCH-18-monoB-CP' ] = 503.30931271
DATA['NUCLEAR REPULSION ENERGY']['JSCH-19-monoA-CP' ] = 596.15051246
DATA['NUCLEAR REPULSION ENERGY']['JSCH-19-monoB-CP' ] = 596.63218121
DATA['NUCLEAR REPULSION ENERGY']['JSCH-20-monoA-CP' ] = 596.58545524
DATA['NUCLEAR REPULSION ENERGY']['JSCH-20-monoB-CP' ] = 670.44234386
DATA['NUCLEAR REPULSION ENERGY']['JSCH-21-monoA-CP' ] = 670.04325278
DATA['NUCLEAR REPULSION ENERGY']['JSCH-21-monoB-CP' ] = 596.84640861
DATA['NUCLEAR REPULSION ENERGY']['JSCH-22-monoA-CP' ] = 596.48090327
DATA['NUCLEAR REPULSION ENERGY']['JSCH-22-monoB-CP' ] = 503.33140569
DATA['NUCLEAR REPULSION ENERGY']['JSCH-23-monoA-CP' ] = 503.54456755
DATA['NUCLEAR REPULSION ENERGY']['JSCH-23-monoB-CP' ] = 596.89708469
DATA['NUCLEAR REPULSION ENERGY']['JSCH-24-monoA-CP' ] = 593.67756289
DATA['NUCLEAR REPULSION ENERGY']['JSCH-24-monoB-CP' ] = 501.45867869
DATA['NUCLEAR REPULSION ENERGY']['JSCH-25-monoA-CP' ] = 595.94249141
DATA['NUCLEAR REPULSION ENERGY']['JSCH-25-monoB-CP' ] = 503.12213297
DATA['NUCLEAR REPULSION ENERGY']['JSCH-26-monoA-CP' ] = 596.44241276
DATA['NUCLEAR REPULSION ENERGY']['JSCH-26-monoB-CP' ] = 502.87235332
DATA['NUCLEAR REPULSION ENERGY']['JSCH-27-monoA-CP' ] = 595.72714752
DATA['NUCLEAR REPULSION ENERGY']['JSCH-27-monoB-CP' ] = 503.45401843
DATA['NUCLEAR REPULSION ENERGY']['JSCH-28-monoA-CP' ] = 503.40799836
DATA['NUCLEAR REPULSION ENERGY']['JSCH-28-monoB-CP' ] = 503.40916818
DATA['NUCLEAR REPULSION ENERGY']['JSCH-29-monoA-CP' ] = 503.49043267
DATA['NUCLEAR REPULSION ENERGY']['JSCH-29-monoB-CP' ] = 502.94567640
DATA['NUCLEAR REPULSION ENERGY']['JSCH-30-monoA-CP' ] = 503.11592074
DATA['NUCLEAR REPULSION ENERGY']['JSCH-30-monoB-CP' ] = 503.11223193
DATA['NUCLEAR REPULSION ENERGY']['JSCH-31-monoA-CP' ] = 596.75580700
DATA['NUCLEAR REPULSION ENERGY']['JSCH-31-monoB-CP' ] = 693.16448502
DATA['NUCLEAR REPULSION ENERGY']['JSCH-32-monoA-CP' ] = 413.69053788
DATA['NUCLEAR REPULSION ENERGY']['JSCH-32-monoB-CP' ] = 413.05557496
DATA['NUCLEAR REPULSION ENERGY']['JSCH-33-monoA-CP' ] = 595.94046611
DATA['NUCLEAR REPULSION ENERGY']['JSCH-33-monoB-CP' ] = 533.43445531
DATA['NUCLEAR REPULSION ENERGY']['JSCH-34-monoA-CP' ] = 442.44825872
DATA['NUCLEAR REPULSION ENERGY']['JSCH-34-monoB-CP' ] = 697.73092506
DATA['NUCLEAR REPULSION ENERGY']['JSCH-35-monoA-CP' ] = 595.84177555
DATA['NUCLEAR REPULSION ENERGY']['JSCH-35-monoB-CP' ] = 535.63812262
DATA['NUCLEAR REPULSION ENERGY']['JSCH-36-monoA-CP' ] = 596.77416801
DATA['NUCLEAR REPULSION ENERGY']['JSCH-36-monoB-CP' ] = 503.84093948
DATA['NUCLEAR REPULSION ENERGY']['JSCH-37-monoA-CP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-37-monoB-CP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-38-monoA-CP' ] = 596.88729965
DATA['NUCLEAR REPULSION ENERGY']['JSCH-38-monoB-CP' ] = 357.96626427
DATA['NUCLEAR REPULSION ENERGY']['JSCH-39-monoA-CP' ] = 601.53395829
DATA['NUCLEAR REPULSION ENERGY']['JSCH-39-monoB-CP' ] = 359.95486861
DATA['NUCLEAR REPULSION ENERGY']['JSCH-40-monoA-CP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-40-monoB-CP' ] = 601.53394221
DATA['NUCLEAR REPULSION ENERGY']['JSCH-41-monoA-CP' ] = 507.48990840
DATA['NUCLEAR REPULSION ENERGY']['JSCH-41-monoB-CP' ] = 443.36744333
DATA['NUCLEAR REPULSION ENERGY']['JSCH-42-monoA-CP' ] = 443.36745667
DATA['NUCLEAR REPULSION ENERGY']['JSCH-42-monoB-CP' ] = 507.48988528
DATA['NUCLEAR REPULSION ENERGY']['JSCH-43-monoA-CP' ] = 359.95499475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-43-monoB-CP' ] = 359.95499475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-44-monoA-CP' ] = 601.53410726
DATA['NUCLEAR REPULSION ENERGY']['JSCH-44-monoB-CP' ] = 601.53410726
DATA['NUCLEAR REPULSION ENERGY']['JSCH-45-monoA-CP' ] = 601.53395829
DATA['NUCLEAR REPULSION ENERGY']['JSCH-45-monoB-CP' ] = 601.53394398
DATA['NUCLEAR REPULSION ENERGY']['JSCH-46-monoA-CP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-46-monoB-CP' ] = 359.95491240
DATA['NUCLEAR REPULSION ENERGY']['JSCH-47-monoA-CP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-47-monoB-CP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-48-monoA-CP' ] = 443.36742635
DATA['NUCLEAR REPULSION ENERGY']['JSCH-48-monoB-CP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-49-monoA-CP' ] = 601.53400057
DATA['NUCLEAR REPULSION ENERGY']['JSCH-49-monoB-CP' ] = 443.36740384
DATA['NUCLEAR REPULSION ENERGY']['JSCH-50-monoA-CP' ] = 507.48984219
DATA['NUCLEAR REPULSION ENERGY']['JSCH-50-monoB-CP' ] = 359.95489222
DATA['NUCLEAR REPULSION ENERGY']['JSCH-51-monoA-CP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-51-monoB-CP' ] = 601.53400527
DATA['NUCLEAR REPULSION ENERGY']['JSCH-52-monoA-CP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-52-monoB-CP' ] = 359.95488319
DATA['NUCLEAR REPULSION ENERGY']['JSCH-53-monoA-CP' ] = 443.36742741
DATA['NUCLEAR REPULSION ENERGY']['JSCH-53-monoB-CP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-54-monoA-CP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-54-monoB-CP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-55-monoA-CP' ] = 443.36742635
DATA['NUCLEAR REPULSION ENERGY']['JSCH-55-monoB-CP' ] = 443.36742934
DATA['NUCLEAR REPULSION ENERGY']['JSCH-56-monoA-CP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-56-monoB-CP' ] = 507.48987021
DATA['NUCLEAR REPULSION ENERGY']['JSCH-57-monoA-CP' ] = 507.48984783
DATA['NUCLEAR REPULSION ENERGY']['JSCH-57-monoB-CP' ] = 507.48984783
DATA['NUCLEAR REPULSION ENERGY']['JSCH-58-monoA-CP' ] = 443.36742068
DATA['NUCLEAR REPULSION ENERGY']['JSCH-58-monoB-CP' ] = 443.36742068
DATA['NUCLEAR REPULSION ENERGY']['JSCH-59-monoA-CP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-59-monoB-CP' ] = 443.36741425
DATA['NUCLEAR REPULSION ENERGY']['JSCH-60-monoA-CP' ] = 507.48987743
DATA['NUCLEAR REPULSION ENERGY']['JSCH-60-monoB-CP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-61-monoA-CP' ] = 595.94046611
DATA['NUCLEAR REPULSION ENERGY']['JSCH-61-monoB-CP' ] = 596.81166025
DATA['NUCLEAR REPULSION ENERGY']['JSCH-62-monoA-CP' ] = 533.43445531
DATA['NUCLEAR REPULSION ENERGY']['JSCH-62-monoB-CP' ] = 534.51236588
DATA['NUCLEAR REPULSION ENERGY']['JSCH-63-monoA-CP' ] = 697.73026630
DATA['NUCLEAR REPULSION ENERGY']['JSCH-63-monoB-CP' ] = 697.73092506
DATA['NUCLEAR REPULSION ENERGY']['JSCH-64-monoA-CP' ] = 442.44825872
DATA['NUCLEAR REPULSION ENERGY']['JSCH-64-monoB-CP' ] = 442.44825964
DATA['NUCLEAR REPULSION ENERGY']['JSCH-65-monoA-CP' ] = 595.84177555
DATA['NUCLEAR REPULSION ENERGY']['JSCH-65-monoB-CP' ] = 697.73051558
DATA['NUCLEAR REPULSION ENERGY']['JSCH-66-monoA-CP' ] = 443.14986171
DATA['NUCLEAR REPULSION ENERGY']['JSCH-66-monoB-CP' ] = 535.63812262
DATA['NUCLEAR REPULSION ENERGY']['JSCH-67-monoA-CP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-67-monoB-CP' ] = 503.84093948
DATA['NUCLEAR REPULSION ENERGY']['JSCH-68-monoA-CP' ] = 596.77416801
DATA['NUCLEAR REPULSION ENERGY']['JSCH-68-monoB-CP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-69-monoA-CP' ] = 596.88729965
DATA['NUCLEAR REPULSION ENERGY']['JSCH-69-monoB-CP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-70-monoA-CP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-70-monoB-CP' ] = 357.96626427
DATA['NUCLEAR REPULSION ENERGY']['JSCH-71-monoA-CP' ] = 596.44964921
DATA['NUCLEAR REPULSION ENERGY']['JSCH-71-monoB-CP' ] = 357.10169648
DATA['NUCLEAR REPULSION ENERGY']['JSCH-72-monoA-CP' ] = 696.44803543
DATA['NUCLEAR REPULSION ENERGY']['JSCH-72-monoB-CP' ] = 443.64584898
DATA['NUCLEAR REPULSION ENERGY']['JSCH-73-monoA-CP' ] = 503.36564485
DATA['NUCLEAR REPULSION ENERGY']['JSCH-73-monoB-CP' ] = 440.14700689
DATA['NUCLEAR REPULSION ENERGY']['JSCH-74-monoA-CP' ] = 596.40342598
DATA['NUCLEAR REPULSION ENERGY']['JSCH-74-monoB-CP' ] = 532.86039581
DATA['NUCLEAR REPULSION ENERGY']['JSCH-75-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-75-monoB-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-76-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-76-monoB-CP' ] = 355.44451746
DATA['NUCLEAR REPULSION ENERGY']['JSCH-77-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-77-monoB-CP' ] = 355.44455365
DATA['NUCLEAR REPULSION ENERGY']['JSCH-78-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-78-monoB-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-79-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-79-monoB-CP' ] = 355.44458170
DATA['NUCLEAR REPULSION ENERGY']['JSCH-80-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-80-monoB-CP' ] = 355.44458162
DATA['NUCLEAR REPULSION ENERGY']['JSCH-81-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-81-monoB-CP' ] = 355.44458170
DATA['NUCLEAR REPULSION ENERGY']['JSCH-82-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-82-monoB-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-83-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-83-monoB-CP' ] = 355.44457113
DATA['NUCLEAR REPULSION ENERGY']['JSCH-84-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-84-monoB-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-85-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-85-monoB-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-86-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-86-monoB-CP' ] = 355.44459806
DATA['NUCLEAR REPULSION ENERGY']['JSCH-87-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-87-monoB-CP' ] = 355.44457375
DATA['NUCLEAR REPULSION ENERGY']['JSCH-88-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-88-monoB-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-89-monoA-CP' ] = 501.81461749
DATA['NUCLEAR REPULSION ENERGY']['JSCH-89-monoB-CP' ] = 501.81461592
DATA['NUCLEAR REPULSION ENERGY']['JSCH-90-monoA-CP' ] = 593.90346744
DATA['NUCLEAR REPULSION ENERGY']['JSCH-90-monoB-CP' ] = 593.90347753
DATA['NUCLEAR REPULSION ENERGY']['JSCH-91-monoA-CP' ] = 501.81461749
DATA['NUCLEAR REPULSION ENERGY']['JSCH-91-monoB-CP' ] = 355.44452826
DATA['NUCLEAR REPULSION ENERGY']['JSCH-92-monoA-CP' ] = 593.90347360
DATA['NUCLEAR REPULSION ENERGY']['JSCH-92-monoB-CP' ] = 501.81458877
DATA['NUCLEAR REPULSION ENERGY']['JSCH-93-monoA-CP' ] = 355.44454853
DATA['NUCLEAR REPULSION ENERGY']['JSCH-93-monoB-CP' ] = 355.44453848
DATA['NUCLEAR REPULSION ENERGY']['JSCH-94-monoA-CP' ] = 501.81461749
DATA['NUCLEAR REPULSION ENERGY']['JSCH-94-monoB-CP' ] = 355.38546038
DATA['NUCLEAR REPULSION ENERGY']['JSCH-95-monoA-CP' ] = 593.90347360
DATA['NUCLEAR REPULSION ENERGY']['JSCH-95-monoB-CP' ] = 355.44418383
DATA['NUCLEAR REPULSION ENERGY']['JSCH-96-monoA-CP' ] = 355.44454853
DATA['NUCLEAR REPULSION ENERGY']['JSCH-96-monoB-CP' ] = 355.38590060
DATA['NUCLEAR REPULSION ENERGY']['JSCH-97-monoA-CP' ] = 355.38547127
DATA['NUCLEAR REPULSION ENERGY']['JSCH-97-monoB-CP' ] = 355.38549385
DATA['NUCLEAR REPULSION ENERGY']['JSCH-98-monoA-CP' ] = 593.90347360
DATA['NUCLEAR REPULSION ENERGY']['JSCH-98-monoB-CP' ] = 355.38464230
DATA['NUCLEAR REPULSION ENERGY']['JSCH-99-monoA-CP' ] = 601.53395829
DATA['NUCLEAR REPULSION ENERGY']['JSCH-99-monoB-CP' ] = 601.53394221
DATA['NUCLEAR REPULSION ENERGY']['JSCH-100-monoA-CP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-100-monoB-CP' ] = 359.95486861
DATA['NUCLEAR REPULSION ENERGY']['JSCH-101-monoA-CP' ] = 507.48990840
DATA['NUCLEAR REPULSION ENERGY']['JSCH-101-monoB-CP' ] = 507.48988528
DATA['NUCLEAR REPULSION ENERGY']['JSCH-102-monoA-CP' ] = 443.36745667
DATA['NUCLEAR REPULSION ENERGY']['JSCH-102-monoB-CP' ] = 443.36744333
DATA['NUCLEAR REPULSION ENERGY']['JSCH-103-monoA-CP' ] = 359.95499475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-103-monoB-CP' ] = 601.53410726
DATA['NUCLEAR REPULSION ENERGY']['JSCH-104-monoA-CP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-104-monoB-CP' ] = 601.53394398
DATA['NUCLEAR REPULSION ENERGY']['JSCH-105-monoA-CP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-105-monoB-CP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-106-monoA-CP' ] = 443.36742635
DATA['NUCLEAR REPULSION ENERGY']['JSCH-106-monoB-CP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-107-monoA-CP' ] = 507.48984219
DATA['NUCLEAR REPULSION ENERGY']['JSCH-107-monoB-CP' ] = 601.53400057
DATA['NUCLEAR REPULSION ENERGY']['JSCH-108-monoA-CP' ] = 443.36740384
DATA['NUCLEAR REPULSION ENERGY']['JSCH-108-monoB-CP' ] = 359.95489222
DATA['NUCLEAR REPULSION ENERGY']['JSCH-109-monoA-CP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-109-monoB-CP' ] = 601.53400527
DATA['NUCLEAR REPULSION ENERGY']['JSCH-110-monoA-CP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-110-monoB-CP' ] = 359.95488319
DATA['NUCLEAR REPULSION ENERGY']['JSCH-111-monoA-CP' ] = 443.36742741
DATA['NUCLEAR REPULSION ENERGY']['JSCH-111-monoB-CP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-112-monoA-CP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-112-monoB-CP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-113-monoA-CP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-113-monoB-CP' ] = 443.36742934
DATA['NUCLEAR REPULSION ENERGY']['JSCH-114-monoA-CP' ] = 507.48984783
DATA['NUCLEAR REPULSION ENERGY']['JSCH-114-monoB-CP' ] = 443.36742068
DATA['NUCLEAR REPULSION ENERGY']['JSCH-115-monoA-CP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-115-monoB-CP' ] = 507.48987743
DATA['NUCLEAR REPULSION ENERGY']['JSCH-116-monoA-CP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-116-monoB-CP' ] = 443.36741425
DATA['NUCLEAR REPULSION ENERGY']['JSCH-117-monoA-CP' ] = 595.94046611
DATA['NUCLEAR REPULSION ENERGY']['JSCH-117-monoB-CP' ] = 534.51236588
DATA['NUCLEAR REPULSION ENERGY']['JSCH-118-monoA-CP' ] = 442.44825872
DATA['NUCLEAR REPULSION ENERGY']['JSCH-118-monoB-CP' ] = 697.73026630
DATA['NUCLEAR REPULSION ENERGY']['JSCH-119-monoA-CP' ] = 595.84177555
DATA['NUCLEAR REPULSION ENERGY']['JSCH-119-monoB-CP' ] = 443.14986171
DATA['NUCLEAR REPULSION ENERGY']['JSCH-120-monoA-CP' ] = 535.63812262
DATA['NUCLEAR REPULSION ENERGY']['JSCH-120-monoB-CP' ] = 697.73051558
DATA['NUCLEAR REPULSION ENERGY']['JSCH-121-monoA-CP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-121-monoB-CP' ] = 596.77416801
DATA['NUCLEAR REPULSION ENERGY']['JSCH-122-monoA-CP' ] = 503.84093948
DATA['NUCLEAR REPULSION ENERGY']['JSCH-122-monoB-CP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-123-monoA-CP' ] = 596.88729965
DATA['NUCLEAR REPULSION ENERGY']['JSCH-123-monoB-CP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-124-monoA-CP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-124-monoB-CP' ] = 357.96626427
|
kratman/psi4public
|
psi4/share/psi4/databases/JSCH.py
|
Python
|
gpl-2.0
| 287,205
|
[
"Psi4"
] |
21fa585bbe06a9a80c636865702c1638b5e9ae8bbe09f185d8de6ab06d99dc03
|
#!/usr/bin/env python3
import os
import sys
import time
import logging
import argparse
import tempfile
import resource
import subprocess
import collections
import distutils.spawn
import parallel_tools
import seqtools
import shims
# There can be problems with the submodules, but none are essential.
# Try to load these modules, but if there's a problem, load a harmless dummy and continue.
simplewrap = shims.get_module_or_shim('utillib.simplewrap')
version = shims.get_module_or_shim('utillib.version')
phone = shims.get_module_or_shim('ET.phone')
#TODO: Warn if it looks like the two input FASTQ files are the same (i.e. the _1 file was given
# twice). Can tell by whether the alpha and beta (first and last 12bp) portions of the barcodes
# are always identical. This would be a good thing to warn about, since it's an easy mistake
# to make, but it's not obvious that it happened. The pipeline won't fail, but will just
# produce pretty weird results.
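# A minimal sketch of that check (hypothetical; nothing in this script calls it). It assumes the
# canonical barcode is the alpha half followed by the beta half, each 12bp as described above,
# and that identical halves in every sampled barcode suggest the same file was given twice.
def barcodes_look_duplicated(barcodes, half_len=12, sample_size=1000):
  """Return True if the alpha and beta halves are identical in every sampled barcode."""
  import itertools
  sampled = list(itertools.islice(iter(barcodes), sample_size))
  return bool(sampled) and all(bc[:half_len] == bc[-half_len:] for bc in sampled)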
USAGE = """$ %(prog)s [options] families.tsv > families.msa.tsv
$ cat families.tsv | %(prog)s [options] > families.msa.tsv"""
DESCRIPTION = """Read in sorted FASTQ data and do multiple sequence alignments of each family."""
def make_argparser():
wrapper = simplewrap.Wrapper()
wrap = wrapper.wrap
parser = argparse.ArgumentParser(usage=USAGE, description=wrap(DESCRIPTION),
formatter_class=argparse.RawTextHelpFormatter)
wrapper.width = wrapper.width - 24
parser.add_argument('infile', metavar='read-families.tsv', nargs='?', default=sys.stdin,
type=argparse.FileType('r'),
help=wrap('The input reads, sorted into families. One line per read pair, 8 tab-delimited '
'columns:\n'
'1. canonical barcode\n'
'2. barcode order ("ab" for alpha+beta, "ba" for beta-alpha)\n'
'3. read 1 name\n'
'4. read 1 sequence\n'
'5. read 1 quality scores\n'
'6. read 2 name\n'
'7. read 2 sequence\n'
'8. read 2 quality scores'))
parser.add_argument('-a', '--aligner', choices=('mafft', 'kalign', 'dummy'), default='kalign',
help=wrap('The multiple sequence aligner to use. Default: %(default)s'))
parser.add_argument('-I', '--no-check-ids', dest='check_ids', action='store_false', default=True,
help='Don\'t check to make sure read pairs have identical ids. By default, if this '
'encounters a pair of reads in families.tsv with ids that aren\'t identical (minus an '
'ending /1 or /2), it will throw an error.')
parser.add_argument('-p', '--processes', default=0,
help=wrap('Number of worker subprocesses to use. If 0, no subprocesses will be started and '
'everything will be done inside one process. Give "auto" to use as many processes '
'as there are CPU cores. Default: %(default)s.'))
parser.add_argument('--queue-size', type=int,
help=wrap('How long to go accumulating responses from worker subprocesses before dealing '
f'with all of them. Default: {parallel_tools.QUEUE_SIZE_MULTIPLIER} * the number of '
'worker --processes.'))
parser.add_argument('--phone-home', action='store_true',
help=wrap('Report helpful usage data to the developer, to better understand the use cases and '
'performance of the tool. The only data which will be recorded is the name and '
'version of the tool, the size of the input data, the time and memory taken to '
'process it, and the IP address of the machine running it. Also, if the script '
'fails, it will report the name of the exception thrown and the line of code it '
'occurred in. No filenames are sent, and the only parameters reported are --aligner, '
'--processes, and --queue-size, which are necessary to evaluate performance. All the '
'reporting and recording code is available at https://github.com/NickSto/ET.'))
parser.add_argument('--galaxy', dest='platform', action='store_const', const='galaxy',
help=wrap('Tell the script it\'s running on Galaxy. Currently this only affects data reported '
'when phoning home.'))
parser.add_argument('--test', action='store_true',
help=wrap('If reporting usage data, mark this as a test run.'))
parser.add_argument('--version', action='version', version=str(version.get_version()),
help=wrap('Print the version number and exit.'))
parser.add_argument('-L', '--log-file', type=argparse.FileType('w'), default=sys.stderr,
help=wrap('Print log messages to this file instead of to stderr. NOTE: Will overwrite the file.'))
parser.add_argument('-q', '--quiet', dest='volume', action='store_const', const=logging.CRITICAL,
default=logging.WARNING)
parser.add_argument('-v', '--verbose', dest='volume', action='store_const', const=logging.INFO)
parser.add_argument('-D', '--debug', dest='volume', action='store_const', const=logging.DEBUG)
return parser
def main(argv):
parser = make_argparser()
args = parser.parse_args(argv[1:])
logging.basicConfig(stream=args.log_file, level=args.volume, format='%(message)s')
tone_down_logger()
start_time = time.time()
# If the user requested, report back some data about the start of the run.
if args.phone_home:
call = phone.Call(__file__, version.get_version(), platform=args.platform, test=args.test,
fail='warn')
call.send_data('start')
data = {
'stdin': args.infile is sys.stdin,
'aligner': args.aligner,
'processes': args.processes,
'queue_size': args.queue_size,
}
if data['stdin']:
data['input_size'] = None
else:
data['input_size'] = os.path.getsize(args.infile.name)
call.send_data('prelim', run_data=data)
# Execute as much of the script as possible in a try/except to catch any exception that occurs
# and report it via ET.phone.
try:
if args.queue_size is not None and args.queue_size <= 0:
fail('Error: --queue-size must be greater than zero.')
# If we're using mafft, check that we can execute it.
if args.aligner == 'mafft' and not distutils.spawn.find_executable('mafft'):
fail('Error: Could not find "mafft" command on $PATH.')
# Open a pool of worker processes.
stats = {'duplexes':0, 'time':0, 'pairs':0, 'runs':0, 'failures':0, 'aligned_pairs':0}
pool = parallel_tools.SyncAsyncPool(
process_duplex, processes=args.processes, static_kwargs={'aligner':args.aligner},
queue_size=args.queue_size, callback=process_result, callback_args=[stats]
)
try:
# The main loop.
align_families(args.infile, pool, stats, check_ids=args.check_ids)
finally:
# If an exception occurs in the parent without stopping the child processes, this will hang.
# Make sure to kill the children in all cases.
pool.close()
pool.join()
# Close input filehandle if it's open.
if args.infile is not sys.stdin:
args.infile.close()
# Final stats on the run.
run_time = int(time.time() - start_time)
max_mem = get_max_mem()
logging.error(
'Processed {pairs} read pairs in {duplexes} duplexes, with {failures} alignment failures.'
.format(**stats)
)
if stats['aligned_pairs'] > 0 and stats['runs'] > 0:
per_pair = stats['time'] / stats['aligned_pairs']
per_run = stats['time'] / stats['runs']
logging.error(f'{per_pair:0.3f}s per pair, {per_run:0.3f}s per run.')
logging.error(f'in {run_time}s total time and {max_mem:0.2f}MB RAM.')
except (Exception, KeyboardInterrupt) as exception:
if args.phone_home and call:
try:
exception_data = getattr(exception, 'child_context', parallel_tools.get_exception_data())
logging.critical(parallel_tools.format_traceback(exception_data))
exception_data = parallel_tools.scrub_tb_paths(exception_data, script_path=__file__)
except Exception:
exception_data = {}
run_time = int(time.time() - start_time)
try:
run_data = get_run_data(stats, pool, args.aligner)
except (Exception, UnboundLocalError):
run_data = {}
try:
run_data['mem'] = get_max_mem()
except Exception:
pass
run_data['failed'] = True
if exception_data:
run_data['exception'] = exception_data
call.send_data('end', run_time=run_time, run_data=run_data)
raise exception
else:
raise
if args.phone_home and call:
run_data = get_run_data(stats, pool, args.aligner, max_mem)
call.send_data('end', run_time=run_time, run_data=run_data)
def get_max_mem():
"""Get the maximum memory usage (RSS) of this process and all its children, in MB."""
maxrss_total = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
maxrss_total += resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss
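  # Note: ru_maxrss is in kilobytes on Linux but in bytes on macOS, so the MB figure returned
  # below assumes a Linux-like platform.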
return maxrss_total/1024
def get_run_data(stats, pool, aligner, max_mem=None):
run_data = stats.copy()
run_data['align_time'] = run_data['time']
del run_data['time']
if max_mem is not None:
run_data['mem'] = max_mem
run_data['processes'] = pool.processes
run_data['queue_size'] = pool.queue_size
run_data['aligner'] = aligner
return run_data
def align_families(infile, pool, stats, check_ids=True):
"""The main loop.
  This processes whole duplexes (pairs of strands) at a time, to allow for a future option of
  aligning the whole duplex at once.
duplex data structure:
duplex = {
'ab': [
{'name1': 'read_name1a',
'seq1': 'GATT-ACA',
'qual1': 'sc!0 /J*',
'name2': 'read_name1b',
'seq2': 'ACTGACTA',
'qual2': '34I&SDF)'
},
{'name1': 'read_name2a',
...
},
...
],
'ba': [
...
]
}
e.g.:
seq = duplex[order][pair_num]['seq1']"""
duplex = collections.OrderedDict()
family = []
barcode = None
order = None
for line in infile:
fields = line.rstrip('\r\n').split('\t')
if len(fields) != 8:
continue
(this_barcode, this_order, name1, seq1, qual1, name2, seq2, qual2) = fields
if check_ids:
assert_read_ids_match(name1, name2)
# If the barcode or order has changed, we're in a new family.
# Process the reads we've previously gathered as one family and start a new family.
if this_barcode != barcode or this_order != order:
duplex[order] = family
      # If the barcode is different, we're at the end of the whole duplex. Process it and start
      # a new one. If the barcode is the same, we're in the same duplex, but we've switched strands.
if this_barcode != barcode:
        # orders_str = '/'.join([str(len(duplex[o])) for o in duplex])
        # logging.debug(f'processing {barcode}: {len(duplex)} orders ({orders_str})')
if barcode is not None:
pool.compute(duplex, barcode)
stats['duplexes'] += 1
duplex = collections.OrderedDict()
barcode = this_barcode
order = this_order
family = []
pair = {'name1': name1, 'seq1':seq1, 'qual1':qual1, 'name2':name2, 'seq2':seq2, 'qual2':qual2}
family.append(pair)
stats['pairs'] += 1
# Process the last family.
duplex[order] = family
  # orders_str = '/'.join([str(len(duplex[o])) for o in duplex])
  # logging.debug(f'processing {barcode}: {len(duplex)} orders ({orders_str})')
pool.compute(duplex, barcode)
stats['duplexes'] += 1
# Retrieve the remaining results.
  logging.info('Flushing remaining results from worker processes...')
pool.flush()
def assert_read_ids_match(name1, name2):
id1 = name1.split()[0]
id2 = name2.split()[0]
if id1.endswith('/1'):
id1 = id1[:-2]
if id2.endswith('/2'):
id2 = id2[:-2]
if id1 == id2:
return True
elif id1.endswith('/2') and id2.endswith('/1'):
raise ValueError(
f'Read names not as expected. Mate 1 ends with /2 and mate 2 ends with /1:\n'
f' Mate 1: {name1!r}\n Mate 2: {name2!r}'
)
else:
raise ValueError(f'Read names {name1!r} and {name2!r} do not match.')
def process_duplex(duplex, barcode, aligner='mafft'):
output = ''
orders_str = '", "'.join(map(str, duplex.keys()))
logging.debug(f'Starting {barcode} (orders "{orders_str}")')
run_stats = {'time':0, 'runs':0, 'aligned_pairs':0, 'failures':0}
orders = tuple(duplex.keys())
if len(duplex) == 0 or None in duplex:
logging.warning(f'Empty duplex {barcode}.')
return '', {}
elif len(duplex) == 1:
# If there's only one strand in the duplex, just process the first mate, then the second.
combos = ((1, orders[0]), (2, orders[0]))
elif len(duplex) == 2:
# If there's two strands, process in a criss-cross order:
# strand1/mate1, strand2/mate2, strand1/mate2, strand2/mate1
combos = ((1, orders[0]), (2, orders[1]), (2, orders[0]), (1, orders[1]))
else:
raise AssertionError(f'More than 2 orders in duplex {barcode}: {orders}')
for mate, order in combos:
family = duplex[order]
start = time.time()
try:
alignment = align_family(family, mate, aligner=aligner)
except AssertionError as error:
logging.exception(f'While processing duplex {barcode}, order {order}, mate {mate}:')
raise
except (OSError, subprocess.CalledProcessError) as error:
logging.warning(
f'{type(error).__name__} on family {barcode}, order {order}, mate {mate}:\n{error}'
)
alignment = None
# Compile statistics.
elapsed = time.time() - start
pairs = len(family)
logging.debug(f'{elapsed} sec for {pairs} read pairs.')
if pairs > 1:
run_stats['time'] += elapsed
run_stats['runs'] += 1
run_stats['aligned_pairs'] += pairs
if alignment is None:
logging.warning(f'Error aligning family {barcode}/{order} (read {mate}).')
run_stats['failures'] += 1
else:
output += format_msa(alignment, barcode, order, mate)
return output, run_stats
def align_family(family, mate, aligner='mafft'):
"""Do a multiple sequence alignment of the reads in a family and their quality scores."""
mate = str(mate)
assert mate == '1' or mate == '2'
if len(family) == 0:
return None
elif len(family) == 1:
# If there's only one read pair, there's no alignment to be done (and MAFFT won't accept it).
aligned_seqs = [family[0]['seq'+mate]]
else:
# Do the multiple sequence alignment.
aligned_seqs = make_msa(family, mate, aligner=aligner)
# Transfer the alignment to the quality scores.
## Get a list of all quality scores in the family for this mate.
quals_raw = [pair['qual'+mate] for pair in family]
qual_alignment = seqtools.transfer_gaps_multi(quals_raw, aligned_seqs, gap_char_out=' ')
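  # E.g. if 'ACTA' was aligned as 'AC-TA', a quality string 'IIII' becomes 'II II': judging by
  # the gap_char_out=' ' argument, each gap gets a space so scores stay aligned with their bases.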
# Package them up in the output data structure.
alignment = []
for pair, aligned_seq, aligned_qual in zip(family, aligned_seqs, qual_alignment):
alignment.append({'name':pair['name'+mate], 'seq':aligned_seq, 'qual':aligned_qual})
return alignment
def make_msa(family, mate, aligner='mafft'):
if aligner == 'mafft':
return make_msa_mafft(family, mate)
elif aligner == 'kalign':
return make_msa_kalign(family, mate)
elif aligner == 'dummy':
return make_msa_dummy(family, mate)
def make_msa_dummy(family, mate):
logging.info('Aligning with dummy.')
return [pair['seq'+mate] for pair in family]
def make_msa_kalign(family, mate):
logging.info('Aligning with kalign.')
try:
# Import in the child process in case there's any issue in the .so with shared state between
# processes (maybe not possible, but just in case).
from kalign import kalign
except ImportError:
logging.critical('Error importing kalign module. Check that the submodule is installed properly.')
raise
seqs = [pair['seq'+mate] for pair in family]
aligned_seqs = kalign.align(seqs)
return aligned_seqs
def make_msa_mafft(family, mate):
"""Perform a multiple sequence alignment on a set of sequences and parse the result.
Uses MAFFT."""
logging.info('Aligning with mafft.')
#TODO: Replace with tempfile.mkstemp()?
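  # (delete=False is needed so the file still exists for mafft to read by name after this
  # with-block closes it; the finally clause below removes it manually.)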
with tempfile.NamedTemporaryFile('w', delete=False, prefix='align.msa.') as family_file:
for pair in family:
name = pair['name'+mate]
seq = pair['seq'+mate]
family_file.write('>'+name+'\n')
family_file.write(seq+'\n')
with open(os.devnull, 'w') as devnull:
try:
command = ['mafft', '--nuc', '--quiet', family_file.name]
output = subprocess.check_output(command, stderr=devnull)
except (OSError, subprocess.CalledProcessError):
raise
finally:
# Make sure we delete the temporary file.
os.remove(family_file.name)
return read_fasta(output)
def read_fasta(fasta):
"""Quick and dirty FASTA parser. Return the sequences and their names.
Returns a list of sequences.
Warning: Reads the entire contents of the file into memory at once."""
sequences = []
sequence = ''
for line in fasta.splitlines():
if line.startswith('>'):
if sequence:
sequences.append(sequence.upper())
sequence = ''
continue
sequence += line.strip()
if sequence:
sequences.append(sequence.upper())
return sequences
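# Illustrative example (not from the original source):
# read_fasta('>r1\nac\ngt\n>r2\nTT\n') returns ['ACGT', 'TT'];
# lines within each record are concatenated and uppercased, and the names are discarded.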
def format_msa(align, barcode, order, mate, outfile=sys.stdout):
output = ''
for seq in align:
output += f'{barcode}\t{order}\t{mate}\t{seq["name"]}\t{seq["seq"]}\t{seq["qual"]}\n'
return output
def process_result(result, stats):
"""Process the outcome of a duplex run.
Print the aligned output and sum the stats from the run with the running totals."""
output, run_stats = result
for key, value in run_stats.items():
stats[key] += value
if output:
sys.stdout.write(output)
def tone_down_logger():
"""Change the logging level names from all-caps to capitalized lowercase.
E.g. "WARNING" -> "Warning" (turn down the volume a bit in your log files)"""
for level in (logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG):
level_name = logging.getLevelName(level)
logging.addLevelName(level, level_name.capitalize())
def fail(message):
sys.stderr.write(message+"\n")
sys.exit(1)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
makrutenko/dunovo
|
align-families.py
|
Python
|
isc
| 18,275
|
[
"Galaxy"
] |
87fadd51b66deea3334ed5133f0ca696a8a865624653afd24abaa3f895c8d7cf
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os
from commoncode import command
from commoncode import fileutils
from commoncode.testcase import FileBasedTesting
class TestCommand(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
# tuples of supported osarch, osnoarch, noarch
os_arches_test_matrix = [
('linux-32', 'linux-noarch', 'noarch'),
('linux-64', 'linux-noarch', 'noarch'),
('mac-32', 'mac-noarch', 'noarch'),
('mac-64', 'mac-noarch', 'noarch'),
('win-32', 'win-noarch', 'noarch'),
('win-64', 'win-noarch', 'noarch'),
]
# os_arch -> (bin_dir, lib_dir, (bin_dir files,) (lib_dir files,) ,)
os_arches_files_test_matrix = {
'linux-32': (
'command/bin/linux-32/bin',
'command/bin/linux-32/lib',
            ('cmd',),
            ('libmagic32.so',),
),
'linux-64': (
'command/bin/linux-64/bin',
'command/bin/linux-64/lib',
            ('cmd',),
            ('libmagic64.so',),
),
'linux-noarch': (
'command/bin/linux-noarch/bin',
'command/bin/linux-noarch/bin',
            ('cmd',),
(),
),
'mac-32': (
'command/bin/mac-32/bin',
'command/bin/mac-32/lib',
            ('cmd',),
            ('libmagic.dylib',),
),
'mac-64': (
'command/bin/mac-64/bin',
'command/bin/mac-64/lib',
            ('cmd',),
            ('libmagic.dylib',),
),
'mac-noarch': (
'command/bin/mac-noarch/bin',
'command/bin/mac-noarch/bin',
            ('cmd',),
(),
),
'win-32': (
'command/bin/win-32/bin',
'command/bin/win-32/bin',
('cmd.exe',
'magic1.dll'),
('cmd.exe',
'magic1.dll'),
),
'win-64': (
'command/bin/win-64/bin',
'command/bin/win-64/bin',
('cmd.exe',
'magic1.dll'),
('cmd.exe',
'magic1.dll'),
),
'win-noarch': (
'command/bin/win-noarch/bin',
'command/bin/win-noarch/bin',
('cmd.exe',
'some.dll'),
('cmd.exe',
'some.dll'),
),
'noarch': (
'command/bin/noarch/bin',
'command/bin/noarch/lib',
            ('cmd',),
            ('l',),
),
'junk': (None, None, (), (),),
}
os_arches_locations_test_matrix = [
('linux-32', 'linux-noarch', 'noarch'),
('linux-64', 'linux-noarch', 'noarch'),
('linux-32', 'linux-noarch', None),
('linux-64', 'linux-noarch', None),
('linux-32', None, None),
('linux-64', None, None),
(None, 'linux-noarch', 'noarch'),
(None, 'linux-noarch', None),
('mac-32', 'mac-noarch', 'noarch'),
('mac-64', 'mac-noarch', 'noarch'),
('mac-32', 'mac-noarch', None),
('mac-64', 'mac-noarch', None),
('mac-32', None, None),
('mac-64', None, None),
(None, 'mac-noarch', 'noarch'),
(None, 'mac-noarch', None),
('win-32', 'win-noarch', 'noarch'),
('win-64', 'win-noarch', 'noarch'),
('win-32', 'win-noarch', None),
('win-64', 'win-noarch', None),
('win-32', None, None),
('win-64', None, None),
(None, 'win-noarch', 'noarch'),
(None, 'win-noarch', None),
(None, None, 'noarch'),
]
def test_execute_non_ascii_output(self):
        # Popen returns a *binary* string that may contain non-ascii chars: command.execute skips these
rc, stdout, stderr = command.execute(
'python', ['-c', "print 'non ascii: \\xe4 just passed it !'"]
)
assert rc == 0
assert stderr == ''
# converting to Unicode could cause an "ordinal not in range..."
# exception
assert stdout == 'non ascii: just passed it !'
unicode(stdout)
def test_os_arch_dir(self):
root_dir = self.get_test_loc('command/bin', copy=True)
for _os_arch, _os_noarch, _noarch in self.os_arches_test_matrix:
assert command.os_arch_dir(root_dir, _os_arch).endswith(_os_arch)
assert command.os_noarch_dir(root_dir, _os_noarch).endswith(_os_noarch)
assert command.noarch_dir(root_dir, _noarch).endswith(_noarch)
def test_get_base_dirs(self):
root_dir = self.get_test_loc('command/bin', copy=True)
for _os_arch, _os_noarch, _noarch in self.os_arches_test_matrix:
bds = command.get_base_dirs(root_dir, _os_arch, _os_noarch, _noarch)
assert bds
for bd in bds:
assert os.path.exists(bd)
def test_get_bin_lib_dirs(self):
root_dir = self.get_test_loc('command/bin', copy=True)
for os_arch, paths in self.os_arches_files_test_matrix.items():
base_dir = os.path.join(root_dir, os_arch)
bin_dir, lib_dir = command.get_bin_lib_dirs(base_dir)
expected_bin, expected_lib, expected_bin_files, expected_lib_files = paths
def norm(p):
return os.path.abspath(os.path.normpath(p))
if expected_bin:
assert os.path.exists(bin_dir)
assert os.path.isdir(bin_dir)
pbd = fileutils.as_posixpath(bin_dir)
assert pbd.endswith(expected_bin.replace('command/', ''))
if expected_bin_files:
                    assert all(f in expected_bin_files for f in os.listdir(bin_dir))
else:
assert expected_bin == bin_dir
if expected_lib:
assert os.path.exists(lib_dir)
assert os.path.isdir(lib_dir)
pld = fileutils.as_posixpath(lib_dir)
assert pld.endswith(expected_lib.replace('command/', ''))
if expected_lib_files:
                    assert all(f in expected_lib_files for f in os.listdir(lib_dir))
else:
assert expected_lib == lib_dir
def test_get_locations_missing(self):
assert command.get_locations('ctags', None) == (None, None, None)
assert command.get_locations('dir', None) == (None, None, None)
assert command.get_locations('ctags', '.') == (None, None, None)
def test_get_locations(self):
root_dir = self.get_test_loc('command/bin', copy=True)
cmd = 'cmd'
for test_matrix in self.os_arches_locations_test_matrix:
_os_arch, _os_noarch, _noarch = test_matrix
            cmd_loc, _, _ = command.get_locations(cmd, root_dir, _os_arch, _os_noarch, _noarch)
extension = ''
if any(x and 'win' in x for x in (_os_arch, _os_noarch, _noarch)):
extension = '.exe'
expected_cmd = cmd + extension
if cmd_loc:
assert cmd_loc.endswith(expected_cmd)
assert os.path.exists(cmd_loc)
assert os.path.isfile(cmd_loc)
|
yasharmaster/scancode-toolkit
|
tests/commoncode/test_command.py
|
Python
|
apache-2.0
| 8,505
|
[
"VisIt"
] |
cb70e1aed537de20711751e7e328ded8cd3a4b34edcb5d473375f0e56738f6e0
|
from math import ceil
from logic.smbool import SMBool
from logic.helpers import Helpers, Bosses
from logic.cache import Cache
from rom.rom_patches import RomPatches
from graph.graph_utils import getAccessPoint
from utils.parameters import Settings
class HelpersGraph(Helpers):
def __init__(self, smbm):
self.smbm = smbm
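        # smbm is the SMBool manager: the sm.wand/sm.wor/sm.haveItem calls below
        # combine item and technique requirements into SMBool results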
# def canEnterAndLeaveGauntletQty(self, nPB, nTanksSpark):
# sm = self.smbm
# # EXPLAINED: to access Gauntlet Entrance from Landing site we can either:
# # -fly to it (infinite bomb jumps or space jump)
# # -shinespark to it
# # -wall jump with high jump boots
# # -wall jump without high jump boots
    # # then inside it to break the bomb walls:
# # -use screw attack (easy way)
# # -use power bombs
# # -use bombs
# # -perform a simple short charge on the way in
# # and use power bombs on the way out
# return sm.wand(sm.wor(sm.canFly(),
# sm.haveItem('SpeedBooster'),
# sm.wand(sm.knowsHiJumpGauntletAccess(),
# sm.haveItem('HiJump')),
# sm.knowsHiJumpLessGauntletAccess()),
# sm.wor(sm.haveItem('ScrewAttack'),
# sm.wor(sm.wand(sm.energyReserveCountOkHardRoom('Gauntlet'),
# sm.wand(sm.canUsePowerBombs(),
# sm.wor(sm.itemCountOk('PowerBomb', nPB),
# sm.wand(sm.haveItem('SpeedBooster'),
# sm.energyReserveCountOk(nTanksSpark))))),
# sm.wand(sm.energyReserveCountOkHardRoom('Gauntlet', 0.51),
# sm.canUseBombs()))))
#
# @Cache.decorator
# def canEnterAndLeaveGauntlet(self):
# sm = self.smbm
# return sm.wor(sm.wand(sm.canShortCharge(),
# sm.canEnterAndLeaveGauntletQty(2, 2)),
# sm.canEnterAndLeaveGauntletQty(2, 3))
@Cache.decorator
def canPassCrateriaGreenPirates(self):
sm = self.smbm
return sm.wor(sm.canPassBombPassages(), # pirates can be killed with bombs or power bombs
sm.haveMissileOrSuper(),
sm.energyReserveCountOk(1),
sm.wor(sm.haveItem('Charge'),
sm.haveItem('Ice'),
sm.haveItem('Wave'),
sm.haveItem('Spazer'),
sm.haveItem('Plasma'),
sm.haveItem('ScrewAttack')))
# from blue brin elevator
@Cache.decorator
def canAccessBillyMays(self):
sm = self.smbm
return sm.wand(sm.wor(RomPatches.has(RomPatches.BlueBrinstarBlueDoor),
sm.traverse('ConstructionZoneRight')),
sm.canUsePowerBombs(),
sm.canGravLessLevel1())
# @Cache.decorator
# def canAccessKraidsLair(self):
# sm = self.smbm
# # EXPLAINED: access the upper right platform with either:
# # -hijump boots (easy regular way)
# # -fly (space jump or infinite bomb jump)
# # -know how to wall jump on the platform without the hijump boots
# return sm.wand(sm.haveItem('Super'),
# sm.wor(sm.haveItem('HiJump'),
# sm.canFly(),
# sm.knowsEarlyKraid()))
#
# @Cache.decorator
# def canPassMoat(self):
# sm = self.smbm
# # EXPLAINED: In the Moat we can either:
# # -use grapple or space jump (easy way)
# # -do a continuous wall jump (https://www.youtube.com/watch?v=4HVhTwwax6g)
# # -do a diagonal bomb jump from the middle platform (https://www.youtube.com/watch?v=5NRqQ7RbK3A&t=10m58s)
# # -do a short charge from the Keyhunter room (https://www.youtube.com/watch?v=kFAYji2gFok)
# # -do a gravity jump from below the right platform
# # -do a mock ball and a bounce ball (https://www.youtube.com/watch?v=WYxtRF--834)
# # -with gravity, either hijump or IBJ
# return sm.wor(sm.wor(sm.haveItem('Grapple'),
# sm.haveItem('SpaceJump'),
# sm.knowsContinuousWallJump()),
# sm.wor(sm.wand(sm.knowsDiagonalBombJump(), sm.canUseBombs()),
# sm.canSimpleShortCharge(),
# sm.wand(sm.haveItem('Gravity'),
# sm.wor(sm.knowsGravityJump(),
# sm.haveItem('HiJump'),
# sm.canInfiniteBombJump())),
# sm.wand(sm.knowsMockballWs(), sm.canUseSpringBall())))
#
@Cache.decorator
def canPassMoatReverse(self):
sm = self.smbm
return sm.wand(sm.haveItem('Gravity'),
# TODO::try with a spring ball jump
sm.wor(sm.canFly(),
sm.haveItem('HiJump'),
sm.canShortCharge()))
# @Cache.decorator
# def canPassSpongeBath(self):
# sm = self.smbm
# return sm.wor(sm.wand(sm.canPassBombPassages(),
# sm.knowsSpongeBathBombJump()),
# sm.wand(sm.haveItem('HiJump'),
# sm.knowsSpongeBathHiJump()),
# sm.wor(sm.haveItem('Gravity'),
# sm.haveItem('SpaceJump'),
# sm.wand(sm.haveItem('SpeedBooster'),
# sm.knowsSpongeBathSpeed()),
# sm.canSpringBallJump()))
#
# @Cache.decorator
# def canPassBowling(self):
# sm = self.smbm
# return sm.wand(Bosses.bossDead(sm, 'Phantoon'),
# sm.wor(sm.heatProof(),
# sm.energyReserveCountOk(1),
# sm.haveItem("SpaceJump"),
# sm.haveItem("Grapple")))
#
@Cache.decorator
def canAccessEtecoons(self):
sm = self.smbm
return sm.wand(sm.canUsePowerBombs(),
# beetoms
sm.wor(sm.haveMissileOrSuper(),
sm.canUsePowerBombs(),
sm.haveItem('ScrewAttack')))
# the water zone east of WS
@Cache.decorator
def canPassForgottenHighway(self):
sm = self.smbm
        return sm.wand(sm.canMorphJump(),
sm.wor(sm.haveItem('Gravity'),
sm.wand(sm.knowsGravLessLevel1(),
sm.haveItem('HiJump'))))
# @Cache.decorator
# def canExitCrabHole(self):
# sm = self.smbm
# return sm.wand(sm.haveItem('Morph'), # morph to exit the hole
# sm.wor(sm.wand(sm.haveItem('Gravity'), # even with gravity you need some way to climb...
# sm.wor(sm.haveItem('Ice'), # ...on crabs...
# sm.wand(sm.haveItem('HiJump'), sm.knowsMaridiaWallJumps()), # ...or by jumping
# sm.knowsGravityJump(),
# sm.canFly())),
# sm.wand(sm.haveItem('Ice'), sm.canDoSuitlessOuterMaridia()), # climbing crabs
# sm.canDoubleSpringBallJump()))
#
@Cache.decorator
def canTraverseSandPitsBottom(self):
sm = self.smbm
# quite horrible to do...
return sm.wand(sm.haveItem('Gravity'),
        # either freeze the top evir to jump on it, or use speedbooster to jump higher
# or use spacejump
sm.wor(sm.wand(sm.wor(sm.haveItem('Ice'), sm.haveItem('SpeedBooster')),
sm.haveItem('HiJump')),
sm.haveItem('SpaceJump')))
@Cache.decorator
def canTraverseSandPitsTop(self):
sm = self.smbm
# quite horrible to do...
return sm.wand(sm.haveItem('Gravity'),
sm.wor(sm.haveItem('HiJump'), sm.haveItem('SpaceJump')))
# @Cache.decorator
# def canPassMaridiaToRedTowerNode(self):
# sm = self.smbm
# return sm.wand(sm.haveItem('Morph'),
# sm.wor(RomPatches.has(RomPatches.AreaRandoGatesBase),
# sm.haveItem('Super')))
#
def canEnterCathedral(self, mult=1.0):
sm = self.smbm
return sm.wand(sm.traverse('CathedralEntranceRight'),
sm.haveItem('Morph'))
# sm.wor(sm.wand(sm.canHellRun('MainUpperNorfair', mult),
# sm.wor(sm.wor(RomPatches.has(RomPatches.CathedralEntranceWallJump),
# sm.haveItem('HiJump'),
# sm.canFly()),
# sm.wor(sm.haveItem('SpeedBooster'), # spark
# sm.canSpringBallJump()))),
# sm.wand(sm.canHellRun('MainUpperNorfair', 0.5*mult),
# sm.haveItem('Morph'),
# sm.knowsNovaBoost())))
#
# @Cache.decorator
# def canClimbBubbleMountain(self):
# sm = self.smbm
# return sm.wor(sm.haveItem('HiJump'),
# sm.canFly(),
# sm.haveItem('Ice'),
# sm.knowsBubbleMountainWallJump())
#
@Cache.decorator
def canFallToSpeedBooster(self):
sm = self.smbm
# TODO::new hellrun table
return sm.canHellRun(**Settings.hellRunsTable['MainUpperNorfair']['Bubble -> Speed Booster'])
@Cache.decorator
def canGetBackFromSpeedBooster(self):
sm = self.smbm
# TODO::new hellrun table
return sm.canHellRun(**Settings.hellRunsTable['MainUpperNorfair']['Bubble -> Speed Booster'])
@Cache.decorator
def canAccessDoubleChamberItems(self):
sm = self.smbm
hellRun = Settings.hellRunsTable['MainUpperNorfair']['Bubble -> Wave']
return sm.wand(sm.haveItem('Morph'), sm.canHellRun(**hellRun))
# @Cache.decorator
# def canExitCathedral(self):
# # from top: can use bomb/powerbomb jumps
# # from bottom: can do a shinespark or use space jump
# # can do it with highjump + wall jump
# # can do it with only two wall jumps (the first one is delayed like on alcatraz)
# # can do it with a spring ball jump from wall
# sm = self.smbm
# return sm.wand(sm.wor(sm.canHellRun(**Settings.hellRunsTable['MainUpperNorfair']['Bubble -> Norfair Entrance']),
# sm.heatProof()),
# sm.wor(sm.wor(sm.canPassBombPassages(),
# sm.haveItem("SpeedBooster")),
# sm.wor(sm.haveItem("SpaceJump"),
# sm.haveItem("HiJump"),
# sm.knowsWallJumpCathedralExit(),
# sm.wand(sm.knowsSpringBallJumpFromWall(), sm.canUseSpringBall()))))
@Cache.decorator
def canWallJumpInLava(self):
        # without gravity samus takes damage in lava
sm = self.smbm
return sm.wor(sm.haveItem('Gravity'),
# TODO::add lava in settings
sm.energyReserveCountOk(Settings.lava))
@Cache.decorator
def canClimbAttic(self):
# requires hijump or space jump
sm = self.smbm
# TODO::check if it's possible with IBJ
        return sm.wor(sm.haveItem('HiJump'), sm.haveItem('SpaceJump'))
# @Cache.decorator
# def canGrappleEscape(self):
# sm = self.smbm
# return sm.wor(sm.wor(sm.haveItem('SpaceJump'),
# sm.wand(sm.canInfiniteBombJump(), # IBJ from lava...either have grav or freeze the enemy there if hellrunning (otherwise single DBJ at the end)
# sm.wor(sm.heatProof(),
# sm.haveItem('Gravity'),
# sm.haveItem('Ice')))),
# sm.haveItem('Grapple'),
# sm.wand(sm.haveItem('SpeedBooster'),
# sm.wor(sm.haveItem('HiJump'), # jump from the blocks below
# sm.knowsShortCharge())), # spark from across the grapple blocks
# sm.wand(sm.haveItem('HiJump'), sm.canSpringBallJump())) # jump from the blocks below
#
# @Cache.decorator
# def canPassFrogSpeedwayRightToLeft(self):
# sm = self.smbm
# return sm.wor(sm.haveItem('SpeedBooster'),
# sm.wand(sm.knowsFrogSpeedwayWithoutSpeed(),
# sm.haveItem('Wave'),
# sm.wor(sm.haveItem('Spazer'),
# sm.haveItem('Plasma'))))
#
@Cache.decorator
def canEnterNorfairReserveAreaFromBubbleMoutain(self):
sm = self.smbm
return sm.wand(sm.traverse('BubbleMountainTopLeft'),
sm.wor(sm.canFly(),
# TODO::check with ice and hijump
sm.haveItem('Ice'),
                              sm.haveItem('HiJump')))
# @Cache.decorator
# def canEnterNorfairReserveAreaFromBubbleMoutainTop(self):
# sm = self.smbm
# return sm.wand(sm.traverse('BubbleMountainTopLeft'),
# sm.wor(sm.haveItem('Grapple'),
# sm.haveItem('SpaceJump'),
# sm.knowsNorfairReserveDBoost()))
#
# @Cache.decorator
# def canPassLavaPit(self):
# sm = self.smbm
# nTanks4Dive = 8 / sm.getDmgReduction()[0]
# if sm.haveItem('HiJump').bool == False:
# nTanks4Dive = ceil(nTanks4Dive * 1.25)
# return sm.wand(sm.wor(sm.wand(sm.haveItem('Gravity'), sm.haveItem('SpaceJump')),
# sm.wand(sm.knowsGravityJump(), sm.haveItem('Gravity'), sm.wor(sm.haveItem('HiJump'), sm.knowsLavaDive())),
# sm.wand(sm.wor(sm.wand(sm.knowsLavaDive(), sm.haveItem('HiJump')),
# sm.knowsLavaDiveNoHiJump()),
# sm.energyReserveCountOk(nTanks4Dive))),
# sm.canUsePowerBombs()) # power bomb blocks left and right of LN entrance without any items before
#
# @Cache.decorator
# def canPassLavaPitReverse(self):
# sm = self.smbm
# nTanks = 2
# if sm.heatProof().bool == False:
# nTanks = 6
# return sm.energyReserveCountOk(nTanks)
#
# @Cache.decorator
# def canPassLowerNorfairChozo(self):
# sm = self.smbm
# # to require one more CF if no heat protection because of distance to cover, wait times, acid...
# return sm.wand(sm.canHellRun(**Settings.hellRunsTable['LowerNorfair']['Entrance -> GT via Chozo']),
# sm.canUsePowerBombs(),
# sm.wor(RomPatches.has(RomPatches.LNChozoSJCheckDisabled), sm.haveItem('SpaceJump')))
#
# @Cache.decorator
# def canExitScrewAttackArea(self):
# sm = self.smbm
#
# return sm.wand(sm.canDestroyBombWalls(),
# sm.wor(sm.canFly(),
# sm.wand(sm.haveItem('HiJump'),
# sm.haveItem('SpeedBooster'),
# sm.wor(sm.wand(sm.haveItem('ScrewAttack'), sm.knowsScrewAttackExit()),
# sm.knowsScrewAttackExitWithoutScrew())),
# sm.wand(sm.canUseSpringBall(),
# sm.knowsSpringBallJumpFromWall()),
# sm.wand(sm.canSimpleShortCharge(), # fight GT and spark out
# sm.enoughStuffGT())))
#
# @Cache.decorator
# def canPassWorstRoom(self):
# sm = self.smbm
# return sm.wand(sm.canDestroyBombWalls(),
# sm.canPassWorstRoomPirates(),
# sm.wor(sm.canFly(),
# sm.wand(sm.knowsWorstRoomIceCharge(), sm.haveItem('Ice'), sm.canFireChargedShots()),
# sm.wor(sm.wand(sm.knowsGetAroundWallJump(), sm.haveItem('HiJump')),
# sm.knowsWorstRoomWallJump()),
# sm.wand(sm.knowsSpringBallJumpFromWall(), sm.canUseSpringBall())))
#
# # checks mix of super missiles/health
# def canGoThroughLowerNorfairEnemy(self, nmyHealth, nbNmy, nmyHitDmg, supDmg=300.0):
# sm = self.smbm
# # supers only
# if sm.itemCount('Super')*5*supDmg >= nbNmy*nmyHealth:
# return SMBool(True, 0, items=['Super'])
#
# # - or with taking damage as well?
# (dmgRed, redItems) = sm.getDmgReduction(envDmg=False)
# dmg = nmyHitDmg / dmgRed
# if sm.heatProof() and (sm.itemCount('Super')*5*supDmg)/nmyHealth + (sm.energyReserveCount()*100 - 2)/dmg >= nbNmy:
# # require heat proof as long as taking damage is necessary.
# # display all the available energy in the solver.
# return sm.wand(sm.heatProof(), SMBool(True, 0, items=redItems+['Super', '{}-ETank - {}-Reserve'.format(self.smbm.itemCount('ETank'), self.smbm.itemCount('Reserve'))]))
#
# return sm.knowsDodgeLowerNorfairEnemies()
#
# def canKillRedKiHunters(self, n):
# sm = self.smbm
# destroy = sm.wor(sm.haveItem('Plasma'),
# sm.haveItem('ScrewAttack'),
# sm.wand(sm.heatProof(), # this takes a loooong time ...
# sm.wor(sm.haveItem('Spazer'),
# sm.haveItem('Ice'),
# sm.wand(sm.haveItem('Charge'),
# sm.haveItem('Wave')))))
# if destroy.bool == True:
# return destroy
# return sm.canGoThroughLowerNorfairEnemy(1800.0, float(n), 200.0)
#
# @Cache.decorator
# def canPassThreeMuskateers(self):
# sm = self.smbm
# return sm.canKillRedKiHunters(6)
#
# @Cache.decorator
# def canPassRedKiHunters(self):
# sm = self.smbm
# return sm.canKillRedKiHunters(3)
#
# @Cache.decorator
# def canPassWastelandDessgeegas(self):
# sm = self.smbm
# destroy = sm.wor(sm.haveItem('Plasma'),
# sm.haveItem('ScrewAttack'),
# sm.wand(sm.heatProof(), # this takes a loooong time ...
# sm.wor(sm.haveItem('Spazer'),
# sm.wand(sm.haveItem('Charge'),
# sm.haveItem('Wave')))),
# sm.itemCountOk('PowerBomb', 4))
# if destroy.bool == True:
# return destroy
#
# return sm.canGoThroughLowerNorfairEnemy(800.0, 3.0, 160.0)
#
# @Cache.decorator
# def canPassNinjaPirates(self):
# sm = self.smbm
# return sm.wor(sm.itemCountOk('Missile', 10),
# sm.itemCountOk('Super', 2),
# sm.haveItem('Plasma'),
# sm.wor(sm.haveItem('Spazer'),
# sm.wand(sm.haveItem('Charge'),
# sm.wor(sm.haveItem('Wave'),
# sm.haveItem('Ice')))))
#
# @Cache.decorator
# def canPassWorstRoomPirates(self):
# sm = self.smbm
# return sm.wor(sm.haveItem('ScrewAttack'),
# sm.itemCountOk('Missile', 6),
# sm.itemCountOk('Super', 3),
# sm.wor(sm.wand(sm.canFireChargedShots(), sm.haveItem('Plasma')),
# sm.wand(sm.haveItem('Charge'),
# sm.wor(sm.haveItem('Spazer'),
# sm.haveItem('Wave'),
# sm.haveItem('Ice'))),
# sm.knowsDodgeLowerNorfairEnemies()))
#
# # go though the pirates room filled with acid
# @Cache.decorator
# def canPassAmphitheaterReverse(self):
# sm = self.smbm
# dmgRed = sm.getDmgReduction()[0]
# nTanksGrav = 4 * 4/dmgRed
# nTanksNoGrav = 6 * 4/dmgRed
# return sm.wor(sm.wand(sm.haveItem('Gravity'),
# sm.energyReserveCountOk(nTanksGrav)),
# sm.wand(sm.energyReserveCountOk(nTanksNoGrav),
# sm.knowsLavaDive())) # should be a good enough skill filter for acid wall jumps with no grav...
#
# @Cache.decorator
# def canGetBackFromRidleyZone(self):
# sm = self.smbm
# return sm.wand(sm.wor(sm.canUseSpringBall(),
# sm.canUseBombs(),
# sm.haveItem('ScrewAttack'),
# sm.wand(sm.canUsePowerBombs(), sm.itemCountOk('PowerBomb', 2)),
# sm.wand(sm.haveItem('Morph'), sm.canShortCharge())), # speedball
# # in escape you don't have PBs and can't shoot bomb blocks in long tunnels
# # in wasteland and ki hunter room
# sm.wnot(sm.canUseHyperBeam()))
#
#
@Cache.decorator
def canExitMamaTurtle(self):
sm = self.smbm
# exit mama room
return sm.wand(sm.wor(sm.canFly(),
sm.haveItem('HiJump')),
        # go back to main street (use a crouched jump over the pirates)
sm.canGravLessLevel1())
@Cache.decorator
def canGoUpMtEverest(self):
sm = self.smbm
return sm.wor(sm.haveItem('Gravity'),
# TODO::try other suitless items / route through fish tank
sm.wand(sm.knowsGravLessLevel1(),
sm.haveItem('HiJump'),
sm.haveItem('Grapple')))
# @Cache.decorator
# def canJumpUnderwater(self):
# sm = self.smbm
# return sm.wor(sm.haveItem('Gravity'),
# sm.wand(sm.knowsGravLessLevel1(),
# sm.haveItem('HiJump')))
#
# @Cache.decorator
# def canPassBotwoonHallway(self):
# sm = self.smbm
# return sm.wor(sm.wand(sm.haveItem('SpeedBooster'),
# sm.haveItem('Gravity')),
# sm.wand(sm.knowsMochtroidClip(), sm.haveItem('Ice')),
# sm.canCrystalFlashClip())
#
@Cache.decorator
def canDefeatBotwoon(self):
sm = self.smbm
return sm.wand(sm.enoughStuffBotwoon(),
sm.haveItem('Morph'))
@Cache.decorator
def canReachCacatacAlleyFromBotowoon(self):
sm = self.smbm
# fall through the morph maze
return sm.wand(sm.haveItem('Morph'),
sm.canGravLessLevel1(),
# enter cacatac alley from halfie climb room
sm.wor(sm.haveItem('HiJump'),
sm.haveItem('Ice'),
sm.haveItem('SpeedBooster'),
sm.canFly()))
@Cache.decorator
def canPassCacatacAlley(self):
sm = self.smbm
return sm.wand(Bosses.bossDead(sm, 'Draygon'),
# cacatac alley suitless: hijump + gravless level 1
# butterfly room suitless: hijump + ice + gravless level 2
sm.wor(sm.haveItem('Gravity'),
sm.wand(sm.haveItem('HiJump'),
sm.haveItem('Ice'),
sm.knowsGravLessLevel2())))
# @Cache.decorator
# def canGoThroughColosseumSuitless(self):
# sm = self.smbm
# return sm.wor(sm.haveItem('Grapple'),
# sm.haveItem('SpaceJump'),
# sm.wand(sm.haveItem('Ice'),
# sm.energyReserveCountOk(int(7.0/sm.getDmgReduction(False)[0])), # mochtroid dmg
# sm.knowsBotwoonToDraygonWithIce()))
#
@Cache.decorator
def canEnterExitAqueduct(self):
        # could wait for snails and use them to jump over the hole in the middle,
        # only as a sequence break for now, as the snails deal a lot of damage suitless
sm = self.smbm
# break the pb and super blocks
        return sm.wand(sm.canUsePowerBombs(), sm.haveItem('Super'),
                       sm.wor(sm.wand(sm.haveItem('Gravity'),
                                      sm.wor(sm.canFly(),
                                             sm.haveItem('HiJump'))),
                              # IBJ underwater
                              sm.canInfiniteBombJumpSuitless()))
@Cache.decorator
def canGravLessLevel1(self):
sm = self.smbm
return sm.wor(sm.haveItem('Gravity'), sm.knowsGravLessLevel1())
@Cache.decorator
def canEnterExitBotwoon(self):
# used for post botwoon -> aqueduct bottom and post botwoon -> colosseum top right
sm = self.smbm
return sm.wand(sm.haveItem('Morph'),
sm.wor(sm.haveItem('Gravity'),
# hijump is enough for suitless
sm.wand(sm.knowsGravLessLevel1(), sm.haveItem('HiJump'))))
# @Cache.decorator
# def canColosseumToBotwoonExit(self):
# sm = self.smbm
# return sm.wor(sm.haveItem('Gravity'),
# sm.wand(sm.knowsGravLessLevel2(),
# sm.haveItem("HiJump"),
# sm.canGoThroughColosseumSuitless()))
#
# @Cache.decorator
# def canClimbColosseum(self):
# sm = self.smbm
# return sm.wor(sm.haveItem('Gravity'),
# sm.wand(sm.knowsGravLessLevel2(),
# sm.haveItem("HiJump"),
# sm.wor(sm.haveItem('Grapple'),
# sm.haveItem('Ice'),
# sm.knowsPreciousRoomGravJumpExit())))
#
# @Cache.decorator
# def canClimbWestSandHole(self):
# sm = self.smbm
# return sm.wor(sm.haveItem('Gravity'),
# sm.wand(sm.haveItem('HiJump'),
# sm.knowsGravLessLevel3(),
# sm.wor(sm.haveItem('SpaceJump'),
# sm.canSpringBallJump(),
# sm.knowsWestSandHoleSuitlessWallJumps())))
#
# @Cache.decorator
# def canAccessItemsInWestSandHole(self):
# sm = self.smbm
# return sm.wor(sm.wand(sm.haveItem('HiJump'), # vanilla strat
# sm.canUseSpringBall()),
# sm.wand(sm.haveItem('SpaceJump'), # alternate strat with possible double bomb jump but no difficult wj
# sm.wor(sm.canUseSpringBall(),
# sm.canUseBombs())),
# sm.wand(sm.canPassBombPassages(), # wjs and/or 3 tile mid air morph
# sm.knowsMaridiaWallJumps()))
#
# @Cache.decorator
# def getDraygonConnection(self):
# return getAccessPoint('DraygonRoomOut').ConnectedTo
#
# @Cache.decorator
# def isVanillaDraygon(self):
# return SMBool(self.getDraygonConnection() == 'DraygonRoomIn')
#
# @Cache.decorator
# def isVanillaCroc(self):
# crocRoom = getAccessPoint('Crocomire Room Top')
# return SMBool(crocRoom.ConnectedTo == 'Crocomire Speedway Bottom')
#
@Cache.decorator
def canFightDraygon(self):
sm = self.smbm
        return sm.wor(sm.haveItem('Gravity'),
                      sm.wor(sm.knowsGravLessLevel2(),
                             sm.knowsGravLessLevel3()))
# @Cache.decorator
# def canDraygonCrystalFlashSuit(self):
# sm = self.smbm
# return sm.wand(sm.canCrystalFlash(),
# sm.knowsDraygonRoomCrystalFlash(),
# # ask for 4 PB pack as an ugly workaround for
# # a rando bug which can place a PB at space
# # jump to "get you out" (this check is in
# # PostAvailable condition of the Dray/Space
# # Jump locs)
# sm.itemCountOk('PowerBomb', 4))
#
# @Cache.decorator
# def canExitDraygonRoomWithGravity(self):
# sm = self.smbm
# return sm.wand(sm.haveItem('Gravity'),
# sm.wor(sm.canFly(),
# sm.knowsGravityJump(),
# sm.wand(sm.haveItem('HiJump'),
# sm.haveItem('SpeedBooster'))))
#
# @Cache.decorator
# def canGrappleExitDraygon(self):
# sm = self.smbm
# return sm.wand(sm.haveItem('Grapple'),
# sm.knowsDraygonRoomGrappleExit())
#
# @Cache.decorator
# def canExitDraygonVanilla(self):
# sm = self.smbm
# # to get out of draygon room:
# # with gravity but without highjump/bomb/space jump: gravity jump
# # to exit draygon room: grapple or crystal flash (for free shine spark)
# # to exit precious room: spring ball jump, xray scope glitch or stored spark
# return sm.wor(sm.canExitDraygonRoomWithGravity(),
# sm.wand(sm.canDraygonCrystalFlashSuit(),
# # use the spark either to exit draygon room or precious room
# sm.wor(sm.canGrappleExitDraygon(),
# sm.wand(sm.haveItem('XRayScope'),
# sm.knowsPreciousRoomXRayExit()),
# sm.canSpringBallJump())),
# # spark-less exit (no CF)
# sm.wand(sm.canGrappleExitDraygon(),
# sm.wor(sm.wand(sm.haveItem('XRayScope'),
# sm.knowsPreciousRoomXRayExit()),
# sm.canSpringBallJump())),
# sm.canDoubleSpringBallJump())
#
# @Cache.decorator
# def canExitDraygonRandomized(self):
# sm = self.smbm
# # disregard precious room
# return sm.wor(sm.canExitDraygonRoomWithGravity(),
# sm.canDraygonCrystalFlashSuit(),
# sm.canGrappleExitDraygon(),
# sm.canDoubleSpringBallJump())
#
# @Cache.decorator
# def canExitDraygon(self):
# sm = self.smbm
# if self.isVanillaDraygon():
# return self.canExitDraygonVanilla()
# else:
# return self.canExitDraygonRandomized()
#
# @Cache.decorator
# def canExitPreciousRoomVanilla(self):
# return SMBool(True) # handled by canExitDraygonVanilla
#
# @Cache.decorator
# def canExitPreciousRoomRandomized(self):
# sm = self.smbm
# suitlessRoomExit = sm.canSpringBallJump()
# if suitlessRoomExit.bool == False:
# if self.getDraygonConnection() == 'KraidRoomIn':
# suitlessRoomExit = sm.canShortCharge() # charge spark in kraid's room
# elif self.getDraygonConnection() == 'RidleyRoomIn':
# suitlessRoomExit = sm.wand(sm.haveItem('XRayScope'), # get doorstuck in compatible transition
# sm.knowsPreciousRoomXRayExit())
# return sm.wor(sm.wand(sm.haveItem('Gravity'),
# sm.wor(sm.canFly(),
# sm.knowsGravityJump(),
# sm.haveItem('HiJump'))),
# suitlessRoomExit)
#
# def canExitPreciousRoom(self):
# if self.isVanillaDraygon():
# return self.canExitPreciousRoomVanilla()
# else:
# return self.canExitPreciousRoomRandomized()
|
theonlydude/RandomMetroidSolver
|
graph/rotation/graph_helpers.py
|
Python
|
mit
| 33,119
|
[
"CRYSTAL"
] |
ddf53b071f8ed332c3755b987fed118a96fe7addf56f6a2e4d81071f6c29b307
|
#!/usr/bin/env python3
import os, re, sys
from glob import glob
from argparse import ArgumentParser
parser = ArgumentParser(prog='check-styles.py',
description="Check style table completeness")
parser.add_argument("-v", "--verbose",
action='store_true',
help="Enable verbose output")
parser.add_argument("-d", "--doc",
help="Path to LAMMPS documentation sources")
parser.add_argument("-s", "--src",
help="Path to LAMMPS sources")
args = parser.parse_args()
verbose = args.verbose
src_dir = args.src
doc_dir = args.doc
LAMMPS_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))
if not src_dir:
src_dir = os.path.join(LAMMPS_DIR , 'src')
if not doc_dir:
doc_dir = os.path.join(LAMMPS_DIR, 'doc', 'src')
if not src_dir or not doc_dir:
parser.print_help()
sys.exit(1)
if not os.path.isdir(src_dir):
sys.exit(f"LAMMPS source path {src_dir} does not exist")
if not os.path.isdir(doc_dir):
sys.exit(f"LAMMPS documentation source path {doc_dir} does not exist")
headers = glob(os.path.join(src_dir, '*', '*.h'))
headers += glob(os.path.join(src_dir, '*.h'))
angle = {}
atom = {}
body = {}
bond = {}
command = {}
compute = {}
dihedral = {}
dump = {}
fix = {}
improper = {}
integrate = {}
kspace = {}
minimize = {}
pair = {}
reader = {}
region = {}
total = 0
index_pattern = re.compile(r"^.. index:: (compute|fix|pair_style|angle_style|bond_style|dihedral_style|improper_style|kspace_style)\s+([a-zA-Z0-9/_]+)$")
style_pattern = re.compile(r"(.+)Style\((.+),(.+)\)")
upper = re.compile("[A-Z]+")
gpu = re.compile("(.+)/gpu$")
intel = re.compile("(.+)/intel$")
kokkos = re.compile("(.+)/kk$")
kokkos_skip = re.compile("(.+)/kk/(host|device)$")
omp = re.compile("(.+)/omp$")
opt = re.compile("(.+)/opt$")
removed = re.compile("(.*)Deprecated$")
def load_index_entries_in_file(path):
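    """Collect (command_type, style) pairs from '.. index::' directives in one rst file."""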
entries = []
    with open(path, 'r') as rst_file:
        for line in rst_file:
m = index_pattern.match(line)
if m:
command_type = m.group(1)
style = m.group(2)
entries.append((command_type, style))
return entries
def load_index_entries():
index = {'compute': set(), 'fix': set(), 'pair_style': set(), 'angle_style': set(),
'bond_style': set(), 'dihedral_style': set(), 'improper_style': set(), 'kspace_style': set()}
rst_files = glob(os.path.join(doc_dir, '*.rst'))
for f in rst_files:
for command_type, style in load_index_entries_in_file(f):
index[command_type].add(style)
return index
def register_style(styles, name, info):
if name in styles:
for key, value in info.items():
styles[name][key] += value
else:
styles[name] = info
def add_suffix(styles, name):
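    """Return the style name annotated with accelerator flags: g=gpu, i=intel, k=kokkos, o=omp, t=opt."""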
suffix = ""
if styles[name]['gpu']:
suffix += 'g'
if styles[name]['intel']:
suffix += 'i'
if styles[name]['kokkos']:
suffix += 'k'
if styles[name]['omp']:
suffix += 'o'
if styles[name]['opt']:
suffix += 't'
if suffix:
return f"{name} ({suffix})"
else:
return name
def check_style(filename, dirname, pattern, styles, name, suffix=False, skip=set()):
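    """Print styles missing from the style listing in filename; return the number of issues found."""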
with open(os.path.join(dirname, filename)) as f:
text = f.read()
matches = re.findall(pattern, text, re.MULTILINE)
counter = 0
for c in styles:
# known undocumented aliases we need to skip
if c in skip: continue
s = c
if suffix: s = add_suffix(styles, c)
if not s in matches:
if not styles[c]['removed']:
print(f"{name} style entry {s} is missing or incomplete in {filename}")
counter += 1
return counter
def check_style_index(name, styles, index, skip=[]):
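    """Print styles (and their suffixed variants) lacking a '.. index::' entry; return the issue count."""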
counter = 0
for style in styles:
if style not in index and not styles[style]['removed'] and style not in skip:
print(f"{name} index entry {style} is missing")
counter += 1
for suffix in styles[style]:
if suffix == 'removed': continue
if suffix == 'kokkos':
suffix_style = f"{style}/kk"
else:
suffix_style = f"{style}/{suffix}"
if styles[style][suffix] and suffix_style not in index and style not in skip:
print(f"{name} index entry {suffix_style} is missing")
counter += 1
return counter
for header in headers:
if verbose: print("Checking ", header)
with open(header) as f:
for line in f:
matches = style_pattern.findall(line)
for m in matches:
# skip over internal styles w/o explicit documentation
style = m[1]
total += 1
if upper.match(style):
continue
# detect, process, and flag suffix styles:
info = { 'kokkos': 0, 'gpu': 0, 'intel': 0, \
'omp': 0, 'opt': 0, 'removed': 0 }
suffix = kokkos_skip.match(style)
if suffix:
continue
suffix = gpu.match(style)
if suffix:
style = suffix.groups()[0]
info['gpu'] = 1
suffix = intel.match(style)
if suffix:
style = suffix.groups()[0]
info['intel'] = 1
suffix = kokkos.match(style)
if suffix:
style = suffix.groups()[0]
info['kokkos'] = 1
suffix = omp.match(style)
if suffix:
style = suffix.groups()[0]
info['omp'] = 1
suffix = opt.match(style)
if suffix:
style = suffix.groups()[0]
info['opt'] = 1
deprecated = removed.match(m[2])
if deprecated:
info['removed'] = 1
# register style and suffix flags
if m[0] == 'Angle':
register_style(angle,style,info)
elif m[0] == 'Atom':
register_style(atom,style,info)
elif m[0] == 'Body':
register_style(body,style,info)
elif m[0] == 'Bond':
register_style(bond,style,info)
elif m[0] == 'Command':
register_style(command,style,info)
elif m[0] == 'Compute':
register_style(compute,style,info)
elif m[0] == 'Dihedral':
register_style(dihedral,style,info)
elif m[0] == 'Dump':
register_style(dump,style,info)
elif m[0] == 'Fix':
register_style(fix,style,info)
elif m[0] == 'Improper':
register_style(improper,style,info)
elif m[0] == 'Integrate':
register_style(integrate,style,info)
elif m[0] == 'KSpace':
register_style(kspace,style,info)
elif m[0] == 'Minimize':
register_style(minimize,style,info)
elif m[0] == 'Pair':
register_style(pair,style,info)
elif m[0] == 'Reader':
register_style(reader,style,info)
elif m[0] == 'Region':
register_style(region,style,info)
else:
print("Skipping over: ",m)
print("""Parsed style names w/o suffixes from C++ tree in %s:
Angle styles: %3d Atom styles: %3d
Body styles: %3d Bond styles: %3d
Command styles: %3d Compute styles: %3d
Dihedral styles: %3d Dump styles: %3d
Fix styles: %3d Improper styles: %3d
Integrate styles: %3d Kspace styles: %3d
Minimize styles: %3d Pair styles: %3d
Reader styles: %3d Region styles: %3d
----------------------------------------------------
Total number of styles (including suffixes): %d""" \
% (src_dir, len(angle), len(atom), len(body), len(bond), \
len(command), len(compute), len(dihedral), len(dump), \
len(fix), len(improper), len(integrate), len(kspace), \
len(minimize), len(pair), len(reader), len(region), total))
index = load_index_entries()
total_index = 0
for command_type, entries in index.items():
total_index += len(entries)
print("Total number of style index entries:", total_index)
skip_fix = ('python', 'NEIGH_HISTORY/omp','acks2/reax','qeq/reax','reax/c/bonds','reax/c/species')
skip_pair = ('meam/c','lj/sf','reax/c')
counter = 0
counter += check_style('Commands_all.rst', doc_dir, ":doc:`(.+) <.+>`",command,'Command',suffix=True)
counter += check_style('Commands_compute.rst', doc_dir, ":doc:`(.+) <compute.+>`",compute,'Compute',suffix=True)
counter += check_style('compute.rst', doc_dir, ":doc:`(.+) <compute.+>` -",compute,'Compute',suffix=False)
counter += check_style('Commands_fix.rst', doc_dir, ":doc:`(.+) <fix.+>`",fix,'Fix',skip=skip_fix,suffix=True)
counter += check_style('fix.rst', doc_dir, ":doc:`(.+) <fix.+>` -",fix,'Fix',skip=skip_fix,suffix=False)
counter += check_style('Commands_pair.rst', doc_dir, ":doc:`(.+) <pair.+>`",pair,'Pair',skip=skip_pair,suffix=True)
counter += check_style('pair_style.rst', doc_dir, ":doc:`(.+) <pair.+>` -",pair,'Pair',skip=skip_pair,suffix=False)
counter += check_style('Commands_bond.rst', doc_dir, ":doc:`(.+) <bond.+>`",bond,'Bond',suffix=True)
counter += check_style('bond_style.rst', doc_dir, ":doc:`(.+) <bond.+>` -",bond,'Bond',suffix=False)
counter += check_style('Commands_bond.rst', doc_dir, ":doc:`(.+) <angle.+>`",angle,'Angle',suffix=True)
counter += check_style('angle_style.rst', doc_dir, ":doc:`(.+) <angle.+>` -",angle,'Angle',suffix=False)
counter += check_style('Commands_bond.rst', doc_dir, ":doc:`(.+) <dihedral.+>`",dihedral,'Dihedral',suffix=True)
counter += check_style('dihedral_style.rst', doc_dir, ":doc:`(.+) <dihedral.+>` -",dihedral,'Dihedral',suffix=False)
counter += check_style('Commands_bond.rst', doc_dir, ":doc:`(.+) <improper.+>`",improper,'Improper',suffix=True)
counter += check_style('improper_style.rst', doc_dir, ":doc:`(.+) <improper.+>` -",improper,'Improper',suffix=False)
counter += check_style('Commands_kspace.rst', doc_dir, ":doc:`(.+) <kspace_style>`",kspace,'KSpace',suffix=True)
if counter:
print(f"Found {counter} issue(s) with style lists")
counter = 0
counter += check_style_index("compute", compute, index["compute"])
counter += check_style_index("fix", fix, index["fix"], skip=['python','acks2/reax','qeq/reax','reax/c/bonds','reax/c/species'])
counter += check_style_index("angle_style", angle, index["angle_style"])
counter += check_style_index("bond_style", bond, index["bond_style"])
counter += check_style_index("dihedral_style", dihedral, index["dihedral_style"])
counter += check_style_index("improper_style", improper, index["improper_style"])
counter += check_style_index("kspace_style", kspace, index["kspace_style"])
counter += check_style_index("pair_style", pair, index["pair_style"], skip=['meam/c', 'lj/sf','reax/c'])
if counter:
print(f"Found {counter} issue(s) with style index")
|
akohlmey/lammps
|
doc/utils/check-styles.py
|
Python
|
gpl-2.0
| 11,467
|
[
"LAMMPS"
] |
179d00d7b5e4bea2ba70e88483819d01b3fac0b6837d7c158d37c215d3ce07c4
|
from __future__ import print_function
import sys
import os
import io
try:
from setuptools import setup, find_packages
from setuptools.command.install import install as _install
except ImportError:
from distutils.core import setup
from distutils.command.install import install as _install
def find_packages():
return ['ugali','ugali.analysis','ugali.config','ugali.observation',
'ugali.preprocess','ugali.simulation','ugali.candidate',
'ugali.utils']
import distutils.cmd
import versioneer
VERSION = versioneer.get_version()
NAME = 'ugali'
HERE = os.path.abspath(os.path.dirname(__file__))
URL = 'https://github.com/DarkEnergySurvey/ugali'
DESC = "Ultra-faint galaxy likelihood toolkit."
LONG_DESC = "%s\n%s"%(DESC,URL)
CLASSIFIERS = """\
Development Status :: 4 - Beta
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Natural Language :: English
Operating System :: MacOS :: MacOS X
Operating System :: POSIX :: Linux
Programming Language :: Python :: 2
Programming Language :: Python :: 3
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Astronomy
Topic :: Scientific/Engineering :: Physics
"""
RELEASE_URL = URL+'/releases/download/v1.8.0'
UGALIDIR = os.getenv("UGALIDIR","$HOME/.ugali")
ISOSIZE = "~1MB"
CATSIZE = "~20MB"
TSTSIZE = "~1MB"
# Could find file size dynamically, but it's a bit slow...
# int(urllib.urlopen(ISOCHRONES).info().getheaders("Content-Length")[0])/1024**2
SURVEYS = ['des','ps1','sdss','lsst']
MODELS = ['bressan2012','marigo2017','dotter2008','dotter2016']
class ProgressFileIO(io.FileIO):
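    """FileIO subclass that draws a textual progress bar as the file is read."""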
def __init__(self, path, *args, **kwargs):
self._total_size = os.path.getsize(path)
io.FileIO.__init__(self, path, *args, **kwargs)
def read(self, size):
count = self.tell()/size
self.progress_bar(count,size,self._total_size)
return io.FileIO.read(self, size)
@staticmethod
def progress_bar(count, block_size, total_size):
block = 100*block_size/float(total_size)
progress = count*block
if progress % 5 < 1.01*block:
msg = '\r[{:51}] ({:d}%)'.format(int(progress//2)*'='+'>',int(progress))
sys.stdout.write(msg)
sys.stdout.flush()
class TarballCommand(distutils.cmd.Command,object):
""" Command for downloading data files """
description = "install data files"
user_options = [
('ugali-dir=',None,
'path to install data files [default: %s]'%UGALIDIR),
('force','f',
'force installation (overwrite any existing files)')
]
boolean_options = ['force']
release_url = RELEASE_URL
_tarball = None
_dirname = None
def initialize_options(self):
self.ugali_dir = os.path.expandvars(UGALIDIR)
self.force = False
# Not really the best way, but ok...
self.tarball = self._tarball
self.dirname = self._dirname
def finalize_options(self):
# Required by abstract base class
pass
@property
def path(self):
return os.path.join(self.ugali_dir,self.dirname)
def check_exists(self):
return os.path.exists(self.path)
def install_tarball(self, tarball):
try:
from urllib.request import urlopen, urlretrieve
from urllib.error import HTTPError
except ImportError:
from urllib import urlopen, urlretrieve
from urllib2 import HTTPError
import tarfile
if not os.path.exists(self.ugali_dir):
print("creating %s"%self.ugali_dir)
os.makedirs(self.ugali_dir)
os.chdir(self.ugali_dir)
url = os.path.join(self.release_url,tarball)
print("downloading %s..."%url)
if urlopen(url).getcode() >= 400:
raise Exception('url does not exist')
urlretrieve(url,tarball,reporthook=ProgressFileIO.progress_bar)
print('')
if not os.path.exists(tarball):
            raise HTTPError(url, 404, 'download failed', None, None)
print("extracting %s..."%tarball)
with tarfile.open(fileobj=ProgressFileIO(tarball),mode='r:gz') as tar:
## Check if the directory exists?
#if os.path.exists(tar.next().name) and not self.force:
# print("directory found; skipping installation")
tar.extractall()
tar.close()
print('')
print("removing %s"%tarball)
os.remove(tarball)
def run(self):
if self.dry_run:
print("skipping data install")
return
if self.check_exists():
print("found %s"%self.path)
if self.force:
print("overwriting directory")
else:
print("use '--force' to overwrite")
return
self.install_tarball(self.tarball)
class CatalogCommand(TarballCommand):
""" Command for downloading catalog files """
description = "install catalog files"
_tarball = 'ugali-catalogs.tar.gz'
_dirname = 'catalogs'
class TestsCommand(TarballCommand):
""" Command for downloading catalog files """
description = "install test data"
_tarball = 'ugali-test-data.tar.gz'
_dirname = 'testdata'
class IsochroneCommand(TarballCommand):
""" Command for downloading isochrone files """
description = "install isochrone files"
user_options = TarballCommand.user_options + [
('survey=',None,
'survey set [default: None]'),
('model=',None,
'isochrone model [default: None]')
]
_tarball = 'ugali-isochrones-tiny.tar.gz'
_dirname = 'isochrones'
def initialize_options(self):
super(IsochroneCommand,self).initialize_options()
self.survey = None
self.model = None
def finalize_options(self):
super(IsochroneCommand,self).finalize_options()
self._build_surveys()
self._build_models()
def _build_surveys(self):
if self.survey is None:
self.surveys = SURVEYS
else:
self.survey = self.survey.lower()
if self.survey not in SURVEYS:
raise Exception("unrecognized survey: '%s'"%self.survey)
self.surveys = [self.survey]
def _build_models(self):
if self.model is None:
self.models = MODELS
else:
self.model = self.model.lower()
if self.model not in MODELS:
raise Exception("unrecognized model: '%s'"%self.model)
self.models = [self.model]
def run(self):
if self.dry_run:
print("skipping data install")
return
if (self.survey is None) and (self.model is None):
self.tarball = self._tarball
self.dirname = self._dirname
super(IsochroneCommand,self).run()
return
for survey in self.surveys:
for model in self.models:
self.tarball = "ugali-%s-%s.tar.gz"%(survey,model)
self.dirname = "isochrones/%s/%s"%(survey,model)
super(IsochroneCommand,self).run()
class install(_install):
"""
Subclass the setuptools 'install' class.
"""
user_options = _install.user_options + [
('isochrones',None,"install isochrone files (%s)"%ISOSIZE),
('catalogs',None,"install catalog files (%s)"%CATSIZE),
('tests',None,"install test data (%s)"%TSTSIZE),
('ugali-dir=',None,"install file directory [default: %s]"%UGALIDIR),
]
boolean_options = _install.boolean_options + ['isochrones','catalogs']
def initialize_options(self):
_install.initialize_options(self)
self.ugali_dir = os.path.expandvars(UGALIDIR)
self.isochrones = False
self.catalogs = False
self.tests = False
def run(self):
# run superclass install
_install.run(self)
# Could ask user whether they want to install isochrones, but
# pip filters sys.stdout, so the prompt never gets sent:
# https://github.com/pypa/pip/issues/2732#issuecomment-97119093
if self.isochrones:
self.install_isochrones()
if self.catalogs:
self.install_catalogs()
if self.tests:
self.install_tests()
def install_isochrones(self):
"""
Call to isochrone install command:
http://stackoverflow.com/a/24353921/4075339
"""
cmd_obj = self.distribution.get_command_obj('isochrones')
cmd_obj.force = self.force
if self.ugali_dir: cmd_obj.ugali_dir = self.ugali_dir
self.run_command('isochrones')
def install_catalogs(self):
"""
Call to catalog install command:
http://stackoverflow.com/a/24353921/4075339
"""
cmd_obj = self.distribution.get_command_obj('catalogs')
cmd_obj.force = self.force
if self.ugali_dir: cmd_obj.ugali_dir = self.ugali_dir
self.run_command('catalogs')
def install_tests(self):
"""
        Call to test data install command:
http://stackoverflow.com/a/24353921/4075339
"""
cmd_obj = self.distribution.get_command_obj('tests')
cmd_obj.force = self.force
if self.ugali_dir: cmd_obj.ugali_dir = self.ugali_dir
self.run_command('tests')
CMDCLASS = versioneer.get_cmdclass()
CMDCLASS['isochrones'] = IsochroneCommand
CMDCLASS['catalogs'] = CatalogCommand
CMDCLASS['tests'] = TestsCommand
CMDCLASS['install'] = install
setup(
name=NAME,
version=VERSION,
cmdclass=CMDCLASS,
url=URL,
author='Keith Bechtol & Alex Drlica-Wagner',
author_email='bechtol@wisc.edu, kadrlica@fnal.gov',
scripts = [],
install_requires=[
'astropy',
'matplotlib',
'numpy >= 1.9.0',
'scipy >= 0.14.0',
'healpy >= 1.6.0',
'fitsio >= 0.9.10',
'emcee >= 2.1.0',
'corner >= 1.0.0',
'pyyaml >= 3.10',
],
packages=find_packages(),
description=DESC,
long_description=LONG_DESC,
platforms='any',
classifiers = [_f for _f in CLASSIFIERS.split('\n') if _f]
)
|
DarkEnergySurvey/ugali
|
setup.py
|
Python
|
mit
| 10,264
|
[
"Galaxy"
] |
92fc565a6e172ed7f16f84f88cf1dade56aafb1f327f1293f5efe9fc73d59562
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# freeseer - vga/presentation capture software
#
# Copyright (C) 2014 Free and Open Source Software Learning Centre
# http://fosslc.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For support, questions, suggestions or any other inquiries, visit:
# http://wiki.github.com/Freeseer/freeseer/
import os
import unittest
from mock import Mock
import pytest
from freeseer.framework.youtube import Response
from freeseer.framework.youtube import YoutubeService
class TestYoutubeService(unittest.TestCase):
SAMPLE_VIDEO = os.path.join(os.path.dirname(__file__), 'sample_video.ogg')
SAMPLE_VIDEO_METADATA = {
'tags': [
'Freeseer',
'FOSSLC',
'Open Source',
],
'categoryId': 27,
'description': 'At Test by Alex recorded on 2014-03-09',
'title': u'Test',
}
def test_get_metadata(self):
"""Test retrieval of metadata from video file.
Case: Returned metadata should be equal to sample video's metadata."""
metadata = YoutubeService.get_metadata(self.SAMPLE_VIDEO)
self.assertDictEqual(self.SAMPLE_VIDEO_METADATA, metadata)
def test_upload_video(self):
"""Test uploading a video file using mocks"""
youtube = YoutubeService()
youtube.upload_video = Mock(return_value=(Response.SUCCESS, None))
response_code, response = youtube.upload_video(self.SAMPLE_VIDEO)
youtube.upload_video.assert_called_with(self.SAMPLE_VIDEO)
self.assertEqual(Response.SUCCESS, response_code)
@pytest.mark.parametrize("video, expected", [
("/path/to/test.ogg", True),
("test.webm", True),
("asdfg.qwergb", False),
])
def test_valid_video_file(video, expected):
"""Tests valid_video_file function for all test cases."""
assert YoutubeService.valid_video_file(video) == expected
|
Freeseer/freeseer
|
src/freeseer/tests/framework/test_youtube.py
|
Python
|
gpl-3.0
| 2,479
|
[
"VisIt"
] |
53bdd28a88f0736679d99a1e57946564174dcb28c9034a63461110b555b16acc
|
"""
================================
Workshop: Dartmouth College 2010
================================
First let's go to the directory with the data we'll be working on and start the interactive python interpreter
(with some nipype specific configuration). Note that nipype does not need to be run through ipython - it is
just much nicer to do interactive work in it.
.. sourcecode:: bash
cd $TDPATH
ipython -p nipype
For every neuroimaging procedure supported by nipype there exists a wrapper - a small piece of code managing
the underlying software (FSL, SPM, AFNI etc.). We call those interfaces. They are standardised so we can hook them
together. Let's have a look at some of them.
.. sourcecode:: ipython
In [1]: import nipype.interfaces.fsl as fsl
In [2]: fsl.BET.help()
Inputs
------
Mandatory:
in_file: input file to skull strip
Optional:
args: Additional parameters to the command
center: center of gravity in voxels
environ: Environment variables (default={})
frac: fractional intensity threshold
functional: apply to 4D fMRI data
mutually exclusive: functional, reduce_bias
mask: create binary mask image
mesh: generate a vtk mesh brain surface
no_output: Don't generate segmented output
out_file: name of output skull stripped image
outline: create surface outline image
output_type: FSL output type
radius: head radius
reduce_bias: bias field and neck cleanup
mutually exclusive: functional, reduce_bias
skull: create skull image
threshold: apply thresholding to segmented brain image and mask
vertical_gradient: vertical gradient in fractional intensity threshold (-1, 1)
Outputs
-------
mask_file: path/name of binary brain mask (if generated)
meshfile: path/name of vtk mesh file (if generated)
out_file: path/name of skullstripped file
outline_file: path/name of outline file (if generated)
In [3]: import nipype.interfaces.freesurfer as fs
In [4]: fs.Smooth.help()
Inputs
------
Mandatory:
in_file: source volume
num_iters: number of iterations instead of fwhm
mutually exclusive: surface_fwhm
reg_file: registers volume to surface anatomical
surface_fwhm: surface FWHM in mm
mutually exclusive: num_iters
requires: reg_file
Optional:
args: Additional parameters to the command
environ: Environment variables (default={})
proj_frac: project frac of thickness a long surface normal
mutually exclusive: proj_frac_avg
proj_frac_avg: average a long normal min max delta
mutually exclusive: proj_frac
smoothed_file: output volume
subjects_dir: subjects directory
vol_fwhm: volumesmoothing outside of surface
Outputs
-------
args: Additional parameters to the command
environ: Environment variables
smoothed_file: smoothed input volume
subjects_dir: subjects directory
You can read about all of the interfaces implemented in nipype at our online documentation at http://nipy.sourceforge.net/nipype/documentation.html#documentation .
Check it out now.
Using interfaces
----------------
Having interfaces allows us to use third party software (like FSL BET) as a function. Look how simple it is.
"""
from __future__ import print_function
from builtins import str
import nipype.interfaces.fsl as fsl
result = fsl.BET(in_file='data/s1/struct.nii').run()
print(result)
"""
Running a single program is not much of a breakthrough. Lets run motion correction followed by smoothing
(isotropic - in other words not using SUSAN). Notice that in the first line we are setting the output data type
for all FSL interfaces.
"""
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
result1 = fsl.MCFLIRT(in_file='data/s1/f3.nii').run()
result2 = fsl.Smooth(in_file='f3_mcf.nii.gz', fwhm=6).run()
"""
Simple workflow
---------------
In the previous example we knew that fsl.MCFLIRT would produce a file called f3_mcf.nii.gz and we hard-coded
this as an input to fsl.Smooth. This is quite limited, but luckily nipype supports joining interfaces in pipelines.
This way the output of one interface will be used as an input of another without having to hard-code anything. Before
connecting Interfaces we need to put them into (separate) Nodes and give them unique names. This way every interface will
process data in a separate folder.
"""
import nipype.pipeline.engine as pe
import os
motion_correct = pe.Node(interface=fsl.MCFLIRT(in_file=os.path.abspath('data/s1/f3.nii')),
name="motion_correct")
smooth = pe.Node(interface=fsl.Smooth(fwhm=6), name="smooth")
motion_correct_and_smooth = pe.Workflow(name="motion_correct_and_smooth")
motion_correct_and_smooth.base_dir = os.path.abspath('.') # define where will be the root folder for the workflow
motion_correct_and_smooth.connect([
(motion_correct, smooth, [('out_file', 'in_file')])
])
# we are connecting 'out_file' output of motion_correct to 'in_file' input of smooth
motion_correct_and_smooth.run()
"""
Another workflow
----------------
Another example of a simple workflow (calculate the mean of fMRI signal and subtract it).
This time we'll be assigning inputs after defining the workflow.
"""
calc_mean = pe.Node(interface=fsl.ImageMaths(), name="calc_mean")
calc_mean.inputs.op_string = "-Tmean"
subtract = pe.Node(interface=fsl.ImageMaths(), name="subtract")
subtract.inputs.op_string = "-sub"
demean = pe.Workflow(name="demean")
demean.base_dir = os.path.abspath('.')
demean.connect([
(calc_mean, subtract, [('out_file', 'in_file2')])
])
demean.inputs.calc_mean.in_file = os.path.abspath('data/s1/f3.nii')
demean.inputs.subtract.in_file = os.path.abspath('data/s1/f3.nii')
demean.run()
"""
Reusing workflows
-----------------
The beauty of the workflows is that they are reusable. We can just import a workflow made by someone
else and feed it with our data.
"""
from fmri_fsl import preproc
preproc.base_dir = os.path.abspath('.')
preproc.inputs.inputspec.func = os.path.abspath('data/s1/f3.nii')
preproc.inputs.inputspec.struct = os.path.abspath('data/s1/struct.nii')
preproc.run()
"""
... and we can run it again and it won't actually rerun anything because none of
the parameters have changed.
"""
preproc.run()
"""
... and we can change a parameter and run it again. Only the dependent nodes
are rerun and that too only if the input state has changed.
"""
preproc.inputs.meanfuncmask.frac = 0.5
preproc.run()
"""
Visualizing workflows 1
-----------------------
So what did we run in this precanned workflow
"""
preproc.write_graph()
"""
Datasink
--------
Datasink is a special interface for copying and arranging results.
"""
import nipype.interfaces.io as nio
preproc.inputs.inputspec.func = os.path.abspath('data/s1/f3.nii')
preproc.inputs.inputspec.struct = os.path.abspath('data/s1/struct.nii')
datasink = pe.Node(interface=nio.DataSink(), name='sinker')
preprocess = pe.Workflow(name='preprocout')
preprocess.base_dir = os.path.abspath('.')
preprocess.connect([
(preproc, datasink, [('meanfunc2.out_file', 'meanfunc'),
('maskfunc3.out_file', 'funcruns')])
])
preprocess.run()
"""
Datagrabber
-----------
Datagrabber is (surprise, surprise) an interface for collecting files from the hard drive. It is very flexible and
supports almost any file organisation of your data you can imagine.
"""
datasource1 = nio.DataGrabber()
datasource1.inputs.template = 'data/s1/f3.nii'
datasource1.inputs.sort_filelist = True
results = datasource1.run()
print(results.outputs)
datasource2 = nio.DataGrabber()
datasource2.inputs.template = 'data/s*/f*.nii'
datasource2.inputs.sort_filelist = True
results = datasource2.run()
print(results.outputs)
datasource3 = nio.DataGrabber(infields=['run'])
datasource3.inputs.template = 'data/s1/f%d.nii'
datasource3.inputs.sort_filelist = True
datasource3.inputs.run = [3, 7]
results = datasource3.run()
print(results.outputs)
datasource4 = nio.DataGrabber(infields=['subject_id', 'run'])
datasource4.inputs.template = 'data/%s/f%d.nii'
datasource4.inputs.sort_filelist = True
datasource4.inputs.run = [3, 7]
datasource4.inputs.subject_id = ['s1', 's3']
results = datasource4.run()
print(results.outputs)
"""
Iterables
---------
Iterables is a special field of the Node class that makes it possible to iterate all workflows/nodes connected to it over
some parameters. Here we'll use it to iterate over two subjects.
"""
import nipype.interfaces.utility as util
infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),
name="infosource")
infosource.iterables = ('subject_id', ['s1', 's3'])
datasource = pe.Node(nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), name="datasource")
datasource.inputs.template = '%s/%s.nii'
datasource.inputs.base_directory = os.path.abspath('data')
datasource.inputs.template_args = dict(func=[['subject_id', 'f3']], struct=[['subject_id', 'struct']])
datasource.inputs.sort_filelist = True
my_workflow = pe.Workflow(name="my_workflow")
my_workflow.base_dir = os.path.abspath('.')
my_workflow.connect([(infosource, datasource, [('subject_id', 'subject_id')]),
(datasource, preproc, [('func', 'inputspec.func'),
('struct', 'inputspec.struct')])])
my_workflow.run()
"""
and we can change a node attribute and run it again
"""
smoothnode = my_workflow.get_node('preproc.smooth')
assert(str(smoothnode) == 'preproc.smooth')
smoothnode.iterables = ('fwhm', [5., 10.])
my_workflow.run()
"""
Visualizing workflows 2
-----------------------
In the case of nested workflows, we might want to look at expanded forms of the workflow.
"""
|
BrainIntensive/OnlineBrainIntensive
|
resources/nipype/nipype/examples/workshop_dartmouth_2010.py
|
Python
|
mit
| 9,774
|
[
"VTK"
] |
ab121da835f92e2bf59b86528c910e1eeba316434bd7c9398d0777108e2f428d
|
###########################################################
# SPEpy - simplified parquet equation solver for SIAM #
# Copyright (C) 2019 Vladislav Pokorny; pokornyv@fzu.cz #
# homepage: github.com/pokornyv/SPEpy #
# siam_parquet.py - solver for SPE #
# method described in Phys. Rev. B 100, 195114 (2019). #
###########################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import scipy as sp
from scipy.integrate import simps
from scipy.optimize import brentq
from sys import argv,exit,version_info
from os import listdir
from time import ctime,time
from parlib import *
from parlib2 import *
t = time()
hashes = '#'*80
## python version
ver = str(version_info[0])+'.'+str(version_info[1])+'.'+str(version_info[2])
## header for files so we store the parameters along with data
parline = '# U = {0: .5f}, Delta = {1: .5f}, ed = {2: .5f}, h = {3: .5f}, T = {4: .5f}'\
.format(U,Delta,ed,h,T)
parfname = str(GFtype)+'_U'+str(U)+'eps'+str(ed)+'T'+str(T)+'h'+str(h)
## print the header #######################################
if chat:
print(hashes+'\n# generated by '+str(argv[0])+', '+str(ctime()))
print('# python version: '+str(ver)+', SciPy version: '+str(sp.version.version))
print('# energy axis: [{0: .5f} ..{1: .5f}], step = {2: .5f}, length = {3: 3d}'\
.format(En_A[0],En_A[-1],dE,len(En_A)))
print(parline)
print('# Kondo temperature from Bethe ansatz: Tk ~{0: .5f}'\
.format(float(KondoTemperature(U,Delta,ed))))
if SC: print('# using partial self-consistency scheme for the self-energy')
elif FSC: print('# using full self-consistency scheme for the self-energy')
else: print('# using no self-consistency scheme for the self-energy')
if SC and FSC: SC = False
if SCsolver == 'fixed':
print('# using Steffensen fixed-point algorithm to calculate Lambda vertex')
elif SCsolver == 'root':
print('# using MINPACK root to calculate Lambda vertex')
else:
print('# using iteration algorithm to calculate Lambda vertex, mixing parameter alpha = {0: .5f}'\
.format(float(alpha)))
###########################################################
## initialize the non-interacting Green function ##########
if GFtype == 'lor':
if chat: print('# using Lorentzian non-interacting DoS')
GFlambda = lambda x: GreensFunctionLorenz(x,Delta)
DensityLambda = lambda x: DensityLorentz(x,Delta)
elif GFtype == 'semi':
if chat: print('# using semielliptic non-interacting DoS')
W = Delta ## half-bandwidth
GFlambda = lambda x: GreensFunctionSemi(x,W)
DensityLambda = lambda x: DensitySemi(x,W)
elif GFtype == 'gauss':
if chat: print('# using Gaussian non-interacting DoS')
GFlambda = lambda x: GreensFunctionGauss(x,Delta)
DensityLambda = lambda x: DensityGauss(x,Delta)
else:
print('# Error: DoS type "'+GFtype+'" not implemented.')
exit(1)
## using the Lambda from the older method as a starting point
if not Lin:
if chat: print('# calculating the fully static vertex at half-filling as a starting point:')
GFzero_A = GFlambda(En_A)
Bubble_A = TwoParticleBubble(GFzero_A,GFzero_A,'eh')
Lambda0 = CalculateLambda(Bubble_A,GFzero_A,GFzero_A)
if chat: print('# - Lambda0 = {0: .8f}'.format(Lambda0))
else:
if chat: print('# Initial guess for Lambda: {0: .6f}'.format(LIn))
########################################################
## calculate filling of the thermodynamic Green function
if chat: print('#\n# calculating the initial thermodynamic Green function:')
[nTup,nTdn] = [0.5,0.5]
[nTupOld,nTdnOld] = [1e8,1e8]
k = 1
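## self-consistency loop: with the opposite-spin filling held fixed, brentq solves n = n[G(n)]
## for each spin; repeat until both fillings change by less than epsn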
while any([sp.fabs(nTupOld-nTup) > epsn, sp.fabs(nTdnOld-nTdn) > epsn]):
[nTupOld,nTdnOld] = [nTup,nTdn]
if T == 0.0:
nup_dens = lambda x: DensityLambda(ed+U/2.0*(x+nTdn-1.0)-h) - x
ndn_dens = lambda x: DensityLambda(ed+U/2.0*(nTup+x-1.0)+h) - x
else:
nup_dens = lambda x: Filling(GFlambda(En_A-ed-U/2.0*(x+nTdn-1.0)+h)) - x
ndn_dens = lambda x: Filling(GFlambda(En_A-ed-U/2.0*(nTup+x-1.0)-h)) - x
nTup = brentq(nup_dens,0.0,1.0,xtol = epsn)
nTdn = brentq(ndn_dens,0.0,1.0,xtol = epsn)
if chat: print('# - - {0: 3d}: nUp: {1: .8f}, nDn: {2: .8f}'.format(k,nTup,nTdn))
k += 1
## fill the Green functions
GFTup_A = GFlambda(En_A-ed-U/2.0*(nTup+nTdn-1.0)+h)
GFTdn_A = GFlambda(En_A-ed-U/2.0*(nTup+nTdn-1.0)-h)
## write non-interacting GF to a file, development only
#WriteFileX([GFTup_A,GFTdn_A],WriteMax,WriteStep,parline,'GFTzero.dat')
if chat: print('# - norm[GTup]: {0: .8f}, n[GTup]: {1: .8f}'\
.format(float(IntDOS(GFTup_A)),float(nTup)))
if chat: print('# - norm[GTdn]: {0: .8f}, n[GTdn]: {1: .8f}'\
.format(float(IntDOS(GFTdn_A)),float(nTdn)))
if chat: print('# - nT = {0: .8f}, mT = {1: .8f}'.format(float(nTup+nTdn),float(nTup-nTdn)))
###########################################################
## calculate the Lambda vertex ############################
if chat:
if FSC: print('#\n# calculating the full self-energy using FSC scheme:')
else: print('#\n# calculating the Hartree-Fock self-energy:')
if Lin: ## reading initial values from command line
Lambda = LIn
else: ## using the static guess
Lambda = Lambda0
[nTupOld,nTdnOld] = [1e8,1e8]
[Sigma0,Sigma1] = [U*(nTup+nTdn-1.0)/2.0,Lambda*(nTdn-nTup)/2.0]
k = 1
sumsq = 1e8 if FSC else 0.0 ## convergence criterion for the FSC scheme
while any([sp.fabs(nTupOld-nTup) > epsn, sp.fabs(nTdnOld-nTdn) > epsn, sumsq > 0.01]):
if chat: print('#\n# Iteration {0: 3d}'.format(k))
[nTupOld,nTdnOld] = [nTup,nTdn]
if FSC:
GFTupOld_A = sp.copy(GFTup_A)
## Lambda vertex
if chat: print('# - calculating Lambda vertex:')
Lambda = CalculateLambdaD(GFTup_A,GFTdn_A,Lambda)
if chat: print('# - - Lambda vertex: Lambda: {0: .8f}'.format(Lambda))
if True: ## print auxiliary functions, development only
# if False:
K = KvertexD(Lambda,GFTup_A,GFTdn_A)
if chat: print('# - - K vertex: K: {0: .8f}'.format(K))
## check the integrals:
XD = ReBDDFDD(GFTup_A,GFTdn_A,0)
if chat: print('# - - aux. integral: X: {0: .8f}'.format(XD))
## HF self-energy
if chat: print('# - calculating static self-energy:')
[Sigma0,Sigma1] = CalculateSigmaT(Lambda,Sigma0,Sigma1,GFlambda,DensityLambda)
if chat: print('# - - static self-energy: normal: {0: .8f}, anomalous: {1: .8f}'.format(Sigma0,Sigma1))
GFTup_A = GFlambda(En_A-ed-Sigma0+(h-Sigma1))
GFTdn_A = GFlambda(En_A-ed-Sigma0-(h-Sigma1))
## symmetrize the Green function if possible
if h == 0.0:
if chat: print('# - h = 0, averaging Green functions over spin to avoid numerical errors')
GFavg_A = (GFTup_A+GFTdn_A)/2.0 ## average once, then assign to both spins; assigning in sequence would average the already-averaged up-spin GF a second time
GFTup_A = sp.copy(GFavg_A)
GFTdn_A = sp.copy(GFavg_A)
Sigma1 = 0.0
## recalculate filling and magnetization
if any([ed!=0.0,h!=0.0]):
if T == 0.0:
nTup = DensityLambda(ed+Sigma0-(h-Sigma1))
nTdn = DensityLambda(ed+Sigma0+(h-Sigma1))
else:
nTup = Filling(GFTup_A)
nTdn = Filling(GFTdn_A)
else: ## ed = 0 and h = 0
nTup = nTdn = 0.5
## this is to convert complex to float, the warning is just a sanity check
if any([sp.fabs(sp.imag(nTup))>1e-6,sp.fabs(sp.imag(nTdn))>1e-6,]):
print('# Warning: non-zero imaginary part of nT, up: {0: .8f}, dn: {1: .8f}.'\
.format(sp.imag(nTup),sp.imag(nTdn)))
[nTup,nTdn] = [sp.real(nTup),sp.real(nTdn)]
if FSC:
## spectral self-energy ###################################
SigmaUp_A = SelfEnergyD2(GFTup_A,GFTdn_A,Lambda,'up')
SigmaDn_A = SelfEnergyD2(GFTup_A,GFTdn_A,Lambda,'dn')
Sigma_A = (SigmaUp_A+SigmaDn_A)/2.0
## interacting Green function #############################
GFTup_A = GFlambda(En_A-ed-U/2.0*(nTup+nTdn-1.0)+(h-Sigma1)-Sigma_A)
GFTdn_A = GFlambda(En_A-ed-U/2.0*(nTup+nTdn-1.0)-(h-Sigma1)-Sigma_A)
## print output for given iteration
if chat:
print('# - thermodynamic Green function filling: nTup = {0: .8f}, nTdn = {1: .8f}'.format(nTup,nTdn))
print('# - ed = {0: .4f}, h = {1: .4f}: nT = {2: .8f}, mT = {3: .8f}'.format(ed,h,nTup+nTdn,nTup-nTdn))
print('{0: 3d}\t{1: .8f}\t{2: .8f}\t{3: .8f}\t{4: .8f}'.format(k,nTup,nTdn,nTup+nTdn,nTup-nTdn))
if FSC:
sumsq = sp.sum(sp.imag(GFTupOld_A-GFTup_A)[int(0.5*Nhalf):int(1.5*Nhalf)]**2)
if chat: print('# Sum of squares: {0: .8f}'.format(sumsq))
k+=1
if chat:
if FSC: print('# - Calculation of the full self-energy finished after {0: 3d} iterations.'.format(int(k-1)))
else: print('# - Calculation of the Hartree-Fock self-energy finished after {0: 3d} iterations.'.format(int(k-1)))
Det_A = DeterminantGD(Lambda,GFTup_A,GFTdn_A)
Dzero = Det_A[int((len(En_A)-1)/2)]
if chat: print('# - determinant at zero energy: {0: .8f} {1:+8f}i'.format(sp.real(Dzero),sp.imag(Dzero)))
## write the determinant to a file, for development only
#WriteFileX([GFTup_A,GFTdn_A,Det_A],WriteMax,WriteStep,parline,'DetG.dat')
if SC: ## partial self-consistency between Sigma and G:
if chat: print('#\n# calculating the spectral self-energy:')
parfname = 'SC_'+ parfname
k = 1
sumsq = 1e8
GFintUp_A = sp.copy(GFTup_A)
GFintDn_A = sp.copy(GFTdn_A)
[nUp,nDn] = [nTup,nTdn]
while sumsq > 0.06:
GFintUpOld_A = sp.copy(GFintUp_A)
## spectral self-energy ###################################
if chat: print('#\n# Iteration {0: 3d}'.format(k))
SigmaUp_A = SelfEnergyD_sc(GFintUp_A,GFintDn_A,GFTup_A,GFTdn_A,Lambda,'up')
SigmaDn_A = SelfEnergyD_sc(GFintUp_A,GFintDn_A,GFTup_A,GFTdn_A,Lambda,'dn')
Sigma_A = (SigmaUp_A+SigmaDn_A)/2.0
## interacting Green function #############################
GFintUp_A = GFlambda(En_A-ed-U/2.0*(nUp+nDn-1.0)+(h-Sigma1)-Sigma_A)
GFintDn_A = GFlambda(En_A-ed-U/2.0*(nUp+nDn-1.0)-(h-Sigma1)-Sigma_A)
if any([ed!=0.0,h!=0.0]):
[nUp,nDn] = [Filling(GFintUp_A),Filling(GFintDn_A)]
else: ## ed = 0 and h = 0
[nUp,nDn] = [0.5,0.5]
if chat: print('# - - {0: 3d}: nUp: {1: .8f}, nDn: {2: .8f}'.format(k,nUp,nDn))
sumsq = sp.sum(sp.imag(GFintUpOld_A-GFintUp_A)[int(0.5*Nhalf):int(1.5*Nhalf)]**2)
if chat: print('# Sum of squares: {0: .8f}'.format(sumsq))
k+=1
elif FSC: ## full self-consistency between Sigma and G:
parfname = 'FSC_'+ parfname
GFintUp_A = sp.copy(GFTup_A)
GFintDn_A = sp.copy(GFTdn_A)
if any([ed!=0.0,h!=0.0]): [nUp,nDn] = [Filling(GFintUp_A),Filling(GFintDn_A)]
else: [nUp,nDn] = [0.5,0.5]
else:
## spectral self-energy ###################################
if chat: print('#\n# calculating the spectral self-energy')
SigmaUp_A = SelfEnergyD2(GFTup_A,GFTdn_A,Lambda,'up')
SigmaDn_A = SelfEnergyD2(GFTup_A,GFTdn_A,Lambda,'dn')
Sigma_A = (SigmaUp_A+SigmaDn_A)/2.0
## interacting Green function #############################
if chat: print('#\n# calculating the spectral Green function:')
if chat: print('# - iterating the final density:')
[nUp,nDn] = [nTup,nTdn]
[nUpOld,nDnOld] = [1e8,1e8]
k = 1
while any([sp.fabs(nUpOld-nUp) > epsn, sp.fabs(nDnOld-nDn) > epsn]):
[nUpOld,nDnOld] = [nUp,nDn]
nup_dens = lambda x: Filling(GFlambda(En_A-ed-U/2.0*(x+nDn-1.0)+(h-Sigma1)-Sigma_A)) - x
ndn_dens = lambda x: Filling(GFlambda(En_A-ed-U/2.0*(nUp+x-1.0)-(h-Sigma1)-Sigma_A)) - x
nUp = brentq(nup_dens,0.0,1.0,xtol = epsn)
nDn = brentq(ndn_dens,0.0,1.0,xtol = epsn)
if chat: print('# - - {0: 3d}: nUp: {1: .8f}, nDn: {2: .8f}'.format(k,nUp,nDn))
k += 1
GFintUp_A = GFlambda(En_A-ed-U/2.0*(nUp+nDn-1.0)+(h-Sigma1)-Sigma_A)
GFintDn_A = GFlambda(En_A-ed-U/2.0*(nUp+nDn-1.0)-(h-Sigma1)-Sigma_A)
###########################################################
## calculate properties ###################################
## quasiparticle weights
[Zup,dReSEupdw] = QuasiPWeight(sp.real(SigmaUp_A))
[Zdn,dReSEdndw] = QuasiPWeight(sp.real(SigmaDn_A))
[Z,dReSEdw] = QuasiPWeight(sp.real(Sigma_A))
if chat: print('# quasiparticle weight:')
if chat: print('# - Z = {0: .8f}, DReSE/dw[0] = {1: .8f}, m*/m = {2: .8f}'\
.format(float(Z),float(dReSEdw),float(1.0/Z)))
if chat and h!=0.0:
print('# - up spin: Z = {0: .8f}, DReSE/dw[0] = {1: .8f}, m*/m = {2: .8f}'\
.format(float(Zup),float(dReSEupdw),float(1.0/Zup)))
print('# - dn spin: Z = {0: .8f}, DReSE/dw[0] = {1: .8f}, m*/m = {2: .8f}'\
.format(float(Zdn),float(dReSEdndw),float(1.0/Zdn)))
## DoS at Fermi energy
DOSFup = -sp.imag(GFintUp_A[int(N/2)])/sp.pi
DOSFdn = -sp.imag(GFintDn_A[int(N/2)])/sp.pi
## filling
[nUp,nDn] = [Filling(GFintUp_A),Filling(GFintDn_A)]
if chat:
print('# - spectral Green function filling: nUp = {0: .8f}, nDn = {1: .8f}'.format(nUp,nDn))
print('# - ed = {0: .4f}, h = {1: .4f}: n = {2: .8f}, m = {3: .8f}'.format(ed,h,nUp+nDn,nUp-nDn))
## HWHM of the spectral function
[HWHMup,DOSmaxUp,wmaxUp] = CalculateHWHM(GFintUp_A)
[HWHMdn,DOSmaxDn,wmaxDn] = CalculateHWHM(GFintDn_A)
if any([HWHMup == 0.0,HWHMdn == 0.0]) and chat:
print('# - Warning: HWHM cannot be calculated, setting it to zero.')
elif any([HWHMup < dE,HWHMdn < dE]):
print('# - Warning: HWHM smaller than energy resolution.')
if chat: print('# - spin-up: DOS[0] = {0: .8f}, maximum of DoS: {1: .8f} at w = {2: .8f}'\
.format(float(DOSFup),float(DOSmaxUp),float(wmaxUp)))
if h!=0.0 and chat:
print('# - spin-dn: DOS[0] = {0: .8f}, maximum of DoS: {1: .8f} at w = {2: .8f}'\
.format(float(DOSFdn),float(DOSmaxDn),float(wmaxDn)))
if chat: print('# - HWHM: spin-up: {0: .8f}, spin-dn: {1: .8f}'.format(float(HWHMup),float(HWHMdn)))
## zero-field susceptibility
if h==0.0:
ChiT = sp.real(SusceptibilityTherm(Dzero,GFTup_A))
ChiS = sp.real(SusceptibilitySpecD(Lambda,ChiT,GFintUp_A))
if chat: print('# - thermodynamic susceptibility: {0: .8f}'.format(ChiT))
if chat: print('# - spectral susceptibility: {0: .8f}'.format(ChiS))
else:
ChiS = ChiT = 0.0
###########################################################
## write the output files #################################
if WriteGF:
header = parline+'\n# E\t\tRe GF0\t\tIm GF0\t\tRe SE\t\tIm SE\t\tRe GF\t\tIm GF'
filename = 'gfUp_'+parfname+'.dat'
WriteFileX([GFTup_A,SigmaUp_A,GFintUp_A],WriteMax,WriteStep,header,filename)
#WriteFileX([GFTup_A,SigmaUp_A,(GFintUp_A+sp.flipud(GFintUp_A))/2.0],WriteMax,WriteStep,header,'symmGF.dat')
if h!=0.0:
filename = 'gfDn_'+parfname+'.dat'
WriteFileX([GFTdn_A,SigmaDn_A,GFintDn_A],WriteMax,WriteStep,header,filename)
filename = 'gfMag_'+parfname+'.dat'
WriteFileX([GFintUp_A,GFintDn_A,Sigma_A],WriteMax,WriteStep,header,filename)
## write data to standard output
## use awk 'NR%2==0', awk 'NR%2==1' to separate the output into two blocks
print('{0: .4f}\t{1: .4f}\t{2: .4f}\t{3: .4f}\t{4: .6f}\t{5: .6f}\t{6: .6f}\t{7: .6f}\t{8: .6f}'\
.format(U,ed,T,h,sp.real(Lambda),HWHMup,Z,DOSFup,sp.real(Dzero)))
print('{0: .4f}\t{1: .4f}\t{2: .4f}\t{3: .4f}\t{4: .6f}\t{5: .6f}\t{6: .6f}\t{7: .6f}\t{8: .6f}\t{9: .6f}'\
.format(U,ed,T,h,nTup,nTdn,nUp,nDn,ChiT,ChiS))
if chat: print('# '+argv[0]+' DONE after {0: .2f} seconds.'.format(float(time()-t)))
## siam_parquet.py end ###
|
pokornyv/SPEpy
|
siam_parquet.py
|
Python
|
gpl-3.0
| 15,368
|
[
"Gaussian"
] |
e727c2be13e55046f84bdef1d2a2d52ac2dca203f3deda3e40366341e1f9b29b
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
import logging
logger = logging.getLogger('camelot.view.controls.delegates.localfiledelegate')
from PyQt4.QtCore import Qt
from customdelegate import CustomDelegate
from customdelegate import DocumentationMetaclass
from camelot.core.utils import variant_to_pyobject
from camelot.view.controls import editors
from camelot.view.proxy import ValueLoading
class LocalFileDelegate(CustomDelegate):
"""Delegate for displaying a path on the local file system. This path can
either point to a file or a directory
"""
__metaclass__ = DocumentationMetaclass
editor = editors.LocalFileEditor
def __init__(
self,
parent=None,
**kw
):
CustomDelegate.__init__(self, parent, **kw)
def paint(self, painter, option, index):
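# paint flow: save the painter state, draw the row background, read the Qt.EditRole value
# from the model, convert the QVariant to a Python object, and render it as text
# (an empty string while the value is still ValueLoading)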
painter.save()
self.drawBackground(painter, option, index)
value = variant_to_pyobject( index.model().data( index, Qt.EditRole ) )
value_str = u''
if value not in (None, ValueLoading):
value_str = unicode(value)
self.paint_text(painter, option, index, value_str)
painter.restore()
|
jeroendierckx/Camelot
|
camelot/view/controls/delegates/localfiledelegate.py
|
Python
|
gpl-2.0
| 2,211
|
[
"VisIt"
] |
bf7dbc2d9b2eb8aec54fafbd783995469e97510837a0af3f4d68cdac310de146
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import unittest
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication
import pytest
import imtools.sample_data
import imtools.select_label_qt
class MyTestCase(unittest.TestCase):
def setUp(self):
pass
# self.qapp = QApplication(sys.argv)
@pytest.mark.interactive
def test_select_labels(self):
"""
interactively shows the label-selection widget (SelectLabelWidget) on sample donut data
:return:
"""
datap = imtools.sample_data.donut()
segmentation = datap['segmentation']
voxelsize_mm = datap['voxelsize_mm']
slab = datap["slab"]
slab["label 20"] = 20
slab["label 19"] = 19
slab["label 18"] = 18
slab["label 17"] = 17
slab["label 16"] = 16
slab["label 15"] = 15
slab["label 14"] = 14
slab["label 13"] = 13
slab["label 12"] = 12
slab["label 11"] = 11
slab["label 10"] = 10
slab["label 9"] = 9
slab["label 8"] = 8
slab["label 7"] = 7
slab["label 6"] = 6
slab["label 5"] = 5
import imtools.show_segmentation_qt as ssqt
app = QApplication(sys.argv)
# app.setGraphicsSystem("openvg")
sw = ssqt.SelectLabelWidget(slab=slab, segmentation=segmentation, voxelsize_mm=voxelsize_mm)
# QTest.mouseClick(sw.ui_buttons['Show volume'], Qt.LeftButton)
# sw.add_vtk_file("~/projects/imtools/mesh.vtk")
sw.show()
app.exec_()
@pytest.mark.interactive
def test_pyqtgraph(self):
"""
interactively shows a pyqtgraph ParameterTree demo alongside the sample segmentation data
:return:
"""
import pyqtgraph.parametertree as pgpt
params = [
{'name': 'Liver', 'type': 'bool', 'value': False, "children": [{"name": "integer", "type": "int", "value": 5}]},
{'name': 'Porta', 'type': 'bool', 'value': False},
{'name': 'Basic parameter data types', 'type': 'group', 'children': [
{'name': 'Integer', 'type': 'int', 'value': 10},
{'name': 'Float', 'type': 'float', 'value': 10.5, 'step': 0.1},
{'name': 'String', 'type': 'str', 'value': "hi"},
{'name': 'List', 'type': 'list', 'values': [1, 2, 3], 'value': 2},
{'name': 'Named List', 'type': 'list', 'values': {"one": 1, "two": "twosies", "three": [3, 3, 3]},
'value': 2},
{'name': 'Boolean', 'type': 'bool', 'value': True, 'tip': "This is a checkbox"},
{'name': 'Color', 'type': 'color', 'value': "FF0", 'tip': "This is a color button"},
{'name': 'Gradient', 'type': 'colormap'},
{'name': 'Subgroup', 'type': 'group', 'children': [
{'name': 'Sub-param 1', 'type': 'int', 'value': 10},
{'name': 'Sub-param 2', 'type': 'float', 'value': 1.2e6},
]},
{'name': 'Text Parameter', 'type': 'text', 'value': 'Some text...'},
{'name': 'Action Parameter', 'type': 'action'},
]},
{'name': 'Numerical Parameter Options', 'type': 'group', 'children': [
{'name': 'Units + SI prefix', 'type': 'float', 'value': 1.2e-6, 'step': 1e-6, 'siPrefix': True,
'suffix': 'V'},
{'name': 'Limits (min=7;max=15)', 'type': 'int', 'value': 11, 'limits': (7, 15), 'default': -6},
{'name': 'DEC stepping', 'type': 'float', 'value': 1.2e6, 'dec': True, 'step': 1, 'siPrefix': True,
'suffix': 'Hz'},
]},
{'name': 'Save/Restore functionality', 'type': 'group', 'children': [
{'name': 'Save State', 'type': 'action'},
{'name': 'Restore State', 'type': 'action', 'children': [
{'name': 'Add missing items', 'type': 'bool', 'value': True},
{'name': 'Remove extra items', 'type': 'bool', 'value': True},
]},
]},
{'name': 'Extra Parameter Options', 'type': 'group', 'children': [
{'name': 'Read-only', 'type': 'float', 'value': 1.2e6, 'siPrefix': True, 'suffix': 'Hz',
'readonly': True},
{'name': 'Renamable', 'type': 'float', 'value': 1.2e6, 'siPrefix': True, 'suffix': 'Hz',
'renamable': True},
{'name': 'Removable', 'type': 'float', 'value': 1.2e6, 'siPrefix': True, 'suffix': 'Hz',
'removable': True},
]},
# ComplexParameter(name='Custom parameter group (reciprocal values)'),
# ScalableGroup(name="Expandable Parameter Group", children=[
# {'name': 'ScalableParam 1', 'type': 'str', 'value': "default param 1"},
# {'name': 'ScalableParam 2', 'type': 'str', 'value': "default param 2"},
# ]),
]
app = QApplication(sys.argv)
p = pgpt.Parameter.create(name='params', type='group', children=params)
t = pgpt.ParameterTree()
t.setParameters(p)
datap = imtools.sample_data.donut()
segmentation = datap['segmentation']
voxelsize_mm = datap['voxelsize_mm']
slab = datap["slab"]
slab["label 20"] = 20
slab["label 19"] = 19
slab["label 18"] = 18
slab["label 17"] = 17
slab["label 16"] = 16
slab["label 15"] = 15
slab["label 14"] = 14
slab["label 13"] = 13
slab["label 12"] = 12
slab["label 11"] = 11
slab["label 10"] = 10
slab["label 9"] = 9
slab["label 8"] = 8
slab["label 7"] = 7
slab["label 6"] = 6
slab["label 5"] = 5
# import imtools.show_segmentation_qt as ssqt
# app.setGraphicsSystem("openvg")
# sw = ssqt.SelectLabelWidget(slab=slab, segmentation=segmentation, voxelsize_mm=voxelsize_mm)
# QTest.mouseClick(sw.ui_buttons['Show volume'], Qt.LeftButton)
# sw.add_vtk_file("~/projects/imtools/mesh.vtk")
# sw.show()
win = QtWidgets.QWidget()
layout = QtWidgets.QGridLayout()
win.setLayout(layout)
layout.addWidget(
QtWidgets.QLabel("These are two views of the same data. They should always display the same values."), 0, 0, 1,
2)
layout.addWidget(t, 1, 0, 1, 1)
win.show()
app.exec_()
if __name__ == '__main__':
unittest.main()
|
mjirik/imtools
|
tests/pg_widgets_test.py
|
Python
|
mit
| 6,483
|
[
"VTK"
] |
0419b0708d31cf2ddb7df1dd2cb65fed6aa55194b1b299d20b8baae3fa00a52a
|
#ImportModules
import ShareYourSystem as SYS
# Define a Visiter instance that is grouped
MyVisiter=SYS.VisiterClass().update(
[
(
'<Visiters>FirstChildVisiter',
SYS.VisiterClass().update(
[
(
'<Collecters>GrandChildCumulater',
SYS.CumulaterClass()
)
]
)
),
(
'<Visiters>SecondChildVisiter',
SYS.VisiterClass()
)
]
)
# Walk inside the group in order to parent the child nodes
MyVisiter.visit(
['Visiters','Collecters'],
[('TagStr','Je suis passe par la')]
)
# Define the AttestedStr
SYS._attest(
[
'MyVisiter is '+SYS._str(
MyVisiter,
**{
'RepresentingBaseKeyStrsListBool':False,
'RepresentingAlineaIsBool':False
}
)
]
)
#Print
|
Ledoux/ShareYourSystem
|
Pythonlogy/draft/Walkers/Visiter/01_ExampleDoc.py
|
Python
|
mit
| 702
|
[
"VisIt"
] |
11cd8199c26dbc77af31e99e8e45f13e323ae3b15ac73e69e2286726234bf9bb
|
#!/usr/bin/env python
""" Submission of test jobs for use by Jenkins
"""
# pylint: disable=wrong-import-position,unused-wildcard-import,wildcard-import
import os.path
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
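# NOTE: parseCommandLine() must run before the remaining DIRAC imports (which need an
# initialized configuration), hence the wrong-import-position pylint disable above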
from DIRAC import gLogger
from DIRAC.tests.Utilities.utils import find_all
from DIRAC.Interfaces.API.Job import Job
from DIRAC.Interfaces.API.Dirac import Dirac
#from tests.Workflow.Integration.Test_UserJobs import createJob
gLogger.setLevel('DEBUG')
cwd = os.path.realpath('.')
dirac = Dirac()
# Simple Hello Word job to DIRAC.Jenkins.ch
gLogger.info("\n Submitting hello world job targeting DIRAC.Jenkins.ch")
helloJ = Job()
helloJ.setName("helloWorld-TEST-TO-Jenkins")
helloJ.setInputSandbox([find_all('exe-script.py', '..', '/DIRAC/tests/Workflow/')[0]])
helloJ.setExecutable("exe-script.py", "", "helloWorld.log")
helloJ.setCPUTime(1780)
helloJ.setDestination('DIRAC.Jenkins.ch')
helloJ.setLogLevel('DEBUG')
result = dirac.submitJob(helloJ)
gLogger.info("Hello world job: ", result)
if not result['OK']:
gLogger.error("Problem submitting job", result['Message'])
exit(1)
# Simple Hello Word job to DIRAC.Jenkins.ch, that needs to be matched by a MP WN
gLogger.info("\n Submitting hello world job targeting DIRAC.Jenkins.ch and a MP WN")
helloJMP = Job()
helloJMP.setName("helloWorld-TEST-TO-Jenkins-MP")
helloJMP.setInputSandbox([find_all('exe-script.py', '..', '/DIRAC/tests/Workflow/')[0]])
helloJMP.setExecutable("exe-script.py", "", "helloWorld.log")
helloJMP.setCPUTime(1780)
helloJMP.setDestination('DIRAC.Jenkins.ch')
helloJMP.setLogLevel('DEBUG')
helloJMP.setNumberOfProcessors(2)
result = dirac.submitJob(helloJMP) # this should make the difference!
gLogger.info("Hello world job MP: ", result)
if not result['OK']:
gLogger.error("Problem submitting job", result['Message'])
exit(1)
|
fstagni/DIRAC
|
tests/Jenkins/dirac-test-job.py
|
Python
|
gpl-3.0
| 1,855
|
[
"DIRAC"
] |
f421ffb1b9117f437d3ee06dd29736085bdaf37fc304241f2bd294135d5d8e16
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
CISD analytical nuclear gradients
'''
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.ci import cisd
from pyscf.grad import rhf as rhf_grad
from pyscf.grad import ccsd as ccsd_grad
def grad_elec(cigrad, civec=None, eris=None, atmlst=None, verbose=logger.INFO):
myci = cigrad.base
if civec is None: civec = myci.ci
assert(not isinstance(civec, (list, tuple)))
nocc = myci.nocc
nmo = myci.nmo
d1 = cisd._gamma1_intermediates(myci, civec, nmo, nocc)
fd2intermediate = lib.H5TmpFile()
d2 = cisd._gamma2_outcore(myci, civec, nmo, nocc, fd2intermediate, True)
t1 = t2 = l1 = l2 = civec
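# d1/d2 above already carry the CISD density-matrix intermediates, so the CI vector below
# merely fills the CC amplitude slots; the actual gradient assembly is delegated to the
# CCSD gradient code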
return ccsd_grad.grad_elec(cigrad, t1, t2, l1, l2, eris, atmlst, d1, d2, verbose)
def as_scanner(grad_ci, state=0):
'''Generating a nuclear gradients scanner/solver (for geometry optimizer).
The returned solver is a function. This function requires one argument
"mol" as input and returns total CISD energy.
The solver will automatically use the results of last calculation as the
initial guess of the new calculation. All parameters assigned in the
CISD and the underlying SCF objects (conv_tol, max_memory etc) are
automatically applied in the solver.
Note scanner has side effects. It may change many underlying objects
(_scf, with_df, with_x2c, ...) during calculation.
Examples::
>>> from pyscf import gto, scf, ci
>>> mol = gto.M(atom='H 0 0 0; F 0 0 1')
>>> ci_scanner = ci.CISD(scf.RHF(mol)).nuc_grad_method().as_scanner()
>>> e_tot, grad = ci_scanner(gto.M(atom='H 0 0 0; F 0 0 1.1'))
>>> e_tot, grad = ci_scanner(gto.M(atom='H 0 0 0; F 0 0 1.5'))
'''
from pyscf import gto
if isinstance(grad_ci, lib.GradScanner):
return grad_ci
logger.info(grad_ci, 'Create scanner for %s', grad_ci.__class__)
class CISD_GradScanner(grad_ci.__class__, lib.GradScanner):
def __init__(self, g):
lib.GradScanner.__init__(self, g)
def __call__(self, mol_or_geom, state=state, **kwargs):
if isinstance(mol_or_geom, gto.Mole):
mol = mol_or_geom
else:
mol = self.mol.set_geom_(mol_or_geom, inplace=False)
ci_scanner = self.base
if ci_scanner.nroots > 1 and state >= ci_scanner.nroots:
raise ValueError('State ID greater than the number of CISD roots')
mf_scanner = ci_scanner._scf
mf_scanner(mol)
ci_scanner.mo_coeff = mf_scanner.mo_coeff
ci_scanner.mo_occ = mf_scanner.mo_occ
if getattr(ci_scanner.ci, 'size', 0) != ci_scanner.vector_size():
ci_scanner.ci = None
eris = ci_scanner.ao2mo(ci_scanner.mo_coeff)
ci_scanner.kernel(ci0=ci_scanner.ci, eris=eris)
# TODO: Check root flip
if ci_scanner.nroots > 1:
e_tot = ci_scanner.e_tot[state]
civec = ci_scanner.ci[state]
else:
e_tot = ci_scanner.e_tot
civec = ci_scanner.ci
self.mol = mol
de = self.kernel(civec, eris=eris, **kwargs)
return e_tot, de
@property
def converged(self):
ci_scanner = self.base
if ci_scanner.nroots > 1:
ci_conv = ci_scanner.converged[state]
else:
ci_conv = ci_scanner.converged
return all((ci_scanner._scf.converged, ci_conv))
# cache eris object in CCSD base class. eris object is used many times
# when calculating gradients
g_ao2mo = grad_ci.base.__class__.ao2mo
def _save_eris(self, *args, **kwargs):
self._eris = g_ao2mo(self, *args, **kwargs)
return self._eris
grad_ci.base.__class__.ao2mo = _save_eris
return CISD_GradScanner(grad_ci)
class Gradients(rhf_grad.GradientsMixin):
def __init__(self, myci):
self.state = 0 # index of the electronic state whose gradients are to be computed.
rhf_grad.GradientsMixin.__init__(self, myci)
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('\n')
if not self.base.converged:
log.warn('Ground state %s not converged',
self.base.__class__.__name__)
log.info('******** %s for %s ********',
self.__class__, self.base.__class__)
if self.state != 0 and self.base.nroots > 1:
log.info('State ID = %d', self.state)
return self
grad_elec = grad_elec
def kernel(self, civec=None, eris=None, atmlst=None, state=None,
verbose=None):
log = logger.new_logger(self, verbose)
myci = self.base
if civec is None: civec = myci.ci
if civec is None: civec = myci.kernel(eris=eris)
if (isinstance(civec, (list, tuple)) or
(isinstance(civec, numpy.ndarray) and civec.ndim > 1)):
if state is None:
state = self.state
else:
self.state = state
civec = civec[state]
logger.info(self, 'Multiple roots are found in CISD solver. '
'Nuclear gradients of root %d are computed.', state)
if atmlst is None:
atmlst = self.atmlst
else:
self.atmlst = atmlst
if self.verbose >= logger.WARN:
self.check_sanity()
if self.verbose >= logger.INFO:
self.dump_flags()
de = self.grad_elec(civec, eris, atmlst, verbose=log)
self.de = de + self.grad_nuc(atmlst=atmlst)
if self.mol.symmetry:
self.de = self.symmetrize(self.de, atmlst)
self._finalize()
return self.de
# Calling the underlying SCF nuclear gradients because it may be modified
# by external modules (e.g. QM/MM, solvent)
def grad_nuc(self, mol=None, atmlst=None):
mf_grad = self.base._scf.nuc_grad_method()
return mf_grad.grad_nuc(mol, atmlst)
def _finalize(self):
if self.verbose >= logger.NOTE:
logger.note(self, '--------- %s gradients for state %d ----------',
self.base.__class__.__name__, self.state)
self._write(self.mol, self.de, self.atmlst)
logger.note(self, '----------------------------------------------')
as_scanner = as_scanner
Grad = Gradients
cisd.CISD.Gradients = lib.class_as_method(Gradients)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
mol = gto.M(
atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. ,-0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '631g'
)
mf = scf.RHF(mol)
ehf = mf.scf()
myci = cisd.CISD(mf)
myci.kernel()
g1 = myci.Gradients().kernel()
# O 0.0000000000 -0.0000000000 0.0065498854
# H -0.0000000000 0.0208760610 -0.0032749427
# H -0.0000000000 -0.0208760610 -0.0032749427
print(lib.finger(g1) - -0.032562200777204092)
mcs = myci.as_scanner()
mol.set_geom_([
["O" , (0. , 0. , 0.001)],
[1 , (0. ,-0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]])
e1 = mcs(mol)
mol.set_geom_([
["O" , (0. , 0. ,-0.001)],
[1 , (0. ,-0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]])
e2 = mcs(mol)
print(g1[0,2] - (e1-e2)/0.002*lib.param.BOHR)
print('-----------------------------------')
mol = gto.M(
atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. ,-0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '631g'
)
mf = scf.RHF(mol)
ehf = mf.scf()
myci = cisd.CISD(mf)
myci.frozen = [0,1,10,11,12]
myci.max_memory = 1
myci.kernel()
g1 = Gradients(myci).kernel()
# O -0.0000000000 0.0000000000 0.0106763547
# H 0.0000000000 -0.0763194988 -0.0053381773
# H 0.0000000000 0.0763194988 -0.0053381773
print(lib.finger(g1) - 0.1022427304650084)
mcs = myci.as_scanner()
mol.set_geom_([
["O" , (0. , 0. , 0.001)],
[1 , (0. ,-0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]])
e1 = mcs(mol)
mol.set_geom_([
["O" , (0. , 0. ,-0.001)],
[1 , (0. ,-0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]])
e2 = mcs(mol)
print(g1[0,2] - (e1-e2)/0.002*lib.param.BOHR)
mol = gto.M(
atom = 'H 0 0 0; H 0 0 1.76',
basis = '631g',
unit='Bohr')
mf = scf.RHF(mol).run(conv_tol=1e-14)
myci = cisd.CISD(mf)
myci.conv_tol = 1e-10
myci.kernel()
g1 = Gradients(myci).kernel()
#[[ 0. 0. -0.07080036]
# [ 0. 0. 0.07080036]]
|
sunqm/pyscf
|
pyscf/grad/cisd.py
|
Python
|
apache-2.0
| 9,526
|
[
"PySCF"
] |
da67360be0f3f270e32d012190615461162c50c3e705ac7c728349c8e37ce988
|
#BEGIN_HEADER
import simplejson
import sys
import os
import glob
import json
import logging
import time
import subprocess
from pprint import pprint
import script_util
from biokbase.workspace.client import Workspace
from biokbase.auth import Token
try:
from biokbase.HandleService.Client import HandleService
except:
from biokbase.AbstractHandle.Client import AbstractHandle as HandleService
_KBaseGenomeUtil__DATA_VERSION = "0.5"
class KBaseGenomeUtilException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
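# fallback result template: an empty BLAST output in KBase BlastOutput JSON form; when a
# search fails it is loaded, stamped with 'err_msg', and saved to the workspace so the
# user still gets a diagnosable object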
no_rst = """{
"BlastOutput_db": "NoDB",
"BlastOutput_iterations": {
"Iteration": [
{
"Iteration_hits": {
"Hit": []
},
"Iteration_iter-num": "1",
"Iteration_message": "ERR_MSG",
"Iteration_query-ID": "lcl|1_0",
"Iteration_query-len": "QRY_LNGTH",
"Iteration_stat": {
"Statistics": {
"Statistics_db-len": "1331648",
"Statistics_db-num": "4280",
"Statistics_eff-space": "2.6633e+06",
"Statistics_entropy": "0.14",
"Statistics_hsp-len": "0",
"Statistics_kappa": "0.041",
"Statistics_lambda": "0.267"
}
}
},
{
"Iteration_hits": {
"Hit": []
},
"Iteration_iter-num": "1",
"Iteration_stat": {
"Statistics": {
"Statistics_db-len": "1331648",
"Statistics_db-num": "4280",
"Statistics_eff-space": "2.6633e+06",
"Statistics_entropy": "0.14",
"Statistics_hsp-len": "0",
"Statistics_kappa": "0.041",
"Statistics_lambda": "0.267"
}
}
}
]
},
"BlastOutput_param": {
"Parameters": {
"Parameters_expect": "0.05",
"Parameters_filter": "F",
"Parameters_gap-extend": "1",
"Parameters_gap-open": "11",
"Parameters_matrix": "BLOSUM62"
}
},
"BlastOutput_program": "error",
"BlastOutput_query-ID": "error",
"BlastOutput_query-def": "error",
"BlastOutput_query-len": "na",
"BlastOutput_reference": "error",
"BlastOutput_version": "error"
}"""
#END_HEADER
class KBaseGenomeUtil:
'''
Module Name:
KBaseGenomeUtil
Module Description:
'''
######## WARNING FOR GEVENT USERS #######
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
#########################################
#BEGIN_CLASS_HEADER
# Config variables that SHOULD get overwritten in the constructor
__TEMP_DIR = 'index'
__WS_URL = 'https://ci.kbase.us/services/ws'
__HS_URL = 'https://ci.kbase.us/services/handle_service'
__SHOCK_URL = 'https://ci.kbase.us/services/shock-api/'
__BLAST_DIR = 'blast'
__GENOME_FA = 'genome.fa'
__ANNO_JSON = 'annotation.json'
__QUERY_FA = 'query.fa'
__INDEX_CMD = 'formatdb'
__BLAST_CMD = 'blastall'
__BLAST_OUT = 'result.txt'
__INDEX_ZIP = 'index.zip'
__SVC_USER = 'kbasetest'
__SVC_PASS = ''
__LOGGER = None
__ERR_LOGGER = None
__INDEX_TYPE = {'blastp' : 'protein_db',
'blastx' : 'protein_db',
'blastn' : 'transcript_db',
'tblastn' : 'transcript_db',
'tblastx' : 'transcript_db'}
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
# This is where config variable for deploy.cfg are available
#pprint(config)
if 'ws_url' in config:
self.__WS_URL = config['ws_url']
if 'hs_url' in config:
self.__HS_URL = config['hs_url']
if 'shock_url' in config:
self.__SHOCK_URL = config['shock_url']
if 'temp_dir' in config:
self.__TEMP_DIR = config['temp_dir']
if 'blast_dir' in config:
self.__BLAST_DIR = config['blast_dir']
if 'genome_input_fa' in config:
self.__GENOME_FA = config['genome_input_fa']
if 'query_fa' in config:
self.__QUERY_FA = config['query_fa']
if 'svc_user' in config:
self.__SVC_USER = config['svc_user']
if 'svc_pass' in config:
self.__SVC_PASS = config['svc_pass']
# logging
self.__LOGGER = logging.getLogger('GenomeUtil')
if 'log_level' in config:
self.__LOGGER.setLevel(config['log_level'])
else:
self.__LOGGER.setLevel(logging.INFO)
streamHandler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter("%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s")
formatter.converter = time.gmtime
streamHandler.setFormatter(formatter)
self.__LOGGER.addHandler(streamHandler)
self.__LOGGER.info("Logger was set")
#END_CONSTRUCTOR
pass
def index_genomes(self, ctx, params):
# ctx is the context object
# return variables are: returnVal
#BEGIN index_genomes
user_token=ctx['token']
svc_token = Token(user_id=self.__SVC_USER, password=self.__SVC_PASS).token
ws_client=Workspace(url=self.__WS_URL, token=user_token)
hs = HandleService(url=self.__HS_URL, token=user_token)
gs = {'elements' : {}}
try:
self.__LOGGER.info( "Preparing Target FA")
blast_dir =self.__BLAST_DIR
if os.path.exists(blast_dir):
files=glob.glob("%s/*" % blast_dir)
for f in files: os.remove(f)
if not os.path.exists(blast_dir): os.makedirs(blast_dir)
target_nt_fn = "%s/%s_nt.fa" %( blast_dir, params['blastindex_name'])
target_aa_fn = "%s/%s_aa.fa" %( blast_dir, params['blastindex_name'])
try:
target_nt=open(target_nt_fn,'w')
target_aa=open(target_aa_fn,'w')
except:
self.__LOGGER.error("Couldn't open file")
raise KBaseGenomeUtilException("Backend awe client error: Couldn't open files")
have_nt_seq = False
have_aa_seq = False
# Iterate one at a time to cope with main memory limit for euk genomes
for genome_id in params['genome_ids']:
try:
obj_infos = ws_client.get_object_info_new({"objects": [{'name':genome_id, # replace `0' with loop
'workspace': params['ws_id']}]})
except:
self.__LOGGER.error("Couldn't retrieve %s:%s from the workspace" %(params['ws_id'],genome_id))
raise KBaseGenomeUtilException("Couldn't retrieve %s:%s from the workspace" %(params['ws_id'],genome_id))
if len(obj_infos) < 1:
self.__LOGGER.error("Couldn't find %s:%s from the workspace" %(params['ws_id'],genome_id))
continue
#err_msg += "Workspace error: Couldn't find %s:%s from the workspace\n" %(params['ws_id'],genome_id)
# we can continue due to multiple genomes
#raise Exception("Couldn't find %s:%s from the workspace" %(params['ws_id'],genome_id))
ref_id = "{0}/{1}/{2}".format(obj_infos[0][6],obj_infos[0][0],obj_infos[0][4])
gs['elements'][genome_id] = [ref_id]
self.__LOGGER.info( "Downloading genome object from workspace {0}".format(ref_id))
# TODO: make the following procedures to be loop for each genome_ids
try:
genome_list=ws_client.get_object_subset([{'name':genome_id, # replace `0' with loop
'workspace': params['ws_id'],
'included':['features']}])
#genome_list=ws_client.get_objects([{'name':genome_id, # replace `0' with loop
# 'workspace': params['ws_id']}])
genome = genome_list[0]
except Exception, e:
raise KBaseGenomeUtilException("Failed to download genome object itself even though we got the object information")
self.__LOGGER.info( "Dumping seq for %s" % genome_id)
# Dump genome sequences
check_seq=0
#extract protein sequences from the genome object
try:
for gene in genome['data']['features']:
#>kb.g.1234.CDS.1234#At1g3333 amylase...
function = "NA"
aliases = "NA"
if 'function' in gene:
function = gene['function']
if 'aliases' in gene: aliases = ",".join(gene['aliases'])
if 'protein_translation' in gene:
target_aa.write(">%s#%s#%s#%s\n%s\n" % (gene['id'], ref_id, aliases, function, gene['protein_translation']))
have_aa_seq = True
if 'dna_sequence' in gene:
target_nt.write(">%s#%s#%s#%s\n%s\n" % (gene['id'], ref_id, aliases, function, gene['dna_sequence']))
have_nt_seq = True
except Exception as e:
raise KBaseGenomeUtilException("Failed to dump target sequence for genome : %s" % genome_id)
try:
target_nt.close()
target_aa.close()
except Exception as e:
raise KBaseGenomeUtilException("Failed to close sequence files")
if not have_nt_seq :
self.__LOGGER.info("The genome objects do not contain any dna sequences!")
if not have_aa_seq :
self.__LOGGER.info("The genome objects do not contain any amino acid sequences!")
index_type = 'none'
if have_nt_seq :
try:
cmdstring="%s -i %s -p F" %(self.__INDEX_CMD, target_nt_fn)
# TODO: avoid shell=True by passing an argument list to subprocess.Popen
tool_process = subprocess.Popen(cmdstring, stderr=subprocess.PIPE, shell=True)
stdout, stderr = tool_process.communicate()
if stdout is not None and len(stdout) > 0:
self.__LOGGER.info(stdout)
if stderr is not None and len(stderr) > 0:
self.__LOGGER.error("Indexing error: " + stderr)
raise KBaseGenomeUtilException("Indexing error: " + stderr)
except Exception, e:
raise KBaseGenomeUtilException("Failed to run indexing program (%s) : %s " %(self.__INDEX_CMD, e))
index_type = 'nucleotide'
if have_aa_seq :
try:
cmdstring="%s -i %s -p T" %(self.__INDEX_CMD, target_aa_fn)
# TODO: avoid shell=True by passing an argument list to subprocess.Popen
tool_process = subprocess.Popen(cmdstring, stderr=subprocess.PIPE, shell=True)
stdout, stderr = tool_process.communicate()
if stdout is not None and len(stdout) > 0:
self.__LOGGER.info(stdout)
if stderr is not None and len(stderr) > 0:
self.__LOGGER.error("Indexing error: " + stderr)
raise KBaseGenomeUtilException("Indexing error: " + stderr)
except Exception, e:
raise KBaseGenomeUtilException("Failed to run indexing program (%s) : %s " %(self.__INDEX_CMD, e))
if index_type == 'nucleotide': index_type = 'both'
else: index_type = 'protein'
#os.remove(target_nt_fn)
#os.remove(target_aa_fn)
# compress
try:
script_util.zip_files(self.__LOGGER, blast_dir, "%s.zip" % params['blastindex_name'])
except Exception, e:
raise KBaseGenomeUtilException("Failed to compress the index: %s" %(e))
try:
handle = hs.upload("%s.zip" % (params['blastindex_name']))
except Exception, e:
raise KBaseGenomeUtilException("Failed to upload the index: %s" %(e))
bi = {'handle' : handle, 'genome_set' : gs, 'index_type' : index_type, 'index_program' : params['index_program']}
if 'description' in params: bi['description'] = params['description']
if index_type == 'none':
err_msg = 'No sequences were indexed'
bi['description'] = err_msg
res= ws_client.save_objects(
{"workspace":params['ws_id'],
"objects": [{
"type":"GenomeUtil.BlastIndex",
"data":bi,
"meta" : {'err_msg' : err_msg},
"name":params['blastindex_name']}
]})
else:
res= ws_client.save_objects(
{"workspace":params['ws_id'],
"objects": [{
"type":"GenomeUtil.BlastIndex",
"data":bi,
"name":params['blastindex_name']}
]})
returnVal = { 'blastindex_ref' : "%s/%s" % (params['ws_id'], params['blastindex_name']) }
if index_type == 'none':
returnVal['err_msg'] = err_msg
except MemoryError, e:
handle = hs.new_handle()
bi = {'handle' : handle, 'genome_set' : gs, 'index_type' : 'none', 'index_program' : params['index_program']}
err_msg = 'Not enough main memory: please use smaller number of genomes only'
bi['description'] = err_msg
returnVal = {'err_msg' : err_msg }
res= ws_client.save_objects(
{"workspace":params['ws_id'],
"objects": [{
"type":"GenomeUtil.BlastIndex",
"data":bi,
"meta" : {'err_msg' : err_msg},
"name":params['blastindex_name']}
]})
except Exception, e:
handle = hs.new_handle()
bi = {'handle' : handle, 'genome_set' : gs, 'index_type' : 'none', 'index_program' : params['index_program']}
err_msg = str(e)
bi['description'] = err_msg
returnVal = {'err_msg' : err_msg }
res= ws_client.save_objects(
{"workspace":params['ws_id'],
"objects": [{
"type":"GenomeUtil.BlastIndex",
"data":bi,
"meta" : {'err_msg' : err_msg},
"name":params['blastindex_name']}
]})
#END index_genomes
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method index_genomes return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def blast_against_genome(self, ctx, params):
# ctx is the context object
# return variables are: returnVal
#BEGIN blast_against_genome
# TODO: Rename blast_search
# create the workspace client before the try block, so the except handlers
# at the bottom can still save an error object back to the workspace
user_token=ctx['token']
ws_client=Workspace(url=self.__WS_URL, token=user_token)
try:
self.__LOGGER.info( "Preparing FA")
if len(params['query']) > 5:
sequence=params['query']
else:
self.__LOGGER.error("The input sequence is too short!")
raise KBaseGenomeUtilException("The input sequence is too short!")
if not os.path.exists(self.__TEMP_DIR): os.makedirs(self.__TEMP_DIR)
#print "generate input file for query sequence\n"
query_fn = "%s/%s" %(self.__TEMP_DIR, self.__QUERY_FA)
target=open(query_fn,'w')
if sequence.startswith(">"):
target.write(sequence)
else:
seqes = sequence.split("\n")
for i in range(len(seqes)):
target.write(">query_seq_%d\n" %(i))
target.write(seqes[i])
target.close()
svc_token = Token(user_id=self.__SVC_USER, password=self.__SVC_PASS).token
err_msg = ""
blast_dir =self.__BLAST_DIR
if os.path.exists(blast_dir):
files=glob.glob("%s/*" % blast_dir)
for f in files: os.remove(f)
if not os.path.exists(blast_dir): os.makedirs(blast_dir)
target_fn = "%s/%s" %( blast_dir, self.__GENOME_FA)
if 'target_seqs' in params:
# let's build index directly and throw away
sequence = params['target_seqs']
target=open(target_fn,'w')
if sequence.startswith(">"):
target.write(sequence)
else:
seqes = sequence.split("\n")
for i in range(len(seqes)):
target.write(">target_seq_%d\n" %(i))
target.write(seqes[i])
target.close()
if(self.__INDEX_TYPE[params['blast_program']] == 'protein_db'):
formatdb_type='T'
elif(self.__INDEX_TYPE[params['blast_program']] == 'transcript_db'):
formatdb_type='F'
else:
self.__LOGGER.error("{0} is not yet supported".format(params['blast_program']))
raise KBaseGenomeUtilException("{0} is not yet supported".format(params['blast_program']))
cmdstring="%s -i %s -p %s -o T" %(self.__INDEX_CMD, target_fn, formatdb_type)
# TODO: avoid shell=True by passing an argument list to subprocess.Popen
tool_process = subprocess.Popen(cmdstring, stderr=subprocess.PIPE, shell=True)
stdout, stderr = tool_process.communicate()
if stdout is not None and len(stdout) > 0:
self.__LOGGER.info(stdout)
if stderr is not None and len(stderr) > 0:
self.__LOGGER.error("Index error: " + stderr)
raise KBaseGenomeUtilException("Index error: " + stderr)
else:
try:
blast_indexes=ws_client.get_object_subset([{'name':params['blastindex_name'],
'workspace': params['ws_id'],
'included':['handle', 'index_type']}])
except:
self.__LOGGER.error("Couldn't find %s:%s from the workspace" %(params['ws_id'],params['blastindex_name']))
raise KBaseGenomeUtilException("Couldn't find %s:%s from the workspace" %(params['ws_id'],params['genome_ids'][0]))
if len(blast_indexes) < 1:
self.__LOGGER.error("Couldn't find %s:%s from the workspace" %(params['ws_id'],params['blastindex_name']))
raise KBaseGenomeUtilException("Couldn't find %s:%s from the workspace" %(params['ws_id'],params['genome_ids'][0]))
# TODO: Add err handling
zip_fn = blast_indexes[0]['data']['handle']['file_name']
target_fn = "%s/%s" %(blast_dir, zip_fn[:-4]) # remove '.zip'
if(self.__INDEX_TYPE[params['blast_program']] == 'protein_db'):
target_fn += '_aa.fa'
if blast_indexes[0]['data']['index_type'] == 'none' or blast_indexes[0]['data']['index_type'] == "nucleotide":
self.__LOGGER.error("The index object does not contain amino acid sequence indexes")
raise KBaseGenomeUtilException("The index object does not contain amino acid sequence indexes")
elif(self.__INDEX_TYPE[params['blast_program']] == 'transcript_db'):
target_fn += '_nt.fa'
if blast_indexes[0]['data']['index_type'] == 'none' or blast_indexes[0]['data']['index_type'] == "protein":
self.__LOGGER.error("The index object does not contain nucleotide sequence indexes")
raise KBaseGenomeUtilException("The index object does not contain nucleotide sequence indexes")
else:
self.__LOGGER.error("{0} is not yet supported".format(params['blast_program']))
raise KBaseGenomeUtilException("{0} is not yet supported".format(params['blast_program']))
# TODO: Add err handling
zip_fn = blast_indexes[0]['data']['handle']['file_name']
#pprint(blast_indexes[0])
self.__LOGGER.info("Downloading the genome index")
#hs = HandleService(url=self.__HS_URL, token=user_token)
try:
script_util.download_file_from_shock(self.__LOGGER,
shock_service_url= blast_indexes[0]['data']['handle']['url'],
shock_id= blast_indexes[0]['data']['handle']['id'],
filename= blast_indexes[0]['data']['handle']['file_name'],
directory= '.',
token = user_token)
except Exception, e:
self.__LOGGER.error("Downloading error from shock: Please contact help@kbase.us")
raise KBaseGenomeUtilException("Downloading error from shock: Please contact help@kbase.us")
try:
script_util.unzip_files(self.__LOGGER, zip_fn, blast_dir)
except Exception, e:
self.__LOGGER.error("Unzip indexfile error: Please contact help@kbase.us")
raise KBaseGenomeUtilException("Unzip indexfile error: Please contact help@kbase.us")
self.__LOGGER.info( "Searching...")
#blast search
cmdstring="%s -p %s -i %s -m 7 -o %s -d %s -e %s" % (self.__BLAST_CMD, params['blast_program'], query_fn, self.__BLAST_OUT, target_fn, params['e-value'])
if 'gap_opening_penalty' in params:
cmdstring += " -G %s" %(params['gap_opening_penalty'])
if 'gap_extension_penalty' in params:
cmdstring += " -E %s" %(params['gap_extension_penalty'])
if 'nucleotide_match_reward' in params:
cmdstring += " -r %s" %(params['nucleotide_match_reward'])
if 'nucleotide_mismatch_penalty' in params:
cmdstring += " -q %s" %(params['nucleotide_mismatch_penalty'])
if 'word_size' in params:
cmdstring += " -W %s" %(params['word_size'])
if 'maximum_alignment_2show' in params:
cmdstring += " -b %s" %(params['maximum_alignment_2show'])
if 'substitution_matrix' in params and params['substitution_matrix'] != 'Default':
cmdstring += " -M %s" %(params['substitution_matrix'])
if 'mega_blast' in params:
cmdstring += " -n %s" %(params['mega_blast'])
if 'gapped_alignment' in params:
cmdstring += " -g %s" %(params['gapped_alignment'])
if 'filter_query_seq' in params:
cmdstring += " -F %s" %(params['filter_query_seq'])
if 'extending_hits' in params:
cmdstring += " -f %s" %(params['extending_hits'])
if 'maximum_seq_2show' in params:
cmdstring += " -v %s" %(params['maximum_seq_2show'])
# TODO: avoid shell=True by passing an argument list to subprocess.Popen
#print cmdstring
try:
tool_process = subprocess.Popen(cmdstring, stderr=subprocess.PIPE, shell=True)
stdout, stderr = tool_process.communicate()
if stdout is not None and len(stdout) > 0:
self.__LOGGER.info(stdout)
if stderr is not None and len(stderr) > 0:
self.__LOGGER.error("Search error: " + stderr)
raise KBaseGenomeUtilException("Search error: " + stderr)
# TODO: Convert the following Perl script to python library code
tool_process = subprocess.Popen("xml2kbaseblastjson result.txt > blastoutput_new.json", stderr=subprocess.PIPE, shell=True)
stdout, stderr = tool_process.communicate()
if stdout is not None and len(stdout) > 0:
self.__LOGGER.info(stdout)
if stderr is not None and len(stderr) > 0:
self.__LOGGER.error("Output conversion error: " + stderr)
raise KBaseGenomeUtilException("Output conversion error: " + stderr)
with open('blastoutput_new.json', 'r') as myfile:
res1 = json.load(myfile)
except Exception,e:
self.__LOGGER.error("Search execution error: Please contact help@kbase.us")
raise KBaseGenomeUtilException("Search execution error: Please contact help@kbase.us")
#os.remove(query_fn)
#extract the blast output
# res=script_util.extract_blast_output(self.__BLAST_OUT, anno=g2f)
#os.remove(self.__BLAST_OUT)
#num_of_hits=len(res)
#metadata=[{'input_genomes':params['genome_ids'][0],'input_sequence':sequence,'number_of_hits':float(num_of_hits)}]
#res1={'hits' : res, 'info':metadata}
self.__LOGGER.info( "Finished!!!")
self.__LOGGER.debug( res1 )
#store the BLAST output back into workspace
res= ws_client.save_objects(
{"workspace":params['ws_id'],
"objects": [{
"type":"GenomeUtil.BlastOutput",
"data":res1,
"name":params['output_name']}
]})
#print res1
except KBaseGenomeUtilException, e:
global no_rst
res1 = json.loads(no_rst)
res1['err_msg'] = str(e)
res= ws_client.save_objects(
{"workspace":params['ws_id'],
"objects": [{
"type":"GenomeUtil.BlastOutput",
"data":res1,
"meta":{"err_msg": str(e)},
"name":params['output_name']}
]})
except Exception, e:
res1 = json.loads(no_rst)
res1['err_msg'] = 'Contact help@kbase.us with the following messages: ' + str(e)
res= ws_client.save_objects(
{"workspace":params['ws_id'],
"objects": [{
"type":"GenomeUtil.BlastOutput",
"data":res1,
"meta":{"err_msg": str(e)},
"name":params['output_name']}
]})
finally:
if not isinstance(res1, dict):
res1 = json.loads(no_rst)
res1['err_msg'] = 'Unable to store even the error message to workspace'
returnVal = res1
#END blast_against_genome
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method blast_against_genome return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def filter_BlastOutput(self, ctx, params):
# ctx is the context object
# return variables are: returnVal
#BEGIN filter_BlastOutput
user_token=ctx['token']
ws_client=Workspace(url=self.__WS_URL, token=user_token)
blast_outputs=ws_client.get_objects([{'name':params['in_id'],
'workspace': params['ws_id']}])
fs ={'elements': {}}
fs['description'] = "FeatureSet from BlastOutput by "
printedEvalue = False
printedEntries = False
if 'evalue' in params and params['evalue'] != "":
fs['description'] += " E-value:{0}".format(params['evalue'])
printedEvalue = True
if 'entries' in params and (params['entries'] != "" or params['entries'] > 0):
if(printedEvalue): fs['description'] += ","
fs['description'] += " # of entries :{0}".format(params['entries'])
printedEntries = True
if not printedEvalue and not printedEntries:
fs['description'] += "no filtering"
if len(blast_outputs) != 1:
fs['description'] = "No such blast output object was found : {0}/{1}".format(param['workspace_name'], param['object_name'])
else:
fm = {}
f2g = {}
for boid in blast_outputs[0]['data']['BlastOutput_iterations']['Iteration']:
for hitd in boid['Iteration_hits']['Hit']:
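# Hit_def was written by index_genomes as 'feature_id#genome_ref#aliases#function';
# take the feature id before the first '#' and the genome ref between the first and second '#'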
print hitd['Hit_def']
ali = hitd['Hit_def'].find('#')
if(ali < 0): continue
fid = hitd['Hit_def'][0:ali]
gri = hitd['Hit_def'].find('#', ali+1)
if fid not in f2g: f2g[fid] = {}
if (gri >= 0 and not gri == (ali+1)):
grid = hitd['Hit_def'][(ali+1):gri]
f2g[fid][grid] = 1
for hspd in hitd['Hit_hsps']['Hsp']:
if fid in fm:
if float(hspd['Hsp_evalue']) < fm[fid]:
fm[fid] = float(hspd['Hsp_evalue'])
else: fm[fid] = float(hspd['Hsp_evalue'])
fms = sorted(fm.items(), key=lambda x: x[1], reverse=False)
bol = len(fms)
if params['entries'] != "" or int(params['entries']) > 0:
if(int(params['entries']) < bol):
bol = int(params['entries'])
for i in range(bol):
if(fms[i][1] > float(params['evalue'])): break
if fms[i][0] in f2g:
fs['elements'][fms[i][0]] = f2g[fms[i][0]].keys()
else:
fs['elements'][fms[i][0]] = []
ws_client.save_objects(
{"workspace":params['ws_id'],
"objects": [{
"type":"KBaseCollections.FeatureSet",
"data":fs,
"name":params['out_id']}
]})
#pprint(fs)
returnVal = {'obj_name' : params['out_id'], 'ws_id' : params['ws_id']}
#END filter_BlastOutput
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method filter_BlastOutput return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
|
kbaseIncubator/core_genome_utilities
|
lib/biokbase/genome_util/KBaseGenomeUtilImpl.py
|
Python
|
mit
| 32,725
|
[
"BLAST"
] |
ea2bf247f46bb5814b701ef6c3f10e5e83735bacf3c44d113e0b0b07b1858270
|
import sys
import moose
import rdesigneur as rd
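# build a single-neuron model from a morphology file (SWC by default), ramp a current
# injection into the soma (25 nA per second), plot Vm and Ca concentration for all
# compartments, and show Vm in a 3-D Moogli view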
if len( sys.argv ) > 1:
fname = sys.argv[1]
else:
fname = './cells/h10.CNG.swc'
rdes = rd.rdesigneur(
cellProto = [[fname, 'elec']],
stimList = [['soma', '1', '.', 'inject', 't * 25e-9' ]],
plotList = [['#', '1', '.', 'Vm', 'Membrane potential'],
['#', '1', 'Ca_conc', 'Ca', 'Ca conc (uM)']],
moogList = [['#', '1', '.', 'Vm', 'Soma potential']]
)
rdes.buildModel()
moose.reinit()
rdes.displayMoogli( 0.001, 0.1, rotation = 0.02 )
|
BhallaLab/moose-examples
|
tutorials/Rdesigneur/ex9.0_load_neuronal_morphology_file.py
|
Python
|
gpl-2.0
| 518
|
[
"MOOSE"
] |
4578baea146dcd18e4035204b681c1a2ad283fb88268b2fb551bf409d23c6c92
|
import sys
import bcbio.pipeline.datadict as dd
from bcbio.ngsalign import bowtie2, bwa
from bcbio.log import logger
def clean_chipseq_alignment(data):
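# keep the raw alignment around, then strip multimapping reads with the filter that
# matches the aligner used upstream; peak calling downstream expects a cleaned BAM
# (see the warning below)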
aligner = dd.get_aligner(data)
data["raw_bam"] = dd.get_work_bam(data)
if aligner:
if aligner == "bowtie2":
filterer = bowtie2.filter_multimappers
elif aligner == "bwa":
filterer = bwa.filter_multimappers
else:
logger.error("ChIP-seq only supported for bowtie2 and bwa.")
sys.exit(-1)
unique_bam = filterer(dd.get_work_bam(data), data)
data["work_bam"] = unique_bam
else:
logger.info("Warning: When BAM file is given as input, bcbio skips multimappers removal."
"If BAM is not cleaned for peak calling, can result in downstream errors.")
return [[data]]
|
biocyberman/bcbio-nextgen
|
bcbio/chipseq/__init__.py
|
Python
|
mit
| 843
|
[
"BWA"
] |
52106dd3c0a41e8e9a28c654ff83dfa7739435701879fbb746178137cb923b39
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Tuple
from typing import Union
from ORCA.utils.ParseResult import cResultParser
from ORCA.Action import cAction
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ORCA.interfaces.BaseInterfaceSettings import cBaseInterFaceSettings
from ORCA.interfaces.BaseInterface import cBaseInterFace
else:
from typing import TypeVar
cBaseInterFace = TypeVar("cBaseInterFace")
cBaseInterFaceSettings = TypeVar("cBaseInterFaceSettings")
class cInterFaceResultParser(cResultParser):
""" Resultparser object for Interfaces """
def __init__(self,oInterFace:cBaseInterFace,uConfigName:str):
super().__init__()
self.oInterFace:cBaseInterFace = oInterFace
self.uConfigName:str = uConfigName
self.uObjectName = oInterFace.uObjectName
self.uDebugContext = "Interface: % s , Config: %s:" % (self.uObjectName,self.uConfigName)
self.uContext = self.uObjectName + '/' + self.uConfigName
self.oAction:Union[cAction,None] = None
self.oSetting:Union[cBaseInterFaceSettings,None]= None
def ParseResult(self,oAction,uResponse,oSetting) -> Tuple[str,str]:
"""
:param cAction oAction: The Action object
:param string uResponse: The response to parse
:param cBaseInterFaceSettings oSetting: The interface setting of the action
:return: The result of parse action
"""
self.oAction = oAction
self.oSetting = oSetting
return self.Parse(uResponse=uResponse,
uGetVar=oAction.uGetVar,
uParseResultOption=oAction.uParseResultOption,
uGlobalDestVar=oAction.uGlobalDestVar,
uLocalDestVar=oAction.uLocalDestVar,
uTokenizeString=oAction.uParseResultTokenizeString,
uParseResultFlags=oAction.uParseResultFlags)
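# Hedged usage sketch (object names are placeholders, not part of the ORCA API;
# ParseResult returns the Tuple[str, str] declared above):
#   oParser = cInterFaceResultParser(oInterFace=oMyInterFace, uConfigName='CONFIG_DEFAULT')
#   uVar, uValue = oParser.ParseResult(oAction=oMyAction, uResponse='...', oSetting=oMySetting)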
|
thica/ORCA-Remote
|
src/ORCA/interfaces/InterfaceResultParser.py
|
Python
|
gpl-3.0
| 3,013
|
[
"ORCA"
] |
2604e188c8e3ea0aa73d4fe995cb7c97cb78d8bffb09971cf8acd80d901a88e4
|
# crest_macro.py
# the macro to generate a crest, the optional gear of the cross_cube
# created by charlyoleg on 2013/12/11
#
# (C) Copyright 2013 charlyoleg
#
# This file is part of the Cnc25D Python package.
#
# Cnc25D is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cnc25D is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cnc25D. If not, see <http://www.gnu.org/licenses/>.
################################################################
# this file is intended to be included in the file bin/cnc25d_example_generator.py
# for this purpose, there are some syntax restrictions:
# don't use triple single-quotes (''') or the return character ('\n') in this file
# but you can still use triple double-quote (")
################################################################
"""
this piece of code is an example of how to use the parametric design crest
You can also use this file as a FreeCAD macro from the GUI
You can also copy-paste this code in your own design files
If you don't know which value to set for a constraint-parameter, just comment it out. The default value is used if you don't set a constraint explicitly.
"""
################################################################
# Installation pre-request
################################################################
# This script needs freecad and Cnc25D installed on your system
# visit those sites for more information:
# http://www.freecadweb.org/
# https://pypi.python.org/pypi/Cnc25D
#
# To install FreeCAD on Ubuntu, run the following command:
# > sudo apt-get install freecad
# or to get the newest version:
# > sudo add-apt-repository ppa:freecad-maintainers/freecad-stable
# > sudo apt-get update
# > sudo apt-get install freecad
# and optionally:
# > sudo apt-get install freecad-doc freecad-dev
# To install the python package cnc25d, run the following command:
# > sudo pip install Cnc25D
# or
# > sudo pip install Cnc25D -U
################################################################
# header for Python / FreeCAD compatibility
################################################################
try: # when working with an installed Cnc25D package
from cnc25d import cnc25d_api
except: # when working on the source files
import importing_cnc25d # give access to the cnc25d package
from cnc25d import cnc25d_api
cnc25d_api.importing_freecad()
#print("FreeCAD.Version:", FreeCAD.Version())
################################################################
# import
################################################################
#
from cnc25d import cnc25d_design
#
import Part
################################################################
# parameters value
################################################################
#
# choose the values of the parameters by editing this file
# feature request : create a GUI with PyQt4 to edit those parameter values
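# Hedged illustration of the default fallback described above (assumption:
# cnc25d_design.crest() substitutes its own defaults for any key left out):
#   minimal_constraint = {'gear_module': 3.0}  # all other constraints left to defaults
#   my_minimal_crest = cnc25d_design.crest(minimal_constraint)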
crest_constraint = {} # This python-dictionary contains all the constraint-parameters to build the crest part (gear for cross_cube)
##### parameter inheritance from cross_cube_sub
### face A1, A2, B1 and B2
# height
crest_constraint['axle_diameter'] = 10.0
crest_constraint['inter_axle_length'] = 15.0
crest_constraint['height_margin'] = 10.0
crest_constraint['top_thickness'] = 5.0
# width
crest_constraint['cube_width'] = 60.0
crest_constraint['face_B1_thickness'] = 8.0
crest_constraint['face_B2_thickness'] = 6.0
crest_constraint['face_A1_thickness'] = crest_constraint['face_B1_thickness'] # not directly used by crest but inherited by cross_cube
crest_constraint['face_A2_thickness'] = crest_constraint['face_B2_thickness'] # not directly used by crest but inherited by cross_cube
### threaded rod
# face
crest_constraint['face_rod_hole_diameter'] = 4.0
crest_constraint['face_rod_hole_h_position'] = 5.0
crest_constraint['face_rod_hole_v_distance'] = 5.0
crest_constraint['face_rod_hole_v_position'] = 5.0
### hollow
# face hollow
crest_constraint['face_hollow_leg_nb'] = 1 # possible values: 1 (filled), 4, 8
crest_constraint['face_hollow_border_width'] = 0.0
crest_constraint['face_hollow_axle_width'] = 0.0
crest_constraint['face_hollow_leg_width'] = 0.0
crest_constraint['face_hollow_smoothing_radius'] = 0.0
### manufacturing
crest_constraint['cross_cube_cnc_router_bit_radius'] = 1.0
crest_constraint['cross_cube_extra_cut_thickness'] = 0.0
##### parameter inheritance from gear_profile
### first gear
# general
crest_constraint['gear_addendum_dedendum_parity'] = 50.0
# tooth height
crest_constraint['gear_tooth_half_height'] = 0.0
crest_constraint['gear_addendum_height_pourcentage'] = 100.0
crest_constraint['gear_dedendum_height_pourcentage'] = 100.0
crest_constraint['gear_hollow_height_pourcentage'] = 25.0
crest_constraint['gear_router_bit_radius'] = 0.1
# positive involute
crest_constraint['gear_base_diameter'] = 0.0
crest_constraint['gear_force_angle'] = 0.0
crest_constraint['gear_tooth_resolution'] = 2
crest_constraint['gear_skin_thickness'] = 0.0
# negative involute (if zero, negative involute = positive involute)
crest_constraint['gear_base_diameter_n'] = 0.0
crest_constraint['gear_force_angle_n'] = 0.0
crest_constraint['gear_tooth_resolution_n'] = 0
crest_constraint['gear_skin_thickness_n'] = 0.0
### second gear
# general
crest_constraint['second_gear_type'] = 'e'
crest_constraint['second_gear_tooth_nb'] = 0
crest_constraint['second_gear_primitive_diameter'] = 0.0
crest_constraint['second_gear_addendum_dedendum_parity'] = 0.0
# tooth height
crest_constraint['second_gear_tooth_half_height'] = 0.0
crest_constraint['second_gear_addendum_height_pourcentage'] = 100.0
crest_constraint['second_gear_dedendum_height_pourcentage'] = 100.0
crest_constraint['second_gear_hollow_height_pourcentage'] = 25.0
crest_constraint['second_gear_router_bit_radius'] = 0.0
# positive involute
crest_constraint['second_gear_base_diameter'] = 0.0
crest_constraint['second_gear_tooth_resolution'] = 0
crest_constraint['second_gear_skin_thickness'] = 0.0
# negative involute (if zero, negative involute = positive involute)
crest_constraint['second_gear_base_diameter_n'] = 0.0
crest_constraint['second_gear_tooth_resolution_n'] = 0
crest_constraint['second_gear_skin_thickness_n'] = 0.0
### gearbar specific
crest_constraint['gearbar_slope'] = 0.0
crest_constraint['gearbar_slope_n'] = 0.0
### position
# second gear position
crest_constraint['second_gear_position_angle'] = 0.0
crest_constraint['second_gear_additional_axis_length'] = 0.0
##### crest specific
### outline
crest_constraint['gear_module'] = 3.0
crest_constraint['virtual_tooth_nb'] = 60
crest_constraint['portion_tooth_nb'] = 30
crest_constraint['free_mounting_width'] = 15.0
### crest_hollow
crest_constraint['crest_hollow_leg_nb'] = 4 # possible values: 1(filled), 2(end-legs only), 3, 4 ...
crest_constraint['end_leg_width'] = 10.0
crest_constraint['middle_leg_width'] = 0.0
crest_constraint['crest_hollow_external_diameter'] = 0.0
crest_constraint['crest_hollow_internal_diameter'] = 0.0
crest_constraint['floor_width'] = 0.0
crest_constraint['crest_hollow_smoothing_radius'] = 0.0
### gear_holes
crest_constraint['fastening_hole_diameter'] = 5.0
crest_constraint['fastening_hole_position'] = 0.0
crest_constraint['centring_hole_diameter'] = 1.0
crest_constraint['centring_hole_distance'] = 8.0
crest_constraint['centring_hole_position'] = 0.0
## part thickness
crest_constraint['crest_thickness'] = 5.0
### manufacturing
crest_constraint['crest_cnc_router_bit_radius'] = 0.5
################################################################
# action
################################################################
my_crest = cnc25d_design.crest(crest_constraint)
my_crest.outline_display()
my_crest.write_info_txt("test_output/crest_macro")
my_crest.write_figure_svg("test_output/crest_macro")
my_crest.write_figure_dxf("test_output/crest_macro")
my_crest.write_figure_brep("test_output/crest_macro")
my_crest.write_assembly_brep("test_output/crest_macro")
my_crest.write_freecad_brep("test_output/crest_macro")
my_crest.run_simulation("")
my_crest.view_design_configuration()
#my_crest.run_self_test("")
#my_crest.cli("--output_file_basename test_output/alm.dxf") # Warning: all constraint values are reset to their default values
if(cnc25d_api.interpretor_is_freecad()):
Part.show(my_crest.get_fc_obj_3dconf('crest_3dconf1'))
|
charlyoleg/Cnc25D
|
cnc25d/tests/crest_macro.py
|
Python
|
gpl-3.0
| 8,987
|
[
"VisIt"
] |
b16f4098f5c98c4616664ebc98064f5ea855eaa9905a147769d94e86aebbeaa8
|
##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing the libsmm library, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
import shutil
from distutils.version import LooseVersion
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
class EB_libsmm(EasyBlock):
"""
Support for the CP2K small matrix library
Notes: - build can take really, really long, and no real rebuilding is needed for each version
- CP2K can be built without this
"""
@staticmethod
def extra_options():
# default dimensions
dd = [1,4,5,6,9,13,16,17,22]
extra_vars = {
'transpose_flavour': [1, "Transpose flavour of routines", CUSTOM],
'max_tiny_dim': [12, "Maximum tiny dimension", CUSTOM],
'dims': [dd, "Generate routines for these matrix dims", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def configure_step(self):
"""Configure build: change to tools/build_libsmm dir"""
try:
dst = 'tools/build_libsmm'
os.chdir(dst)
self.log.debug('Change to directory %s' % dst)
except OSError, err:
raise EasyBuildError("Failed to change to directory %s: %s", dst, err)
def build_step(self):
"""Build libsmm
Possible iterations over precision (single/double) and type (real/complex)
- also type of transpose matrix
- all set in the config file
Make the config.in file (it is sourced later during the build)
"""
fn = 'config.in'
cfg_tpl = """# This config file was generated by EasyBuild
# the build script can generate optimized routines packed in a library for
# 1) 'nn' => C=C+MATMUL(A,B)
# 2) 'tn' => C=C+MATMUL(TRANSPOSE(A),B)
# 3) 'nt' => C=C+MATMUL(A,TRANSPOSE(B))
# 4) 'tt' => C=C+MATMUL(TRANSPOSE(A),TRANSPOSE(B))
#
# select a transpose_flavor from the list 1 2 3 4
#
transpose_flavor=%(transposeflavour)s
# 1) d => double precision real
# 2) s => single precision real
# 3) z => double precision complex
# 4) c => single precision complex
#
# select a data_type from the list 1 2 3 4
#
data_type=%(datatype)s
# target compiler... these are the options used for building the library.
# They should be aggressive enough to e.g. perform vectorization for the specific CPU (e.g. -ftree-vectorize -march=native),
# and allow some flexibility in reordering floating point expressions (-ffast-math).
# Higher level optimisation (in particular loop nest optimization) should not be used.
#
target_compile="%(targetcompile)s"
# target dgemm link options... these are the options needed to link blas (e.g. -lblas)
# blas is used as a fall back option for sizes not included in the library or in those cases where it is faster
# the same blas library should thus also be used when libsmm is linked.
#
OMP_NUM_THREADS=1
blas_linking="%(LIBBLAS)s"
# matrix dimensions for which optimized routines will be generated.
# since all combinations of M,N,K are being generated the size of the library becomes very large
# if too many sizes are being optimized for. Numbers have to be ascending.
#
dims_small="%(dims)s"
# tiny dimensions are used as primitives and generated in an 'exhaustive' search.
# They should be a sequence from 1 to N,
# where N is a number that is large enough to have good cache performance
# (e.g. for modern SSE cpus 8 to 12)
# Too large (>12?) is not beneficial, but increases the time needed to build the library
# Too small (<8) will lead to a slow library, but the build might proceed quickly
# The minimum number for a successful build is 4
#
dims_tiny="%(tiny_dims)s"
# host compiler... this is used only to compile a few tools needed to build the library.
# The library itself is not compiled this way.
# This compiler needs to be able to deal with some Fortran2003 constructs.
#
host_compile="%(hostcompile)s "
# number of processes to use in parallel for compiling / building and benchmarking the library.
# Should *not* be more than the physical (available) number of cores of the machine
#
tasks=%(tasks)s
"""
# only GCC is supported for now
if self.toolchain.comp_family() == toolchain.GCC: #@UndefinedVariable
hostcompile = os.getenv('F90')
# optimizations
opts = "-O2 -funroll-loops -ffast-math -ftree-vectorize -march=native -fno-inline-functions"
# Depending on the version, we need extra options
extra = ''
gccVersion = LooseVersion(get_software_version('GCC'))
if gccVersion >= LooseVersion('4.6'):
extra = "-flto"
targetcompile = "%s %s %s" % (hostcompile, opts, extra)
else:
raise EasyBuildError("No supported compiler found (tried GCC)")
if not os.getenv('LIBBLAS'):
raise EasyBuildError("No BLAS library specifications found (LIBBLAS not set)!")
cfgdict = {
'datatype': None,
'transposeflavour': self.cfg['transpose_flavour'],
'targetcompile': targetcompile,
'hostcompile': hostcompile,
'dims': ' '.join([str(d) for d in self.cfg['dims']]),
'tiny_dims': ' '.join([str(d) for d in range(1, self.cfg['max_tiny_dim']+1)]),
'tasks': self.cfg['parallel'],
'LIBBLAS': "%s %s" % (os.getenv('LDFLAGS'), os.getenv('LIBBLAS'))
}
# configure for various iterations
datatypes = [(1, 'double precision real'), (3, 'double precision complex')]
for (dt, descr) in datatypes:
cfgdict['datatype'] = dt
try:
txt = cfg_tpl % cfgdict
f = open(fn, 'w')
f.write(txt)
f.close()
self.log.debug("config file %s for datatype %s ('%s'): %s" % (fn, dt, descr, txt))
except IOError, err:
raise EasyBuildError("Failed to write %s: %s", fn, err)
self.log.info("Building for datatype %s ('%s')..." % (dt, descr))
run_cmd("./do_clean")
run_cmd("./do_all")
def install_step(self):
"""Install CP2K: clean, and copy lib directory to install dir"""
run_cmd("./do_clean")
try:
shutil.copytree('lib', os.path.join(self.installdir, 'lib'))
except Exception, err:
raise EasyBuildError("Something went wrong during dir lib copying to installdir: %s", err)
def sanity_check_step(self):
"""Custom sanity check for libsmm"""
custom_paths = {
'files': ["lib/libsmm_%s.a" % x for x in ["dnn", "znn"]],
'dirs': []
}
super(EB_libsmm, self).sanity_check_step(custom_paths=custom_paths)
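# Hedged easyconfig sketch: the custom parameters declared in extra_options()
# above could be set in an easyconfig like this (values are illustrative):
#   transpose_flavour = 1
#   max_tiny_dim = 12
#   dims = [1, 4, 5, 6, 9, 13, 16, 17, 22]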
|
wpoely86/easybuild-easyblocks
|
easybuild/easyblocks/l/libsmm.py
|
Python
|
gpl-2.0
| 8,349
|
[
"CP2K"
] |
48a6830051571bf0dab185c9f8621aca5bf9706f7b1b7dddf085d79465f7f69d
|
#!/usr/bin/env python
'''
Master loader for CANON July (Summer) Campaign 2020
'''
import os
import sys
from datetime import datetime
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir)
from CANON import CANONLoader
import timing
cl = CANONLoader('stoqs_canon_july2020', 'CANON - July 2020',
description='July 2020 shipless campaign in Monterey Bay (CN20S)',
x3dTerrains={
'https://stoqs.mbari.org/x3d/Monterey25_10x/Monterey25_10x_scene.x3d': {
'name': 'Monterey25_10x',
'position': '-2822317.31255 -4438600.53640 3786150.85474',
'orientation': '0.89575 -0.31076 -0.31791 1.63772',
'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236',
'VerticalExaggeration': '10',
},
},
grdTerrain=os.path.join(parentDir, 'Monterey25.grd')
)
startdate = datetime(2020, 7, 15)
enddate = datetime(2020, 8, 5)
# default location of thredds and dods data:
cl.tdsBase = 'http://odss.mbari.org/thredds/'
cl.dodsBase = cl.tdsBase + 'dodsC/'
######################################################################
# GLIDERS
######################################################################
# Glider data files from CeNCOOS thredds server
# L_662a updated parameter names in netCDF file
cl.l_662a_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/Line66/'
cl.l_662a_files = [ 'OS_Glider_L_662_20200615_TS.nc', ]
cl.l_662a_parms = ['temperature', 'salinity', 'fluorescence','oxygen']
cl.l_662a_startDatetime = startdate
cl.l_662a_endDatetime = enddate
# NPS_34a updated parameter names in netCDF file
## The following loads decimated subset of data telemetered during deployment
cl.nps34a_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/MBARI/'
cl.nps34a_files = [ 'OS_Glider_NPS_G34_20200707_TS.nc' ]
cl.nps34a_parms = ['temperature', 'salinity','fluorescence']
cl.nps34a_startDatetime = startdate
cl.nps34a_endDatetime = enddate
# NPS_29 ##
cl.nps29_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/MBARI/'
cl.nps29_files = [ 'OS_Glider_NPS_G29_20200722_TS.nc' ]
cl.nps29_parms = ['TEMP', 'PSAL', 'FLU2', 'OXYG']
cl.nps29_startDatetime = startdate
cl.nps29_endDatetime = enddate
######################################################################
# Wavegliders
######################################################################
# WG Tex - All instruments combined into one file - one time coordinate
##cl.wg_tex_base = cl.dodsBase + 'CANON_september2013/Platforms/Gliders/WG_Tex/final/'
##cl.wg_tex_files = [ 'WG_Tex_all_final.nc' ]
##cl.wg_tex_parms = [ 'wind_dir', 'wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal', 'density', 'bb_470', 'bb_650', 'chl' ]
##cl.wg_tex_startDatetime = startdate
##cl.wg_tex_endDatetime = enddate
# WG Hansen - All instruments combined into one file - one time coordinate
cl.wg_Hansen_base = 'http://dods.mbari.org/opendap/data/waveglider/deployment_data/'
cl.wg_Hansen_files = [
'wgHansen/20200716/realTime/20200716.nc'
]
cl.wg_Hansen_parms = [ 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp_float', 'sal_float', 'water_temp_sub',
'sal_sub', 'bb_470', 'bb_650', 'chl', 'beta_470', 'beta_650', 'pH', 'O2_conc_float','O2_conc_sub' ] # two ctds (_float, _sub), no CO2
cl.wg_Hansen_depths = [ 0 ]
cl.wg_Hansen_startDatetime = startdate
cl.wg_Hansen_endDatetime = enddate
# WG Tiny - All instruments combined into one file - one time coordinate
cl.wg_Tiny_base = 'http://dods.mbari.org/opendap/data/waveglider/deployment_data/'
cl.wg_Tiny_files = [
'wgTiny/20200717/realTime/20200717.nc'
]
cl.wg_Tiny_parms = [ 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal', 'bb_470', 'bb_650', 'chl',
'beta_470', 'beta_650', 'pCO2_water', 'pCO2_air', 'pH', 'O2_conc' ]
cl.wg_Tiny_depths = [ 0 ]
cl.wg_Tiny_startDatetime = startdate
cl.wg_Tiny_endDatetime = enddate
######################################################################
# MOORINGS
######################################################################
cl.m1_base = 'http://dods.mbari.org/opendap/data/ssdsdata/deployments/m1/'
cl.m1_files = [
'201907/OS_M1_20190729hourly_CMSTV.nc', ]
cl.m1_parms = [
'eastward_sea_water_velocity_HR', 'northward_sea_water_velocity_HR',
'SEA_WATER_SALINITY_HR', 'SEA_WATER_TEMPERATURE_HR', 'SW_FLUX_HR', 'AIR_TEMPERATURE_HR',
'EASTWARD_WIND_HR', 'NORTHWARD_WIND_HR', 'WIND_SPEED_HR'
]
cl.m1_startDatetime = startdate
cl.m1_endDatetime = enddate
# Execute the load
cl.process_command_line()
if cl.args.test:
cl.stride = 10
elif cl.args.stride:
cl.stride = cl.args.stride
load_shark_bite = False
if load_shark_bite:
# Load 10 Hz orientation data from the shark bite at 2306 20 July 2020, see:
# https://mbari.slack.com/archives/C4VJ11Q83/p1595610046147800?thread_ts=1595544882.109700&cid=C4VJ11Q83
cl.brizo_base = 'http://dods.mbari.org/opendap/data/lrauv/brizo/missionlogs/2020/20200720_20200723/20200720T202049/'
cl.brizo_files = ['202007202020_202007210640_100ms_scieng.nc', ]
cl.brizo_parms = [ 'yaw', 'pitch', 'roll',
]
cl.loadLRAUV('brizo', startdate, enddate, build_attrs=False)
cl.addTerrainResources()
sys.exit()
cl.loadM1()
#cl.loadL_662a()
cl.load_NPS29()
cl.load_NPS34a()
cl.load_wg_Tiny()
cl.load_wg_Hansen()
cl.loadLRAUV('brizo', startdate, enddate)
cl.loadLRAUV('makai', startdate, enddate)
#cl.loadDorado(startdate, enddate, build_attrs=True)
##cl.loadSubSamples()
# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
cl.addTerrainResources()
print("All Done.")
|
duane-edgington/stoqs
|
stoqs/loaders/CANON/loadCANON_july2020.py
|
Python
|
gpl-3.0
| 5,960
|
[
"NetCDF"
] |
f74f9589daccac6e009525f7b620ce081cfe26016e62fa4d3e881718e44ef86c
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2019 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Alvaro del Castillo San Felix <acs@bitergia.com>
#
import logging
from datetime import datetime
import sys
from grimoire_elk.elk import feed_backend, enrich_backend
from grimoire_elk.elastic import ElasticSearch
from grimoire_elk.elastic_items import ElasticItems
from grimoire_elk.utils import get_params, config_logging
if __name__ == '__main__':
"""Perceval2Ocean tool"""
app_init = datetime.now()
args = get_params()
config_logging(args.debug)
url = args.elastic_url
clean = args.no_incremental
if args.fetch_cache:
clean = True
try:
if args.backend:
# Configure elastic bulk size and scrolling
if args.bulk_size:
ElasticSearch.max_items_bulk = args.bulk_size
if args.scroll_size:
ElasticItems.scroll_size = args.scroll_size
if not args.enrich_only:
feed_backend(url, clean, args.fetch_cache,
args.backend, args.backend_args,
args.index, args.index_enrich, args.project,
args.arthur)
logging.info("Backend feed completed")
studies_args = None
if args.studies_list:
# Convert the list to the expected format in enrich_backend method
studies_args = []
for study in args.studies_list:
studies_args.append({"name": study,
"type": study,
"params": {}
})
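# e.g. args.studies_list == ["enrich_onion"] (study name illustrative) becomes
# [{"name": "enrich_onion", "type": "enrich_onion", "params": {}}]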
if args.enrich or args.enrich_only:
unaffiliated_group = None
enrich_backend(url, clean, args.backend, args.backend_args, None,
args.index, args.index_enrich,
args.db_projects_map, args.json_projects_map,
args.db_sortinghat,
args.no_incremental, args.only_identities,
args.github_token,
args.studies, args.only_studies,
args.elastic_url_enrich, args.events_enrich,
args.db_user, args.db_password, args.db_host,
args.refresh_projects, args.refresh_identities,
args.author_id, args.author_uuid,
args.filter_raw, args.filters_raw_prefix,
args.jenkins_rename_file, unaffiliated_group,
args.pair_programming, studies_args)
logging.info("Enrich backend completed")
elif args.events_enrich:
logging.info("Enrich option is needed for events_enrich")
else:
logging.error("You must configure a backend")
except KeyboardInterrupt:
logging.info("\n\nReceived Ctrl-C or other break signal. Exiting.\n")
sys.exit(0)
total_time_min = (datetime.now() - app_init).total_seconds() / 60
logging.info("Finished in %.2f min" % (total_time_min))
|
grimoirelab/GrimoireELK
|
utils/p2o.py
|
Python
|
gpl-3.0
| 3,937
|
[
"Elk"
] |
c15270521edc059270c9e7b9458d12b72521977a74ff9dc1344e13c57a32ae2b
|
'''
Validate the string, checking all open and close brackets
'''
import re
OPEN_BRACKETS = {'{', '(', '['}
CLOSE_BRACKETS = {'}', ')', ']'}
BRACKETS_MAP = {'{':'}', '(':')', '[':']'}
def check_string(s):
stack = []
for c in s:
if c in CLOSE_BRACKETS:
if not stack:
return False  # closing bracket with no matching opener
bracket = stack.pop()
if c != BRACKETS_MAP[bracket]:
return False
if c in OPEN_BRACKETS:
stack.append(c)
return len(stack) == 0
assert check_string('{{}} + 3 + ((0 hs )) + {[]}') == True
assert check_string('{]}') == False
assert check_string('{{}[]()}') == True
assert check_string('{{()()([][{}])}[][]{{}{{{}}}}()}') == True
assert check_string('{{()()([][{}])}[][]{{1234*303044}{{{}}}}()}') == True
assert check_string('{{()()([][{}])}[][]{{1234*303044}{{{[}}}}()}') == False
'''
Given a flat file of book metadata, write a Library class that parses the book data and provides an API that lets you search for all books containing a word.
API:
Library
- <constructor>(input) -> returns a Library object
- search(word) -> returns all books that contain the word anywhere in the
title, author, or description fields. Only matches *whole* words.
E.g. Searching for "My" or "book" would match a book containing "My book", but searching for "My b" or "boo" would *not* match.
'''
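# Hedged note on the whole-word rule above (a regex word boundary is one way to
# implement it, an assumption rather than the only prescribed approach):
#   import re
#   bool(re.search(r'\b' + re.escape('book') + r'\b', 'My book'))  # True
#   bool(re.search(r'\b' + re.escape('boo') + r'\b', 'My book'))   # False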
LIBRARY_DATA = """
TITLE: Hitchhiker's Guide to the Galaxy
AUTHOR: Douglas Adams
DESCRIPTION: Seconds before the Earth is demolished to make way for a galactic freeway, Arthur Dent is plucked off the planet by his friend Ford Prefect, a researcher for the revised edition of The Hitchhiker's Guide to the Galaxy who, for the last fifteen years, has been posing as an out-of-work actor.
TITLE: Dune
AUTHOR: Frank Herbert
DESCRIPTION: The troubles begin when stewardship of Arrakis is transferred by the Emperor from the Harkonnen Noble House to House Atreides. The Harkonnens don't want to give up their privilege, though, and through sabotage and treachery they cast young Duke Paul Atreides out into the planet's harsh environment to die. There he falls in with the Fremen, a tribe of desert dwellers who become the basis of the army with which he will reclaim what's rightfully his. Paul Atreides, though, is far more than just a usurped duke. He might be the end product of a very long-term genetic experiment designed to breed a super human; he might be a messiah. His struggle is at the center of a nexus of powerful people and events, and the repercussions will be felt throughout the Imperium.
TITLE: A Song Of Ice And Fire Series
AUTHOR: George R.R. Martin
DESCRIPTION: As the Seven Kingdoms face a generation-long winter, the noble Stark family confronts the poisonous plots of the rival Lannisters, the emergence of the White Walkers, the arrival of barbarian hordes, and other threats.
"""
class Book():
def __init__(self, title, author, description):
self._title = title
self._author = author
self._description = description
@property
def title(self):
return self._title
@property
def author(self):
return self._author
@property
def description(self):
return self._description
def has_word(self, word):
# Whole-word match, per the spec above: "boo" must not match "book".
pattern = r'\b' + re.escape(word) + r'\b'
for field in (self._title, self._author, self._description):
if re.search(pattern, field):
return True
return False
def __repr__(self):
return str(self._title) + str(self._author) + str(self._description)
class Library:
def __init__(self, data):
title = ''
author = ''
self._books = []
for item in data.split('\n'):
if 'TITLE' in item:
title = item[len('TITLE: '):]
elif 'AUTHOR' in item:
author = item[len('AUTHOR: '):]
elif 'DESCRIPTION' in item:
self._books.append(Book(title, author, item[len('DESCRIPTION: '):]))
title = ''
author = ''
def search(self, word):
items = [ book for book in self._books if book.has_word(word) ]
return items
library = Library(LIBRARY_DATA)
first_results = library.search("Arrakis")
assert first_results[0].title == "Dune"
second_results = library.search("winter")
assert second_results[0].title == "A Song Of Ice And Fire Series"
third_results = library.search("demolished")
assert third_results[0].title == "Hitchhiker's Guide to the Galaxy"
fourth_results = library.search("the")
assert len(fourth_results) == 3
assert fourth_results[0].title == "Hitchhiker's Guide to the Galaxy"
assert fourth_results[1].title == "Dune"
assert fourth_results[2].title == "A Song Of Ice And Fire Series"
|
fleith/coding
|
interviews/interview1.py
|
Python
|
unlicense
| 4,696
|
[
"Galaxy"
] |
59d5ce9cddfd892226718bee552b00f31ddbabde132c13ac34d0cdf4ca563778
|
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2016-2017 Francois Beaune, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import argparse
import os
import sys
#--------------------------------------------------------------------------------------------------
# Utility functions.
#--------------------------------------------------------------------------------------------------
def walk(directory, recursive):
if recursive:
for dirpath, dirnames, filenames in os.walk(directory):
for filename in filenames:
yield os.path.join(dirpath, filename)
else:
dirpath, dirnames, filenames = next(os.walk(directory))
for filename in filenames:
yield os.path.join(dirpath, filename)
#--------------------------------------------------------------------------------------------------
# Processing code.
#--------------------------------------------------------------------------------------------------
def process_file(filepath):
print("processing {0}...".format(filepath))
with open(filepath) as f:
lines = f.readlines()
section_begin = -1
for index in range(len(lines)):
line = lines[index]
if section_begin == -1 and line.startswith("#include"):
section_begin = index
if section_begin != -1 and line in ["\n", "\r\n"]:
if all(clause.startswith("#include") for clause in lines[section_begin:index]):
lines[section_begin:index] = sorted(lines[section_begin:index], key=lambda s: s.lower())
section_begin = -1
with open(filepath + ".processed", "wt") as f:
for line in lines:
f.write(line)
os.remove(filepath)
os.rename(filepath + ".processed", filepath)
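# Illustration of the rewrite performed above (hypothetical input): a block
#   #include "zoo.h"
#   #include "Bar.h"
# terminated by a blank line is rewritten, in case-insensitive order, as
#   #include "Bar.h"
#   #include "zoo.h"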
#--------------------------------------------------------------------------------------------------
# Entry point.
#--------------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="sort #include clauses in c++ source code.")
parser.add_argument("-r", "--recursive", action='store_true', dest='recursive',
help="process all files in the specified directory and all its subdirectories")
parser.add_argument("path", help="file or directory to process")
args = parser.parse_args()
if os.path.isfile(args.path):
process_file(args.path)
else:
for filepath in walk(args.path, args.recursive):
ext = os.path.splitext(filepath)[1]
if ext == ".h" or ext == ".cpp":
process_file(filepath)
if __name__ == '__main__':
main()
|
gospodnetic/appleseed
|
scripts/sortincludes.py
|
Python
|
mit
| 3,882
|
[
"VisIt"
] |
eb501425cc278ac6e8669fe0a016f0469bc4df433e873dbea2bf8daab3475fce
|
"""Fractal functions"""
import numpy as np
from numba import jit, types
from math import log, floor
from .entropy import num_zerocross
from .utils import _linear_regression, _log_n
__all__ = ['petrosian_fd', 'katz_fd', 'higuchi_fd', 'detrended_fluctuation']
def petrosian_fd(x, axis=-1):
"""Petrosian fractal dimension.
Parameters
----------
x : list or np.array
1D or N-D data.
axis : int
The axis along which the FD is calculated. Default is -1 (last).
Returns
-------
pfd : float
Petrosian fractal dimension.
Notes
-----
The Petrosian fractal dimension of a time-series :math:`x` is defined by:
.. math:: P = \\frac{\\log_{10}(N)}{\\log_{10}(N) +
\\log_{10}(\\frac{N}{N+0.4N_{\\delta}})}
where :math:`N` is the length of the time series, and
:math:`N_{\\delta}` is the number of sign changes in the signal derivative.
Original code from the `pyrem <https://github.com/gilestrolab/pyrem>`_
package by Quentin Geissmann.
References
----------
* A. Petrosian, Kolmogorov complexity of finite sequences and
recognition of different preictal EEG patterns, in Proceedings of the
Eighth IEEE Symposium on Computer-Based Medical Systems, 1995,
pp. 212-217.
* Goh, Cindy, et al. "Comparison of fractal dimension algorithms for
the computation of EEG biomarkers for dementia." 2nd International
Conference on Computational Intelligence in Medicine and Healthcare
(CIMED2005). 2005.
Examples
--------
>>> import numpy as np
>>> import antropy as ant
>>> import stochastic.processes.noise as sn
>>> rng = np.random.default_rng(seed=42)
>>> x = sn.FractionalGaussianNoise(hurst=0.5, rng=rng).sample(10000)
>>> print(f"{ant.petrosian_fd(x):.4f}")
1.0264
Fractional Gaussian noise with H = 0.9
>>> rng = np.random.default_rng(seed=42)
>>> x = sn.FractionalGaussianNoise(hurst=0.9, rng=rng).sample(10000)
>>> print(f"{ant.petrosian_fd(x):.4f}")
1.0235
Fractional Gaussian noise with H = 0.1
>>> rng = np.random.default_rng(seed=42)
>>> x = sn.FractionalGaussianNoise(hurst=0.1, rng=rng).sample(10000)
>>> print(f"{ant.petrosian_fd(x):.4f}")
1.0283
Random
>>> rng = np.random.default_rng(seed=42)
>>> print(f"{ant.petrosian_fd(rng.random(1000)):.4f}")
1.0350
Pure sine wave
>>> x = np.sin(2 * np.pi * 1 * np.arange(3000) / 100)
>>> print(f"{ant.petrosian_fd(x):.4f}")
1.0010
Linearly-increasing time-series (should be 1)
>>> x = np.arange(1000)
>>> print(f"{ant.petrosian_fd(x):.4f}")
1.0000
"""
x = np.asarray(x)
N = x.shape[axis]
# Number of sign changes in the first derivative of the signal
nzc_deriv = num_zerocross(np.diff(x, axis=axis), axis=axis)
pfd = np.log10(N) / (np.log10(N) + np.log10(N / (N + 0.4 * nzc_deriv)))
return pfd
def katz_fd(x, axis=-1):
"""Katz Fractal Dimension.
Parameters
----------
x : list or np.array
1D or N-D data.
axis : int
The axis along which the FD is calculated. Default is -1 (last).
Returns
-------
kfd : float
Katz fractal dimension.
Notes
-----
Katz’s method calculates the fractal dimension of a sample as follows:
the sum and average of the Euclidean distances between the successive
points of the sample (:math:`L` and :math:`a` , resp.) are calculated as
well as the maximum distance between the first point and any other point
of the sample (:math:`d`). The fractal dimension of the sample (:math:`D`)
then becomes:
.. math::
D = \\frac{\\log_{10}(L/a)}{\\log_{10}(d/a)} =
\\frac{\\log_{10}(n)}{\\log_{10}(d/L)+\\log_{10}(n)}
where :math:`n` is :math:`L` divided by :math:`a`.
Original code from the `mne-features <https://mne.tools/mne-features/>`_
package by Jean-Baptiste Schiratti and Alexandre Gramfort.
References
----------
* https://ieeexplore.ieee.org/abstract/document/904882
* https://hal.inria.fr/inria-00442374/
* https://www.hindawi.com/journals/ddns/2011/724697/
Examples
--------
>>> import numpy as np
>>> import antropy as ant
>>> import stochastic.processes.noise as sn
>>> rng = np.random.default_rng(seed=42)
>>> x = sn.FractionalGaussianNoise(hurst=0.5, rng=rng).sample(10000)
>>> print(f"{ant.katz_fd(x):.4f}")
6.4713
Fractional Gaussian noise with H = 0.9
>>> rng = np.random.default_rng(seed=42)
>>> x = sn.FractionalGaussianNoise(hurst=0.9, rng=rng).sample(10000)
>>> print(f"{ant.katz_fd(x):.4f}")
4.5720
Fractional Gaussian noise with H = 0.1
>>> rng = np.random.default_rng(seed=42)
>>> x = sn.FractionalGaussianNoise(hurst=0.1, rng=rng).sample(10000)
>>> print(f"{ant.katz_fd(x):.4f}")
7.6540
Random
>>> rng = np.random.default_rng(seed=42)
>>> print(f"{ant.katz_fd(rng.random(1000)):.4f}")
8.1531
Pure sine wave
>>> x = np.sin(2 * np.pi * 1 * np.arange(3000) / 100)
>>> print(f"{ant.katz_fd(x):.4f}")
2.4871
Linearly-increasing time-series (should be 1)
>>> x = np.arange(1000)
>>> print(f"{ant.katz_fd(x):.4f}")
1.0000
"""
x = np.asarray(x)
dists = np.abs(np.diff(x, axis=axis))
ll = dists.sum(axis=axis)
ln = np.log10(ll / dists.mean(axis=axis))
aux_d = x - np.take(x, indices=[0], axis=axis)
d = np.max(np.abs(aux_d), axis=axis)
kfd = np.squeeze(ln / (ln + np.log10(d / ll)))
if not kfd.ndim:
kfd = kfd.item()
return kfd
@jit((types.Array(types.float64, 1, 'C', readonly=True), types.int32))
def _higuchi_fd(x, kmax):
"""Utility function for `higuchi_fd`.
"""
n_times = x.size
lk = np.empty(kmax)
x_reg = np.empty(kmax)
y_reg = np.empty(kmax)
for k in range(1, kmax + 1):
lm = np.empty((k,))
for m in range(k):
ll = 0
n_max = floor((n_times - m - 1) / k)
n_max = int(n_max)
for j in range(1, n_max):
ll += abs(x[m + j * k] - x[m + (j - 1) * k])
ll /= k
ll *= (n_times - 1) / (k * n_max)
lm[m] = ll
# Mean of lm
m_lm = 0
for m in range(k):
m_lm += lm[m]
m_lm /= k
lk[k - 1] = m_lm
x_reg[k - 1] = log(1. / k)
y_reg[k - 1] = log(m_lm)
higuchi, _ = _linear_regression(x_reg, y_reg)
return higuchi
def higuchi_fd(x, kmax=10):
"""Higuchi Fractal Dimension.
Parameters
----------
x : list or np.array
One dimensional time series.
kmax : int
Maximum delay/offset (in number of samples).
Returns
-------
hfd : float
Higuchi fractal dimension.
Notes
-----
Original code from the `mne-features <https://mne.tools/mne-features/>`_
package by Jean-Baptiste Schiratti and Alexandre Gramfort.
This function uses Numba to speed up the computation.
References
----------
Higuchi, Tomoyuki. "Approach to an irregular time series on the
basis of the fractal theory." Physica D: Nonlinear Phenomena 31.2
(1988): 277-283.
Examples
--------
>>> import numpy as np
>>> import antropy as ant
>>> import stochastic.processes.noise as sn
>>> rng = np.random.default_rng(seed=42)
>>> x = sn.FractionalGaussianNoise(hurst=0.5, rng=rng).sample(10000)
>>> print(f"{ant.higuchi_fd(x):.4f}")
1.9983
Fractional Gaussian noise with H = 0.9
>>> rng = np.random.default_rng(seed=42)
>>> x = sn.FractionalGaussianNoise(hurst=0.9, rng=rng).sample(10000)
>>> print(f"{ant.higuchi_fd(x):.4f}")
1.8517
Fractional Gaussian noise with H = 0.1
>>> rng = np.random.default_rng(seed=42)
>>> x = sn.FractionalGaussianNoise(hurst=0.1, rng=rng).sample(10000)
>>> print(f"{ant.higuchi_fd(x):.4f}")
2.0581
Random
>>> rng = np.random.default_rng(seed=42)
>>> print(f"{ant.higuchi_fd(rng.random(1000)):.4f}")
2.0013
Pure sine wave
>>> x = np.sin(2 * np.pi * 1 * np.arange(3000) / 100)
>>> print(f"{ant.higuchi_fd(x):.4f}")
1.0091
Linearly-increasing time-series
>>> x = np.arange(1000)
>>> print(f"{ant.higuchi_fd(x):.4f}")
1.0040
"""
x = np.asarray(x, dtype=np.float64)
kmax = int(kmax)
return _higuchi_fd(x, kmax)
@jit('f8(f8[:])', nopython=True)
def _dfa(x):
"""
Utility function for detrended fluctuation analysis
"""
N = len(x)
nvals = _log_n(4, 0.1 * N, 1.2)
walk = np.cumsum(x - x.mean())
fluctuations = np.zeros(len(nvals))
for i_n, n in enumerate(nvals):
d = np.reshape(walk[:N - (N % n)], (N // n, n))
ran_n = np.array([float(na) for na in range(n)])
d_len = len(d)
trend = np.empty((d_len, ran_n.size))
for i in range(d_len):
slope, intercept = _linear_regression(ran_n, d[i])
trend[i, :] = intercept + slope * ran_n
# Calculate root mean squares of walks in d around trend
# Note that np.mean on specific axis is not supported by Numba
flucs = np.sum((d - trend) ** 2, axis=1) / n
# https://github.com/neuropsychology/NeuroKit/issues/206
fluctuations[i_n] = np.sqrt(np.mean(flucs))
# Filter zero
nonzero = np.nonzero(fluctuations)[0]
fluctuations = fluctuations[nonzero]
nvals = nvals[nonzero]
if len(fluctuations) == 0:
# all fluctuations are zero => we cannot fit a line
dfa = np.nan
else:
dfa, _ = _linear_regression(np.log(nvals), np.log(fluctuations))
return dfa
def detrended_fluctuation(x):
"""
Detrended fluctuation analysis (DFA).
Parameters
----------
x : list or np.array
One-dimensional time-series.
Returns
-------
alpha : float
the estimate alpha (:math:`\\alpha`) for the Hurst parameter.
:math:`\\alpha < 1` indicates a
stationary process similar to fractional Gaussian noise with
:math:`H = \\alpha`.
:math:`\\alpha > 1` indicates a non-stationary process similar to
fractional Brownian motion with :math:`H = \\alpha - 1`
Notes
-----
`Detrended fluctuation analysis
<https://en.wikipedia.org/wiki/Detrended_fluctuation_analysis>`_
is used to find long-term statistical dependencies in time series.
The idea behind DFA originates from the definition of self-affine
processes. A process :math:`X` is said to be self-affine if the standard
deviation of the values within a window of length n changes with the window
length factor :math:`L` in a power law:
.. math:: \\text{std}(X, L * n) = L^H * \\text{std}(X, n)
where :math:`\\text{std}(X, k)` is the standard deviation of the process
:math:`X` calculated over windows of size :math:`k`. In this equation,
:math:`H` is called the Hurst parameter, which indeed behaves very similarly
to the Hurst exponent.
For more details, please refer to the excellent documentation of the
`nolds <https://cschoel.github.io/nolds/>`_
Python package by Christopher Scholzel, from which this function is taken:
https://cschoel.github.io/nolds/nolds.html#detrended-fluctuation-analysis
Note that the default subseries size is set to
antropy.utils._log_n(4, 0.1 * len(x), 1.2). The current implementation
does not allow manually specifying the subseries size or using overlapping
windows.
The code is a faster (Numba) adaptation of the original code by Christopher
Scholzel.
References
----------
* C.-K. Peng, S. V. Buldyrev, S. Havlin, M. Simons,
H. E. Stanley, and A. L. Goldberger, “Mosaic organization of
DNA nucleotides,” Physical Review E, vol. 49, no. 2, 1994.
* R. Hardstone, S.-S. Poil, G. Schiavone, R. Jansen,
V. V. Nikulin, H. D. Mansvelder, and K. Linkenkaer-Hansen,
“Detrended fluctuation analysis: A scale-free view on neuronal
oscillations,” Frontiers in Physiology, vol. 30, 2012.
Examples
--------
Fractional Gaussian noise with H = 0.5
>>> import numpy as np
>>> import antropy as ant
>>> import stochastic.processes.noise as sn
>>> rng = np.random.default_rng(seed=42)
>>> x = sn.FractionalGaussianNoise(hurst=0.5, rng=rng).sample(10000)
>>> print(f"{ant.detrended_fluctuation(x):.4f}")
0.5216
Fractional Gaussian noise with H = 0.9
>>> rng = np.random.default_rng(seed=42)
>>> x = sn.FractionalGaussianNoise(hurst=0.9, rng=rng).sample(10000)
>>> print(f"{ant.detrended_fluctuation(x):.4f}")
0.8833
Fractional Gaussian noise with H = 0.1
>>> rng = np.random.default_rng(seed=42)
>>> x = sn.FractionalGaussianNoise(hurst=0.1, rng=rng).sample(10000)
>>> print(f"{ant.detrended_fluctuation(x):.4f}")
0.1262
Random
>>> rng = np.random.default_rng(seed=42)
>>> print(f"{ant.detrended_fluctuation(rng.random(1000)):.4f}")
0.5276
Pure sine wave
>>> x = np.sin(2 * np.pi * 1 * np.arange(3000) / 100)
>>> print(f"{ant.detrended_fluctuation(x):.4f}")
1.5848
Linearly-increasing time-series
>>> x = np.arange(1000)
>>> print(f"{ant.detrended_fluctuation(x):.4f}")
2.0390
"""
x = np.asarray(x, dtype=np.float64)
return _dfa(x)
|
raphaelvallat/antropy
|
antropy/fractal.py
|
Python
|
bsd-3-clause
| 13,447
|
[
"Gaussian"
] |
7acdd5172316e75c3ccc73453c9a7f6f2d0d2a627c96e20a0d07e649a1155266
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 16 10:18:13 2015
@author: Michael
This file is part of beam-cam, a camera project to monitor and characterise laser beams.
Copyright (C) 2015 Christian Gross <christian.gross@mpq.mpg.de>, Timon Hilker <timon.hilker@mpq.mpg.de>, Michael Hoese <michael.hoese@physik.lmu.de>, and Konrad Viebahn <kv291@cam.ac.uk>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Please see the README.md file for a copy of the GNU General Public License, or otherwise find it on <http://www.gnu.org/licenses/>.
"""
import GaussBeamSimulation as Sim
reload(Sim)
import MathematicalTools as MatTools
reload(MatTools)
from ctypes import *
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import pyqtgraph as pg
import pyqtgraph.ptime as ptime
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import sys
ExposureTimeAddress = c_int(0x1001)
ExposureAutoAddress = c_int(0x1003)
GainValueAddress = c_int(0x1023)
GainAutoAddress = c_int(0x1024)
FilterGammaAddress = c_int(0x3100)
FilterLuminanceAddress = c_int(0x3101)
FilterContrastAddress = c_int(0x3102)
FilterBlacklevelAddress = c_int(0x3103)
SensorRoiAddress = c_int(0x3010)
PixelClockAddress = c_int(0x2100)
HBlankDurationAddress = c_int(0x1010)
VBlankDurationAddress = c_int(0x1011)
VRefAddress = c_int(0x1070)
BlacklevelAutoAddress = c_int(0x1071)
BlacklevelAdjustAddress = c_int(0x1072)
FlipHorizontalAddress = c_int(0x1046)
FlipVerticalAddress = c_int(0x1047)
SourceFormatAddress = c_int(0x3000)
class CameraKey(Structure):
'''Struct that holds the key of a camera'''
_fields_ = [
('m_serial',c_uint),
('mp_manufacturer_str',POINTER(c_char)),
('mp_product_str',POINTER(c_char)),
('m_busy',c_uint),
('mp_private',POINTER(c_void_p))
]
def __init__(self):
pass
class ImageFormat(Structure):
'''Struct that holds the image format'''
_fields_ = [
('m_width',c_uint),
('m_height',c_uint),
('m_color_format',c_int),
('m_image_modifier',c_int)
]
def __init__(self):
pass
class Image(Structure):
'''Struct that holds the image'''
_fields_ = [
('m_image_format',ImageFormat),
('mp_buffer',POINTER(c_char)),
('m_pitch',c_uint),
('m_time_stamp',c_double),
('mp_private',POINTER(c_void_p))
]
def __init__(self):
pass
class Rect(Structure):
'''Struct that holds the data for a roi of the image'''
_fields_ = [
('m_left',c_int),
('m_top',c_int),
('m_width',c_int),
('m_height',c_int)
]
def __init__(self):
pass
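# Hedged usage sketch (dll path and call order assumed from the methods below,
# not taken from VRmagic documentation):
#   api = VRmagicUSBCam_API(dllPath='vrmusbcam2.dll')
#   api.GetDeviceKeyList()
#   n_cams = api.GetDeviceKeyListSize()
#   if api.GetDeviceKeyListEntry():
#       api.GetDeviceInformation(keytest=1)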
class VRmagicUSBCam_API:
'''Functions for the VR Magic USB Camera.'''
def __init__(self, dllPath='vrmusbcam2.dll'):
self.dll = cdll.LoadLibrary(dllPath)
def ShowErrorInformation(self):
inf = POINTER(c_char)
addr = self.dll.VRmUsbCamGetLastError()
# addr = c_int(addr)
message = cast(addr, inf)
Message = []
i = 0
while message[i] != '\0':
Message.append(message[i])
i += 1
Message = ''.join(Message)
print '!ERROR!: ', Message
def GetDeviceKeyList(self):
Error = self.dll.VRmUsbCamUpdateDeviceKeyList()
if Error==0:
self.ShowErrorInformation()
print 'KeyList'
def GetDeviceKeyListSize(self):
No=c_uint(0)
Error = self.dll.VRmUsbCamGetDeviceKeyListSize(byref(No))
print 'Number of cameras', No.value
if Error==1:
return No.value
else:
self.ShowErrorInformation()
def GetDeviceKeyListEntry(self):
self.CamIndex = 0
self.CamIndex = c_uint(self.CamIndex)
# Key_p = POINTER(CameraKey)
self.dll.VRmUsbCamGetDeviceKeyListEntry.argtypes = [c_uint,POINTER(POINTER(CameraKey))]
self.key = POINTER(CameraKey)()
Key = self.dll.VRmUsbCamGetDeviceKeyListEntry(self.CamIndex,byref(self.key))
if Key==0:
self.ShowErrorInformation()
return 0
else:
return 1
def GetDeviceInformation(self,keytest=0):
if keytest==0:
print 'No valid key available!'
else:
ID = c_uint(0)
ErrID = self.dll.VRmUsbCamGetProductId(self.key,byref(ID))
inf = POINTER(c_char)()
Errinf = self.dll.VRmUsbCamGetSerialString(self.key,byref(inf))
print 'Key', self.key
print ErrID, 'ID', ID.value
serial = []
i = 0
while inf[i] != '\0':
serial.append(inf[i])
i += 1
serial = ''.join(serial)
print 'Serial String: ', serial
print 'Busy: ', self.key.contents.m_busy
'''
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Functions to handle important properties
---------------------------------------------------------------------------
---------------------------------------------------------------------------
'''
def GetExposureTime(self,device):
ExpoTime = c_float(0.0)
Error = self.dll.VRmUsbCamGetPropertyValueF(device, ExposureTimeAddress, byref(ExpoTime))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Exposure Time: ', ExpoTime.value, 'ms'
def SetExposureTime(self,device,exposuretime):
ExpoTime = c_float(exposuretime)
Error = self.dll.VRmUsbCamSetPropertyValueF(device, ExposureTimeAddress, byref(ExpoTime))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Exposure Time set to: ', ExpoTime.value, 'ms'
def GetExposureAuto(self,device):
ExpoAuto = c_bool(False)
Error = self.dll.VRmUsbCamGetPropertyValueB(device, ExposureAutoAddress, byref(ExpoAuto))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Exposure Auto: ', ExpoAuto.value
def SetExposureAuto(self,device,exposureauto=False):
ExpoAuto = c_bool(exposureauto)
Error = self.dll.VRmUsbCamSetPropertyValueB(device, ExposureAutoAddress, byref(ExpoAuto))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Exposure Auto set to: ', ExpoAuto.value
def GetGainValue(self,device):
GainValue = c_int(0)
Error = self.dll.VRmUsbCamGetPropertyValueI(device, GainValueAddress, byref(GainValue))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Gain Value: ', GainValue.value
def SetGainValue(self,device,gainvalue=0):
GainValue = c_int(gainvalue)
Error = self.dll.VRmUsbCamSetPropertyValueI(device, GainValueAddress, byref(GainValue))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Gain Value set to: ', GainValue.value
def GetGainAuto(self,device):
GainAuto = c_bool(0)
Error = self.dll.VRmUsbCamGetPropertyValueB(device, GainAutoAddress, byref(GainAuto))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Gain Auto: ', GainAuto.value
def SetGainAuto(self,device,gainauto=False):
GainAuto = c_bool(gainauto)
Error = self.dll.VRmUsbCamSetPropertyValueB(device, GainAutoAddress, byref(GainAuto))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Gain Auto set to: ', GainAuto.value
def GetFilterGamma(self,device):
FilterGamma = c_float(0.)
Error = self.dll.VRmUsbCamGetPropertyValueF(device, FilterGammaAddress, byref(FilterGamma))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Filter Gamma: ', FilterGamma.value
def SetFilterGamma(self,device,filtergamma=1.0):
FilterGamma = c_float(filtergamma)
Error = self.dll.VRmUsbCamSetPropertyValueF(device, FilterGammaAddress, byref(FilterGamma))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Filter Gamma set to: ', FilterGamma.value
def GetFilterContrast(self,device):
FilterContrast = c_float(0.)
Error = self.dll.VRmUsbCamGetPropertyValueF(device, FilterContrastAddress, byref(FilterContrast))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Filter Contrast: ', FilterContrast.value
def SetFilterContrast(self,device,filtercontrast=1.0):
FilterContrast = c_float(filtercontrast)
Error = self.dll.VRmUsbCamSetPropertyValueF(device, FilterContrastAddress, byref(FilterContrast))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Filter Contrast set to: ', FilterContrast.value
def GetFilterLuminance(self,device):
FilterLuminance = c_int(0)
Error = self.dll.VRmUsbCamGetPropertyValueI(device, FilterLuminanceAddress, byref(FilterLuminance))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Filter Luminance: ', FilterLuminance.value
def SetFilterLuminance(self,device,filterluminance=0):
FilterLuminance = c_int(filterluminance)
Error = self.dll.VRmUsbCamSetPropertyValueI(device, FilterLuminanceAddress, byref(FilterLuminance))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Filter Luminance set to: ', FilterLuminance.value
def GetFilterBlacklevel(self,device):
FilterBlacklevel = c_int(0)
Error = self.dll.VRmUsbCamGetPropertyValueI(device, FilterBlacklevelAddress, byref(FilterBlacklevel))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Filter Blacklevel: ', FilterBlacklevel.value
def SetFilterBlacklevel(self,device,filterblacklevel=0):
FilterBlacklevel = c_int(filterblacklevel)
Error = self.dll.VRmUsbCamSetPropertyValueI(device, FilterBlacklevelAddress, byref(FilterBlacklevel))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Filter Blacklevel set to: ', FilterBlacklevel.value
def GetSensorRoi(self,device):
SensorRoi = Rect()
Error = self.dll.VRmUsbCamGetPropertyValueRectI(device, SensorRoiAddress, byref(SensorRoi))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Sensor Roi: ', SensorRoi.m_left, ':', SensorRoi.m_width, 'X', SensorRoi.m_top, ':', SensorRoi.m_height
def SetSensorRoi(self,device,sensorroi=(0,0,754,480)):
'''"sensorroi" format: (left,top,width,height)'''
SensorRoi = Rect()
SensorRoi.m_left = sensorroi[0]
SensorRoi.m_top = sensorroi[1]
SensorRoi.m_width = sensorroi[2]
SensorRoi.m_height = sensorroi[3]
Error = self.dll.VRmUsbCamSetPropertyValueRectI(device, SensorRoiAddress, byref(SensorRoi))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Sensor Roi set to: ', SensorRoi.m_left, ':', SensorRoi.m_width, 'X', SensorRoi.m_top, ':', SensorRoi.m_height
def GetPixelClock(self,device):
PixelClock = c_float(0.)
Error = self.dll.VRmUsbCamGetPropertyValueF(device, PixelClockAddress, byref(PixelClock))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Pixel Clock: ', PixelClock.value
def SetPixelClock(self,device,pixelclock=13.0):
PixelClock = c_float(pixelclock)
Error = self.dll.VRmUsbCamSetPropertyValueF(device, PixelClockAddress, byref(PixelClock))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Pixel Clock set to: ', PixelClock.value
def GetHBlankDuration(self,device):
HBlankDuration = c_int(0)
Error = self.dll.VRmUsbCamGetPropertyValueI(device, HBlankDurationAddress, byref(HBlankDuration))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'HBlank Duration: ', HBlankDuration.value
def SetHBlankDuration(self,device,hblankduration=61):
HBlankDuration = c_int(hblankduration)
Error = self.dll.VRmUsbCamSetPropertyValueI(device, HBlankDurationAddress, byref(HBlankDuration))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'HBlank Duration set to: ', HBlankDuration.value
def GetVBlankDuration(self,device):
VBlankDuration = c_int(0)
Error = self.dll.VRmUsbCamGetPropertyValueI(device, VBlankDurationAddress, byref(VBlankDuration))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'VBlank Duration: ', VBlankDuration.value
def SetVBlankDuration(self,device,vblankduration=5):
VBlankDuration = c_int(vblankduration)
Error = self.dll.VRmUsbCamSetPropertyValueI(device, VBlankDurationAddress, byref(VBlankDuration))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'VBlank Duration set to: ', VBlankDuration.value
def GetVRef(self,device):
VRef = c_int(0)
Error = self.dll.VRmUsbCamGetPropertyValueI(device, VRefAddress, byref(VRef))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'VRef: ', VRef.value
def SetVRef(self,device,vref=0):
VRef = c_int(vref)
Error = self.dll.VRmUsbCamSetPropertyValueI(device, VRefAddress, byref(VRef))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'VRef set to: ', VRef.value
def GetBlacklevelAuto(self,device):
BlacklevelAuto = c_bool(0)
Error = self.dll.VRmUsbCamGetPropertyValueB(device, BlacklevelAutoAddress, byref(BlacklevelAuto))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Blacklevel Auto: ', BlacklevelAuto.value
def SetBlacklevelAuto(self,device,blacklevelauto=False):
BlacklevelAuto = c_bool(blacklevelauto)
Error = self.dll.VRmUsbCamSetPropertyValueB(device, BlacklevelAutoAddress, byref(BlacklevelAuto))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Blacklevel Auto set to: ', BlacklevelAuto.value
def GetBlacklevelAdjust(self,device):
BlacklevelAdjust = c_int(0)
Error = self.dll.VRmUsbCamGetPropertyValueI(device, BlacklevelAdjustAddress, byref(BlacklevelAdjust))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Blacklevel Adjust: ', BlacklevelAdjust.value
def SetBlacklevelAdjust(self,device,blackleveladjust=0):
BlacklevelAdjust = c_int(blackleveladjust)
Error = self.dll.VRmUsbCamSetPropertyValueI(device, BlacklevelAdjustAddress, byref(BlacklevelAdjust))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Blacklevel Adjust set to: ', BlacklevelAdjust.value
def GetFlipHorizontal(self,device):
FlipHorizontal = c_bool(0)
Error = self.dll.VRmUsbCamGetPropertyValueB(device, FlipHorizontalAddress, byref(FlipHorizontal))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Flip Horizontal: ', FlipHorizontal.value
def SetFlipHorizontal(self,device,fliphorizontal=False):
FlipHorizontal = c_bool(fliphorizontal)
Error = self.dll.VRmUsbCamSetPropertyValueB(device, FlipHorizontalAddress, byref(FlipHorizontal))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Flip Horizontal set to: ', FlipHorizontal.value
def GetFlipVertical(self,device):
FlipVertical = c_bool(0)
Error = self.dll.VRmUsbCamGetPropertyValueB(device, FlipVerticalAddress, byref(FlipVertical))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Flip Vertical: ', FlipVertical.value
def SetFlipVertical(self,device,flipvertical=False):
FlipVertical = c_bool(flipvertical)
Error = self.dll.VRmUsbCamSetPropertyValueB(device, FlipVerticalAddress, byref(FlipVertical))
if Error==0:
self.ShowErrorInformation()
if Error==1:
print 'Flip Vertical set to: ', FlipVertical.value
'''------------------------------------------------------------------------'''
def GetSourceFormat(self,device):
'''Working, but not understood!'''
SourceFormat = c_int(0)
Error = self.dll.VRmUsbCamGetPropertyAttribsE(device, SourceFormatAddress, byref(SourceFormat))
if Error==0:
self.ShowErrorInformation()
if Error==1:
            print 'Source Format: ', SourceFormat.value
def SetSourceFormat(self,device,sourceformat):
'''Working, but not understood!'''
SourceFormat = c_int(sourceformat)
Error = self.dll.VRmUsbCamSetPropertyAttribsE(device, SourceFormatAddress, byref(SourceFormat))
if Error==0:
self.ShowErrorInformation()
if Error==1:
            print 'Source Format set to: ', SourceFormat.value
'''------------------------------------------------------------------------'''
'''
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Functions to handle images
---------------------------------------------------------------------------
---------------------------------------------------------------------------
'''
def TakePicture(self,keytest=0):
'''
Not updated; does not work!!
'''
if keytest==0:
print 'No valid key available!'
elif self.key.contents.m_busy!=0:
print 'Camera is busy!'
else:
Error = self.dll.VRmUsbCamOpenDevice(self.key,byref(self.CamIndex))
if Error==0:
self.ShowErrorInformation()
else:
                print 'Device opened successfully'
self.GetExposureTime(self.CamIndex)
format = ImageFormat()
format.m_width = 754
format.m_height = 482
format.m_color_format = 4
format.m_image_modifier = 0
inf = POINTER(c_char)()
Error = self.dll.VRmUsbCamGetStringFromColorFormat(format.m_color_format,byref(inf))
color = []
i = 0
while inf[i] != '\0':
color.append(inf[i])
i += 1
color = ''.join(color)
print 'Color format: ', color
pixeldepth = c_uint(0)
Error = self.dll.VRmUsbCamGetPixelDepthFromColorFormat(format.m_color_format,byref(pixeldepth))
print 'Pixel Depth: ', pixeldepth.value
self.dll.VRmUsbCamNewImage.argtypes = [POINTER(POINTER(Image)),ImageFormat]
self.image_p = POINTER(Image)()
Error = self.dll.VRmUsbCamStart(self.CamIndex)
Error = self.dll.VRmUsbCamNewImage(byref(self.image_p),format)
Error = self.dll.VRmUsbCamStop(self.CamIndex)
print 'Pitch: ', self.image_p.contents.m_pitch
if Error==0:
self.ShowErrorInformation()
if Error==1:
                    print 'Image taken!'
ImageList = list(self.image_p.contents.mp_buffer[0:(format.m_height)*int(self.image_p.contents.m_pitch)])
# print ImageList[0:10]
# print len(ImageList)
ImageList = [ord(i) for i in ImageList]
print len(ImageList)
self.ImageArray = np.array(ImageList)
self.ImageArray = np.reshape(self.ImageArray,(format.m_height,int(self.image_p.contents.m_pitch)))
self.ImageArray = self.ImageArray[:,:format.m_width]
# for j in range(format.m_height):
# for i in range(format.m_width):
# self.ImageArray[j,i] = ord(self.image_p.contents.mp_buffer[j*int(pixeldepth.value)+i*int(self.image_p.contents.m_pitch)])
# print ord(ImageList[i*int(pixeldepth.value)+j*int(self.image_p.contents.m_pitch)])
plt.figure()
plt.imshow(self.ImageArray, cmap = cm.Greys_r)
name_p = c_char_p('Test.png')
Error = self.dll.VRmUsbCamSavePNG(name_p,self.image_p,c_int(0))
if Error==0:
self.ShowErrorInformation()
if Error==1:
                        print 'Image saved!'
Error = self.dll.VRmUsbCamFreeImage(byref(self.image_p))
if Error==0:
self.ShowErrorInformation()
Error = self.dll.VRmUsbCamFreeDeviceKey(byref(self.key))
if Error==0:
self.ShowErrorInformation()
Error = self.dll.VRmUsbCamCloseDevice(self.CamIndex)
if Error==0:
self.ShowErrorInformation()
def UseSourceFormat(self):
Error = self.dll.VRmUsbCamGetSourceFormatEx(self.CamIndex,c_uint(1),byref(self.format))
if Error==0:
self.ShowErrorInformation()
def GetSourceFormatInformation(self):
inf = POINTER(c_char)()
Error = self.dll.VRmUsbCamGetSourceFormatDescription(self.CamIndex,c_uint(1),byref(inf))
if Error==0:
self.ShowErrorInformation()
sourceformat = []
i = 0
while inf[i] != '\0':
sourceformat.append(inf[i])
i += 1
sourceformat = ''.join(sourceformat)
print 'Source format: ', sourceformat
def GrabNextImage(self):
self.dll.VRmUsbCamLockNextImageEx2.argtypes = [c_uint,c_uint,POINTER(POINTER(Image)),POINTER(c_uint),c_int]
source_image_p = POINTER(Image)()
framesdropped = c_uint(0)
timeout = 5000
Error = self.dll.VRmUsbCamLockNextImageEx2(self.CamIndex,c_uint(1),byref(source_image_p),byref(framesdropped),c_int(timeout))
if Error==0:
self.ShowErrorInformation()
if Error==1:
# print'Image taken!'
ImageList = list(source_image_p.contents.mp_buffer[0:(self.format.m_height)*int(source_image_p.contents.m_pitch)])
ImageList = [ord(i) for i in ImageList]
# print len(ImageList)
self.ImageArray = np.array(ImageList)
self.ImageArray = np.reshape(self.ImageArray,(self.format.m_height,int(source_image_p.contents.m_pitch)))
self.ImageArray = self.ImageArray[:,:self.format.m_width]
Error = self.dll.VRmUsbCamUnlockNextImage(self.CamIndex,byref(source_image_p))
# print 'Unlock Image'
if Error==0:
self.ShowErrorInformation()
def TakePictureGrabbing(self,keytest=0):
if keytest==0:
print 'No valid key available!'
elif self.key.contents.m_busy!=0:
print 'Camera is busy!'
else:
Error = self.dll.VRmUsbCamOpenDevice(self.key,byref(self.CamIndex))
if Error==0:
self.ShowErrorInformation()
else:
print 'Device opened successfully'
self.format = ImageFormat()
self.format.m_width = 754
self.format.m_height = 480
self.format.m_color_format = 4
self.format.m_image_modifier = 0
inf = POINTER(c_char)()
Error = self.dll.VRmUsbCamGetStringFromColorFormat(self.format.m_color_format,byref(inf))
color = []
i = 0
while inf[i] != '\0':
color.append(inf[i])
i += 1
color = ''.join(color)
print 'Color format: ', color
pixeldepth = c_uint(0)
self.GetExposureTime(self.CamIndex)
self.SetExposureTime(self.CamIndex,0.75)
self.GetExposureTime(self.CamIndex)
self.GetExposureAuto(self.CamIndex)
self.SetExposureAuto(self.CamIndex,False)
self.GetExposureAuto(self.CamIndex)
self.GetGainValue(self.CamIndex)
self.SetGainValue(self.CamIndex,16)
self.GetGainAuto(self.CamIndex)
self.SetGainAuto(self.CamIndex,False)
self.GetFilterGamma(self.CamIndex)
self.SetFilterGamma(self.CamIndex)
self.GetFilterContrast(self.CamIndex)
self.SetFilterContrast(self.CamIndex)
self.GetFilterLuminance(self.CamIndex)
self.SetFilterLuminance(self.CamIndex)
self.GetFilterBlacklevel(self.CamIndex)
self.SetFilterBlacklevel(self.CamIndex)
self.GetSensorRoi(self.CamIndex)
self.SetSensorRoi(self.CamIndex)
self.GetFlipVertical(self.CamIndex)
self.SetFlipVertical(self.CamIndex)
# self.GetGainDoubling(self.CamIndex)
Error = self.dll.VRmUsbCamGetPixelDepthFromColorFormat(self.format.m_color_format,byref(pixeldepth))
print 'Pixel Depth: ', pixeldepth.value
self.GetSourceFormatInformation()
self.UseSourceFormat()
Error = self.dll.VRmUsbCamStart(self.CamIndex)
print 'Start Cam'
self.GrabNextImage()
Error = self.dll.VRmUsbCamStop(self.CamIndex)
if Error==0:
self.ShowErrorInformation()
plt.figure()
plt.imshow(self.ImageArray, cmap = cm.Greys_r)
plt.colorbar()
# name_p = c_char_p('Test.png')
# Error = self.dll.VRmUsbCamSavePNG(name_p,source_image_p,c_int(0))
# if Error==0:
# self.ShowErrorInformation()
# if Error==1:
# print'Image saved!'
if Error==0:
self.ShowErrorInformation()
Error = self.dll.VRmUsbCamFreeDeviceKey(byref(self.key))
if Error==0:
self.ShowErrorInformation()
Error = self.dll.VRmUsbCamCloseDevice(self.CamIndex)
if Error==0:
self.ShowErrorInformation()
def RealTimeView(self,keytest=0):
if keytest==0:
print 'No valid key available!'
elif self.key.contents.m_busy!=0:
print 'Camera is busy!'
else:
Error = self.dll.VRmUsbCamOpenDevice(self.key,byref(self.CamIndex))
if Error==0:
self.ShowErrorInformation()
else:
print 'Device opened successfully'
self.GetExposureTime(self.CamIndex)
self.SetExposureTime(self.CamIndex,0.75)
self.GetExposureTime(self.CamIndex)
self.GetExposureAuto(self.CamIndex)
self.SetExposureAuto(self.CamIndex,False)
self.GetExposureAuto(self.CamIndex)
self.format = ImageFormat()
self.GetSourceFormatInformation()
self.UseSourceFormat()
Error = self.dll.VRmUsbCamStart(self.CamIndex)
print 'Started Cam'
app = QtGui.QApplication([])
# ## Create window with GraphicsView widget
# win = pg.GraphicsLayoutWidget()
# win.show() ## show widget alone in its own window
# win.setWindowTitle('pyqtgraph example: ImageItem')
# view = win.addViewBox()
# ## lock the aspect ratio so pixels are always square
# view.setAspectLocked(True)
# ## Create image item
# img = pg.ImageItem(border='w')
# view.addItem(img)
# ## Set initial view bounds
# view.setRange(QtCore.QRectF(0, 0, 754, 480))
# self.GrabNextImage()
# self.ImageArray = self.ImageArray.flatten
# i = 0
# updateTime = ptime.time()
# fps = 0
# def updateData():
# global img, i, updateTime, fps
# ## Display the data
# img.setImage(self.ImageArray[i])
# i = (i+1) % self.ImageArray.shape[0]
# QtCore.QTimer.singleShot(1, updateData)
# now = ptime.time()
# fps2 = 1.0 / (now-updateTime)
# updateTime = now
# fps = fps * 0.9 + fps2 * 0.1
# #print "%0.1f fps" % fps
# updateData()
# if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
# # QtGui.QApplication.instance().exec_()
# pg.exit()
# Error = self.dll.VRmUsbCamStop(self.CamIndex)
# if Error==0:
# self.ShowErrorInformation()
win = QtGui.QWidget()
# Image widget
imagewidget = pg.GraphicsLayoutWidget()
view = imagewidget.addViewBox()
view.setAspectLocked(True)
self.img = pg.ImageItem(border='k')
view.addItem(self.img)
view.setRange(QtCore.QRectF(0, 0, 754, 480))
# Custom ROI for selecting an image region
roi = pg.ROI([0, 200], [100, 200],pen=(0,9))
roi.addScaleHandle([0.5, 1], [0.5, 0.5])
roi.addScaleHandle([0, 0.5], [0.5, 0.5])
view.addItem(roi)
roi.setZValue(10) # make sure ROI is drawn above
p3 = imagewidget.addPlot(colspan=1)
# p3.rotate(90)
p3.setMaximumWidth(200)
# Another plot area for displaying ROI data
imagewidget.nextRow()
p2 = imagewidget.addPlot(colspan=1)
p2.setMaximumHeight(200)
# win.show()
layout = QtGui.QGridLayout()
win.setLayout(layout)
win.setWindowTitle('VRmagic USB Cam Live View')
layout.addWidget(imagewidget, 1, 2, 3, 1)
win.resize(1100, 870)
win.show()
def updateview():
self.GrabNextImage()
self.img.setImage(self.ImageArray.T)
updateRoi()
def updateRoi():
selected = roi.getArrayRegion(self.ImageArray.T, self.img)
p2.plot(selected.sum(axis=1), clear=True)
p3.plot(selected.sum(axis=0), clear=True).rotate(-90)
roi.sigRegionChanged.connect(updateRoi)
viewtimer = QtCore.QTimer()
viewtimer.timeout.connect(updateview)
viewtimer.start(0)
app.exec_()
viewtimer.stop()
Error = self.dll.VRmUsbCamStop(self.CamIndex)
if Error==0:
self.ShowErrorInformation()
Error = self.dll.VRmUsbCamFreeDeviceKey(byref(self.key))
if Error==0:
self.ShowErrorInformation()
Error = self.dll.VRmUsbCamCloseDevice(self.CamIndex)
if Error==0:
self.ShowErrorInformation()
def RealTimeViewTest(self):
simulation = Sim.GaussBeamSimulation()
simulation.CreateImages()
app = QtGui.QApplication([])
win = QtGui.QWidget()
# Image widget
imagewidget = pg.GraphicsLayoutWidget()
view = imagewidget.addViewBox()
view.setAspectLocked(True)
self.img = pg.ImageItem(border='k')
view.addItem(self.img)
view.setRange(QtCore.QRectF(0, 0, 754, 480))
# Custom ROI for selecting an image region
roi = pg.ROI([310, 210], [200, 200],pen=(0,9))
roi.addScaleHandle([0.5, 1], [0.5, 0.5])
roi.addScaleHandle([0, 0.5], [0.5, 0.5])
view.addItem(roi)
roi.setZValue(10) # make sure ROI is drawn above
peak = pg.GraphItem()
symbol = ['x']
view.addItem(peak)
roi.setZValue(20)
p3 = imagewidget.addPlot(colspan=1)
# p3.rotate(90)
p3.setMaximumWidth(200)
# Another plot area for displaying ROI data
imagewidget.nextRow()
p2 = imagewidget.addPlot(colspan=1)
p2.setMaximumHeight(200)
#cross hair
vLine = pg.InfiniteLine(angle=90, movable=False)
hLine = pg.InfiniteLine(angle=0, movable=False)
view.addItem(vLine, ignoreBounds=True)
view.addItem(hLine, ignoreBounds=True)
# win.show()
layout = QtGui.QGridLayout()
win.setLayout(layout)
win.setWindowTitle('VRmagic USB Cam Live View')
layout.addWidget(imagewidget, 1, 2, 3, 1)
win.resize(1100, 870)
win.show()
def updateview():
# simulation.NewImage()
# simulation.AddWhiteNoise()
# simulation.AddRandomGauss()
# simulation.SimulateTotalImage()
simulation.ChooseImage()
self.ImageArray = simulation.image
self.img.setImage(self.ImageArray.T)
updateRoi()
def updateRoi():
selected = roi.getArrayRegion(self.ImageArray.T, self.img)
p2.plot(selected.sum(axis=1), clear=True)
datahor = selected.sum(axis=1)
FittedParamsHor = MatTools.FitGaussian(datahor)[0]
xhor = np.arange(datahor.size)
p2.plot(MatTools.gaussian(xhor,*FittedParamsHor), pen=(0,255,0))
p3.plot(selected.sum(axis=0), clear=True).rotate(-90)
datavert = selected.sum(axis=0)
FittedParamsVert = MatTools.FitGaussian(datavert)[0]
xvert = np.arange(datavert.size)
p3.plot(MatTools.gaussian(xvert,*FittedParamsVert), pen=(0,255,0)).rotate(-90)
hLine.setPos(FittedParamsVert[2]+roi.pos()[1])
vLine.setPos(FittedParamsHor[2]+roi.pos()[0])
pos = np.array([[(FittedParamsHor[2]+roi.pos()[0]),(FittedParamsVert[2]+roi.pos()[1])]])
peak.setData(pos=pos,symbol=symbol,size=25, symbolPen='g', symbolBrush='g')
# print roi.pos, 'ROI Position'
# print 'ROI Sum: ', selected.sum(axis=1)
roi.sigRegionChanged.connect(updateRoi)
viewtimer = QtCore.QTimer()
viewtimer.timeout.connect(updateview)
viewtimer.start(0)
app.exec_()
viewtimer.stop()
if __name__=="__main__":
check = VRmagicUSBCam_API()
check.GetDeviceKeyList()
check.GetDeviceKeyListSize()
keycheck = check.GetDeviceKeyListEntry()
check.GetDeviceInformation(keycheck)
# check.TakePictureGrabbing(keycheck)
# check.RealTimeView(keycheck)
check.RealTimeViewTest()
plt.show()
| kviebahn/beam-cam | FirstTest.py | Python | gpl-3.0 | 36,567 | ["Gaussian"] | 3bf3a0bf09e582fbbf5ebca019d87fe2d7045965db012febba0d80b7d4ce47fc |
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import scipy
from gnuradio import filter
from PyQt4 import QtGui
# Filter design functions using a window
def design_win_lpf(fs, gain, wintype, mainwin):
ret = True
pb,r = mainwin.gui.endofLpfPassBandEdit.text().toDouble()
ret = r and ret
sb,r = mainwin.gui.startofLpfStopBandEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.lpfStopBandAttenEdit.text().toDouble()
ret = r and ret
if(ret):
tb = sb - pb
try:
taps = filter.firdes.low_pass_2(gain, fs, pb, tb,
atten, wintype)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
e.args[0], "&Ok")
return ([], [], ret)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "lpf", "pbend": pb, "sbstart": sb,
"atten": atten, "ntaps": len(taps)}
return (taps, params, ret)
else:
return ([], [], ret)
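# For reference, a minimal sketch of the underlying windowed design call that
# design_win_lpf wraps; the numbers are placeholders, not values from the GUI:
#   taps = filter.firdes.low_pass_2(1.0,       # gain
#                                   32000.0,   # sample rate fs
#                                   4000.0,    # passband edge pb
#                                   1000.0,    # transition band tb = sb - pb
#                                   60.0,      # stopband attenuation in dB
#                                   filter.firdes.WIN_HAMMING)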
def design_win_bpf(fs, gain, wintype, mainwin):
ret = True
pb1,r = mainwin.gui.startofBpfPassBandEdit.text().toDouble()
ret = r and ret
pb2,r = mainwin.gui.endofBpfPassBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bpfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bpfStopBandAttenEdit.text().toDouble()
ret = r and ret
if(ret):
try:
taps = filter.firdes.band_pass_2(gain, fs, pb1, pb2, tb,
atten, wintype)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
e.args[0], "&Ok")
return ([], [], ret)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "bpf", "pbstart": pb1, "pbend": pb2,
"tb": tb, "atten": atten, "ntaps": len(taps)}
return (taps,params,r)
else:
return ([],[],ret)
def design_win_cbpf(fs, gain, wintype, mainwin):
ret = True
pb1,r = mainwin.gui.startofBpfPassBandEdit.text().toDouble()
ret = r and ret
pb2,r = mainwin.gui.endofBpfPassBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bpfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bpfStopBandAttenEdit.text().toDouble()
ret = r and ret
if(ret):
try:
taps = filter.firdes.complex_band_pass_2(gain, fs, pb1, pb2, tb,
atten, wintype)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
e.args[0], "&Ok")
return ([], [], ret)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "cbpf", "pbstart": pb1, "pbend": pb2,
"tb": tb, "atten": atten, "ntaps": len(taps)}
return (taps,params,r)
else:
return ([],[],ret)
def design_win_bnf(fs, gain, wintype, mainwin):
ret = True
pb1,r = mainwin.gui.startofBnfStopBandEdit.text().toDouble()
ret = r and ret
pb2,r = mainwin.gui.endofBnfStopBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bnfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bnfStopBandAttenEdit.text().toDouble()
ret = r and ret
if(ret):
try:
taps = filter.firdes.band_reject_2(gain, fs, pb1, pb2, tb,
atten, wintype)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
e.args[0], "&Ok")
return ([], [], ret)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "bnf", "sbstart": pb1, "sbend": pb2,
"tb": tb, "atten": atten, "ntaps": len(taps)}
return (taps,params,r)
else:
return ([],[],ret)
def design_win_hpf(fs, gain, wintype, mainwin):
ret = True
sb,r = mainwin.gui.endofHpfStopBandEdit.text().toDouble()
ret = r and ret
pb,r = mainwin.gui.startofHpfPassBandEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.hpfStopBandAttenEdit.text().toDouble()
ret = r and ret
if(ret):
tb = pb - sb
try:
taps = filter.firdes.high_pass_2(gain, fs, pb, tb,
atten, wintype)
        except RuntimeError, e:
            reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
                                                  e.args[0], "&Ok")
            return ([], [], False)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "hpf", "sbend": sb, "pbstart": pb,
"atten": atten, "ntaps": len(taps)}
return (taps,params,ret)
else:
return ([],[],ret)
def design_win_hb(fs, gain, wintype, mainwin):
ret = True
filtord,r = mainwin.gui.firhbordEdit.text().toDouble()
ret = r and ret
trwidth,r = mainwin.gui.firhbtrEdit.text().toDouble()
ret = r and ret
filtwin = { filter.firdes.WIN_HAMMING : 'hamming',
filter.firdes.WIN_HANN : 'hanning',
filter.firdes.WIN_BLACKMAN : 'blackman',
filter.firdes.WIN_RECTANGULAR: 'boxcar',
filter.firdes.WIN_KAISER: ('kaiser', 4.0),
filter.firdes.WIN_BLACKMAN_hARRIS: 'blackmanharris'}
if int(filtord) & 1:
reply = QtGui.QMessageBox.information(mainwin, "Filter order should be even",
"Filter order should be even","&Ok")
return ([],[],False)
if(ret):
taps = scipy.signal.firwin(int(filtord)+1, 0.5, window = filtwin[wintype])
taps[abs(taps) <= 1e-6] = 0.
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "hb","ntaps": len(taps)}
return (taps,params,ret)
else:
return ([],[],ret)
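# Sketch of the half-band design above, outside the GUI (illustrative values):
# an even filter order gives an odd tap count, and every other tap is (near) zero.
#   taps = scipy.signal.firwin(32 + 1, 0.5, window='hamming')  # cutoff at fs/4
#   taps[abs(taps) <= 1e-6] = 0.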
def design_win_rrc(fs, gain, wintype, mainwin):
ret = True
sr,r = mainwin.gui.rrcSymbolRateEdit.text().toDouble()
ret = r and ret
alpha,r = mainwin.gui.rrcAlphaEdit.text().toDouble()
ret = r and ret
ntaps,r = mainwin.gui.rrcNumTapsEdit.text().toInt()
ret = r and ret
if(ret):
try:
taps = filter.firdes.root_raised_cosine(gain, fs, sr,
alpha, ntaps)
        except RuntimeError, e:
            reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
                                                  e.args[0], "&Ok")
            return ([], [], False)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "rrc", "srate": sr, "alpha": alpha,
"ntaps": ntaps}
return (taps,params,ret)
else:
return ([],[],ret)
def design_win_gaus(fs, gain, wintype, mainwin):
ret = True
sr,r = mainwin.gui.gausSymbolRateEdit.text().toDouble()
ret = r and ret
bt,r = mainwin.gui.gausBTEdit.text().toDouble()
ret = r and ret
ntaps,r = mainwin.gui.gausNumTapsEdit.text().toInt()
ret = r and ret
if(ret):
spb = fs / sr
try:
taps = filter.firdes.gaussian(gain, spb, bt, ntaps)
        except RuntimeError, e:
            reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
                                                  e.args[0], "&Ok")
            return ([], [], False)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "gaus", "srate": sr, "bt": bt,
"ntaps": ntaps}
return (taps,params,ret)
else:
return ([],[],ret)
# Design Functions for Equiripple Filters
def design_opt_lpf(fs, gain, mainwin):
ret = True
pb,r = mainwin.gui.endofLpfPassBandEdit.text().toDouble()
ret = r and ret
sb,r = mainwin.gui.startofLpfStopBandEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.lpfStopBandAttenEdit.text().toDouble()
ret = r and ret
ripple,r = mainwin.gui.lpfPassBandRippleEdit.text().toDouble()
ret = r and ret
if(ret):
try:
taps = filter.optfir.low_pass(gain, fs, pb, sb,
ripple, atten)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter did not converge",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": mainwin.EQUIRIPPLE_FILT,
"filttype": "lpf", "pbend": pb, "sbstart": sb,
"atten": atten, "ripple": ripple, "ntaps": len(taps)}
return (taps, params, ret)
else:
return ([], [], ret)
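# Equivalent direct call to the equiripple designer used above, with
# placeholder values in place of the dialog entries:
#   taps = filter.optfir.low_pass(1.0, 32000.0, 4000.0, 5000.0,
#                                 0.1,   # passband ripple in dB
#                                 60.0)  # stopband attenuation in dB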
def design_opt_bpf(fs, gain, mainwin):
ret = True
pb1,r = mainwin.gui.startofBpfPassBandEdit.text().toDouble()
ret = r and ret
pb2,r = mainwin.gui.endofBpfPassBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bpfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bpfStopBandAttenEdit.text().toDouble()
ret = r and ret
ripple,r = mainwin.gui.bpfPassBandRippleEdit.text().toDouble()
ret = r and ret
    if(ret):
sb1 = pb1 - tb
sb2 = pb2 + tb
try:
taps = filter.optfir.band_pass(gain, fs, sb1, pb1, pb2, sb2,
ripple, atten)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter did not converge",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": mainwin.EQUIRIPPLE_FILT,
"filttype": "bpf", "pbstart": pb1, "pbend": pb2,
"tb": tb, "atten": atten, "ripple": ripple,
"ntaps": len(taps)}
return (taps,params,r)
else:
        return ([],[],ret)
def design_opt_cbpf(fs, gain, mainwin):
ret = True
pb1,r = mainwin.gui.startofBpfPassBandEdit.text().toDouble()
ret = r and ret
pb2,r = mainwin.gui.endofBpfPassBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bpfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bpfStopBandAttenEdit.text().toDouble()
ret = r and ret
ripple,r = mainwin.gui.bpfPassBandRippleEdit.text().toDouble()
ret = r and ret
    if(ret):
sb1 = pb1 - tb
sb2 = pb2 + tb
try:
taps = filter.optfir.complex_band_pass(gain, fs, sb1, pb1, pb2, sb2,
ripple, atten)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter did not converge",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": self.EQUIRIPPLE_FILT,
"filttype": "cbpf", "pbstart": pb1, "pbend": pb2,
"tb": tb, "atten": atten, "ripple": ripple,
"ntaps": len(taps)}
return (taps,params,r)
else:
        return ([],[],ret)
def design_opt_bnf(fs, gain, mainwin):
ret = True
sb1,r = mainwin.gui.startofBnfStopBandEdit.text().toDouble()
ret = r and ret
sb2,r = mainwin.gui.endofBnfStopBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bnfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bnfStopBandAttenEdit.text().toDouble()
ret = r and ret
ripple,r = mainwin.gui.bnfPassBandRippleEdit.text().toDouble()
ret = r and ret
if(ret):
pb1 = sb1 - tb
pb2 = sb2 + tb
try:
taps = filter.optfir.band_reject(gain, fs, pb1, sb1, sb2, pb2,
ripple, atten)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter did not converge",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": mainwin.EQUIRIPPLE_FILT,
"filttype": "bnf", "sbstart": pb1, "sbend": pb2,
"tb": tb, "atten": atten, "ripple": ripple,
"ntaps": len(taps)}
return (taps,params,ret)
else:
return ([],[],ret)
def design_opt_hb(fs, gain, mainwin):
ret = True
filtord,r = mainwin.gui.firhbordEdit.text().toDouble()
ret = r and ret
trwidth,r = mainwin.gui.firhbtrEdit.text().toDouble()
ret = r and ret
if int(filtord) & 1:
reply = QtGui.QMessageBox.information(mainwin, "Filter order should be even",
"Filter order should be even","&Ok")
return ([],[],False)
if(ret):
try:
bands = [0,.25 - (trwidth/fs), .25 + (trwidth/fs), 0.5]
taps = scipy.signal.remez(int(filtord)+1, bands, [1,0], [1,1])
taps[abs(taps) <= 1e-6] = 0.
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter Design Error",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": self.EQUIRIPPLE_FILT,
"filttype": "hb", "ntaps": len(taps)}
return (taps,params,ret)
else:
return ([],[],ret)
def design_opt_hpf(fs, gain, mainwin):
ret = True
sb,r = mainwin.gui.endofHpfStopBandEdit.text().toDouble()
ret = r and ret
pb,r = mainwin.gui.startofHpfPassBandEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.hpfStopBandAttenEdit.text().toDouble()
ret = r and ret
ripple,r = mainwin.gui.hpfPassBandRippleEdit.text().toDouble()
ret = r and ret
if(ret):
try:
taps = filter.optfir.high_pass(gain, fs, sb, pb,
atten, ripple)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter did not converge",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": self.EQUIRIPPLE_FILT,
"filttype": "hpf", "sbend": sb, "pbstart": pb,
"atten": atten, "ripple": ripple,
"ntaps": len(taps)}
return (taps,params,ret)
else:
return ([],[],ret)
| balint256/gnuradio | gr-filter/python/filter/design/fir_design.py | Python | gpl-3.0 | 15,888 | ["Gaussian"] | 2bd3803cadc8d2680fdd7199759c0cbebe9bbcd2453007cbbc063504e3cafab6 |
#! /usr/bin/env python
import cv2
import numpy as np
import matplotlib.pyplot as plt
from image_processor import *
print (TCOLORS.PURPLE + "Unit Test: Generate a standard Gaussian array and digital representation of a Gaussian array" + TCOLORS.NORMAL)
#Constants
gaussian_sigma=1.2
gaussian_bitrange=18 #This is the number of bits that will be used to multiply values in the FPGA
gaussian_width = 5
print (TCOLORS.RED + "Gaussian Array" + TCOLORS.NORMAL)
print ("\tSigma: %f" % gaussian_sigma)
print ("\tBitrange: %d (Max Value) %d" % (gaussian_bitrange, ((2 ** gaussian_bitrange) - 1)))
print ("\tArray Length: %d" % gaussian_width)
gaussian_array = gen_deviation_array(sigma = gaussian_sigma, length = gaussian_width)
digital_array = convert_gaussian_to_digital_array(gaussian_array, gaussian_bitrange)
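# Note (assumption; gen_deviation_array and convert_gaussian_to_digital_array
# live in image_processor, which is not shown here): the digital array is
# presumably the normalized envelope scaled to the full bit range, roughly
# round(weight * (2**gaussian_bitrange - 1)) per tap.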
fig = plt.figure()
a=fig.add_subplot(1,2,1)
plt.bar(range( 0, len(gaussian_array)), gaussian_array)
plt.title("Normalized Gaussian Envelope")
plt.ylabel("Weight")
plt.xlabel("Envelope Position")
plt.xticks(range( 0, len(gaussian_array)), range(-1 * int(gaussian_width / 2), int(gaussian_width / 2) + 1))
a=fig.add_subplot(1,2,2)
plt.bar(range( 0, len(digital_array)), digital_array)
plt.title("Mapped to %d bits" % gaussian_bitrange)
plt.ylabel("Weight")
plt.xlabel("Envelope Position")
plt.xticks(range( 0, len(digital_array)), range(-1 * int(gaussian_width / 2), int(gaussian_width / 2) + 1))
plt.ylim([0, (2 ** gaussian_bitrange) - 1])
plt.show()
| CospanDesign/python | image_processor/gaussian_test.py | Python | mit | 1,446 | ["Gaussian"] | c2f0e4ca044687370cd36ec59b67cdd0388bd172bf7c5f9e357d6c5fea482043 |
# Imports
import sframe
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
import copy
from PIL import Image
from io import BytesIO
import matplotlib.mlab as mlab
import colorsys
def generate_MoG_data(num_data, means, covariances, weights):
data = []
for i in range(num_data):
# Use np.random.choice and weights to pick a cluster id greater than or equal to 0 and less than num_clusters.
k = np.random.choice(len(weights), 1, p=weights)[0]
# Use np.random.multivariate_normal to create data from this cluster
x = np.random.multivariate_normal(means[k], covariances[k])
data.append(x)
return data
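# Example draw (sketch): 100 points from two well-separated 2-D clusters.
#   sample = generate_MoG_data(100, [[0, 0], [4, 4]],
#                              [np.eye(2), np.eye(2)], [0.5, 0.5])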
def log_sum_exp(Z):
""" Compute log(\sum_i exp(Z_i)) for some array Z."""
return np.max(Z) + np.log(np.sum(np.exp(Z - np.max(Z))))
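# The max-shift above is the standard log-sum-exp trick; e.g. (sketch):
#   log_sum_exp(np.array([1000., 1000.]))             # ~ 1000.6931
#   np.log(np.sum(np.exp(np.array([1000., 1000.]))))  # overflows to inf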
def loglikelihood(data, weights, means, covs):
""" Compute the loglikelihood of the data for a Gaussian mixture model with the given parameters. """
num_clusters = len(means)
num_dim = len(data[0])
ll = 0
for d in data:
Z = np.zeros(num_clusters)
for k in range(num_clusters):
# Compute (x-mu)^T * Sigma^{-1} * (x-mu)
delta = np.array(d) - means[k]
exponent_term = np.dot(delta.T, np.dot(np.linalg.inv(covs[k]), delta))
# Compute loglikelihood contribution for this data point and this cluster
Z[k] += np.log(weights[k])
Z[k] -= 1/2. * (num_dim * np.log(2*np.pi) + np.log(np.linalg.det(covs[k])) + exponent_term)
# Increment loglikelihood contribution of this data point across all clusters
ll += log_sum_exp(Z)
return ll
def EM(data, init_means, init_covariances, init_weights, maxiter=1000, thresh=1e-4):
# Make copies of initial parameters, which we will update during each iteration
means = copy.deepcopy(init_means)
covariances = copy.deepcopy(init_covariances)
weights = copy.deepcopy(init_weights)
# Infer dimensions of dataset and the number of clusters
num_data = len(data)
num_dim = len(data[0])
num_clusters = len(means)
# Initialize some useful variables
resp = np.zeros((num_data, num_clusters))
ll = loglikelihood(data, weights, means, covariances)
ll_trace = [ll]
for i in range(maxiter):
if i % 5 == 0:
print("Iteration %s" % i)
# E-step: compute responsibilities
# Update resp matrix so that resp[j, k] is the responsibility of cluster k for data point j.
# Hint: To compute likelihood of seeing data point j given cluster k, use multivariate_normal.pdf.
for j in range(num_data):
for k in range(num_clusters):
# YOUR CODE HERE
resp[j, k] = weights[k] * multivariate_normal.pdf(x=data[j],
mean=means[k],
cov=covariances[k])
row_sums = resp.sum(axis=1)[:, np.newaxis]
resp = resp / row_sums # normalize over all possible cluster assignments
# M-step
# Compute the total responsibility assigned to each cluster, which will be useful when
# implementing M-steps below. In the lectures this is called N^{soft}
counts = np.sum(resp, axis=0)
for k in range(num_clusters):
# Update the weight for cluster k using the M-step update rule for the cluster weight, \hat{\pi}_k.
# YOUR CODE HERE
Nsoft_k = counts[k]
weights[k] = float(Nsoft_k)/float(num_data)
# Update means for cluster k using the M-step update rule for the mean variables.
# This will assign the variable means[k] to be our estimate for \hat{\mu}_k.
weighted_sum = 0
for j in range(num_data):
# YOUR CODE HERE
weighted_sum += resp[j, k] * data[j]
# YOUR CODE HERE
means[k] = weighted_sum/Nsoft_k
# Update covariances for cluster k using the M-step update rule for covariance variables.
# This will assign the variable covariances[k] to be the estimate for \hat{Sigma}_k.
weighted_sum = np.zeros((num_dim, num_dim))
for j in range(num_data):
# YOUR CODE HERE (Hint: Use np.outer on the data[j] and this cluster's mean)
weighted_sum += resp[j, k] * np.outer(data[j] - means[k], data[j] - means[k])
# YOUR CODE HERE
covariances[k] = weighted_sum/Nsoft_k
# Compute the loglikelihood at this iteration
# YOUR CODE HERE
ll_latest = loglikelihood(data, weights, means, covariances)
ll_trace.append(ll_latest)
# Check for convergence in log-likelihood and store
if (ll_latest - ll) < thresh and ll_latest > -np.inf:
break
ll = ll_latest
if i % 5 != 0:
print("Iteration %s" % i)
out = {'weights': weights, 'means': means, 'covs': covariances, 'loglik': ll_trace, 'resp': resp}
return out
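# For reference, the M-step update rules implemented in the loop above:
#   pi_k    = N_k^soft / N
#   mu_k    = (1 / N_k^soft) * sum_j resp[j, k] * x_j
#   Sigma_k = (1 / N_k^soft) * sum_j resp[j, k] * outer(x_j - mu_k, x_j - mu_k)
# where N_k^soft = sum_j resp[j, k] (the `counts` array).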
def plot_contours(data, means, covs, title):
plt.figure()
plt.plot([x[0] for x in data], [y[1] for y in data],'ko') # data
delta = 0.025
k = len(means)
x = np.arange(-2.0, 7.0, delta)
y = np.arange(-2.0, 7.0, delta)
X, Y = np.meshgrid(x, y)
col = ['green', 'red', 'indigo']
for i in range(k):
mean = means[i]
cov = covs[i]
sigmax = np.sqrt(cov[0][0])
sigmay = np.sqrt(cov[1][1])
sigmaxy = cov[0][1]/(sigmax*sigmay)
Z = mlab.bivariate_normal(X, Y, sigmax, sigmay, mean[0], mean[1], sigmaxy)
plt.contour(X, Y, Z, colors = col[i])
plt.title(title)
plt.rcParams.update({'font.size':16})
plt.tight_layout()
def plot_responsibilities_in_RB(img, resp, title):
N, K = resp.shape
HSV_tuples = [(x*1.0/K, 0.5, 0.9) for x in range(K)]
RGB_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)
R = img['red']
B = img['blue']
resp_by_img_int = [[resp[n][k] for k in range(K)] for n in range(N)]
cols = [tuple(np.dot(resp_by_img_int[n], np.array(RGB_tuples))) for n in range(N)]
plt.figure()
for n in range(len(R)):
plt.plot(R[n], B[n], 'o', c=cols[n])
plt.title(title)
plt.xlabel('R value')
plt.ylabel('B value')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
def get_top_images(assignments, cluster, k=5):
# YOUR CODE HERE
images_in_cluster = assignments[assignments['assignments']==cluster]
print images_in_cluster
top_images = images_in_cluster.topk('probs', k)
return top_images['image']
def save_images(images, prefix):
for i, image in enumerate(images):
Image.open(BytesIO(image._image_data)).save(prefix % i)
# Model parameters
init_means = [
[5, 0], # mean of cluster 1
[1, 1], # mean of cluster 2
[0, 5] # mean of cluster 3
]
init_covariances = [
[[.5, 0.], [0, .5]], # covariance of cluster 1
[[.92, .38], [.38, .91]], # covariance of cluster 2
[[.5, 0.], [0, .5]] # covariance of cluster 3
]
init_weights = [1/4., 1/2., 1/4.] # weights of each cluster
# Generate data
np.random.seed(4)
data = generate_MoG_data(100, init_means, init_covariances, init_weights)
# Plot clusters
plt.figure()
d = np.vstack(data)
plt.plot(d[:,0], d[:,1],'ko')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
# Test EM algorithm
np.random.seed(4)
# Initialization of parameters
chosen = np.random.choice(len(data), 3, replace=False)
initial_means = [data[x] for x in chosen]
initial_covs = [np.cov(data, rowvar=0)] * 3
initial_weights = [1/3.] * 3
# Run EM
results = EM(data, initial_means, initial_covs, initial_weights)
# Parameters after initialization
plot_contours(data, initial_means, initial_covs, 'Initial clusters')
# Parameters after 12 iterations
results = EM(data, initial_means, initial_covs, initial_weights, maxiter=12)
plot_contours(data, results['means'], results['covs'], 'Clusters after 12 iterations')
# Parameters after running EM to convergence
results = EM(data, initial_means, initial_covs, initial_weights)
plot_contours(data, results['means'], results['covs'], 'Final clusters')
# Log-likelihood plot
loglikelihoods = results['loglik']
plt.plot(range(len(loglikelihoods)), loglikelihoods, linewidth=4)
plt.xlabel('Iteration')
plt.ylabel('Log-likelihood')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
# Load image data
images = sframe.SFrame('../data/Week04/images.sf/')
images['rgb'] = images.pack_columns(['red', 'green', 'blue'])['X4']
# Run EM on image data
np.random.seed(1)
# Initalize parameters
init_means = [images['rgb'][x] for x in np.random.choice(len(images), 4, replace=False)]
cov = np.diag([images['red'].var(), images['green'].var(), images['blue'].var()])
init_covariances = [cov, cov, cov, cov]
init_weights = [1/4., 1/4., 1/4., 1/4.]
# Convert rgb data to numpy arrays
img_data = [np.array(i) for i in images['rgb']]
# Run our EM algorithm on the image data using the above initializations.
# This should converge in about 125 iterations
out = EM(img_data, init_means, init_covariances, init_weights)
# Log-likelihood plot
ll = out['loglik']
plt.plot(range(len(ll)),ll,linewidth=4)
plt.xlabel('Iteration')
plt.ylabel('Log-likelihood')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
plt.figure()
plt.plot(range(10,len(ll)),ll[10:],linewidth=4)
plt.xlabel('Iteration')
plt.ylabel('Log-likelihood')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
# Visualize evolution of responsibility
N, K = out['resp'].shape
random_resp = np.random.dirichlet(np.ones(K), N)
plot_responsibilities_in_RB(images, random_resp, 'Random responsibilities')
out = EM(img_data, init_means, init_covariances, init_weights, maxiter=1)
plot_responsibilities_in_RB(images, out['resp'], 'After 1 iteration')
out = EM(img_data, init_means, init_covariances, init_weights, maxiter=20)
plot_responsibilities_in_RB(images, out['resp'], 'After 20 iterations')
# Interpreting clusters
weights = out['weights']
means = out['means']
covariances = out['covs']
rgb = images['rgb']
N = len(images) # number of images
K = len(means) # number of clusters
assignments = [0]*N
probs = [0]*N
for i in range(N):
# Compute the score of data point i under each Gaussian component:
p = np.zeros(K)
for k in range(K):
p[k] = weights[k]*multivariate_normal.pdf(rgb[i], mean=means[k], cov=covariances[k])
# Compute assignments of each data point to a given cluster based on the above scores:
assignments[i] = np.argmax(p)
# For data point i, store the corresponding score under this cluster assignment:
probs[i] = np.max(p)
assignments = sframe.SFrame({'assignments':assignments, 'probs':probs, 'image': images['image']})
for idx in range(4):
get_top_images(assignments, idx)
for component_id in range(4):
print 'Component {0:d}'.format(component_id)
images = get_top_images(assignments, component_id)
save_images(images, 'component_{0:d}_%d.jpg'.format(component_id))
print '\n'
| neogi/machine-learning | clustering_and_retrieval/gaussian_mixture_model/em-gmm.py | Python | gpl-3.0 | 11,332 | ["Gaussian"] | af6bdcf4aaa639a3ec52f25e8189dd76a1c3bad5d524a463438c036a208dc316 |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sys
import unittest
import vtk
from PyQt5 import QtCore, QtWidgets
from peacock.ExodusViewer.plugins.BlockPlugin import main
from peacock.utils import Testing
class TestBlockPlugin(Testing.PeacockImageTestCase):
"""
Testing for BlockControl widget.
"""
qapp = QtWidgets.QApplication(sys.argv)
def setUp(self):
"""
Creates a window attached to BlockControls widget.
"""
# The file to open
self._filenames = Testing.get_chigger_input_list('mug_blocks_out.e', 'vector_out.e', 'displace.e')
self._widget, self._window = main(size=[600,600])
self._widget.FilePlugin.onSetFilenames(self._filenames)
self._widget.FilePlugin.VariableList.setCurrentIndex(2)
self._widget.FilePlugin.VariableList.currentIndexChanged.emit(2)
camera = vtk.vtkCamera()
camera.SetViewUp(-0.7786, 0.2277, 0.5847)
camera.SetPosition(9.2960, -0.4218, 12.6685)
camera.SetFocalPoint(0.0000, 0.0000, 0.1250)
self._window.onCameraChanged(camera.GetViewUp(), camera.GetPosition(), camera.GetFocalPoint())
self._window.onWindowRequiresUpdate()
def testBlocks(self):
"""
Test the block selection.
"""
# By default all blocks should be selected
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), ['1', '76'])
# Uncheck a block
item = self._widget.BlockPlugin.BlockSelector.StandardItemModel.item(2)
item.setCheckState(QtCore.Qt.Unchecked)
self._widget.BlockPlugin.BlockSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), ['1'])
self.assertImage('testBlocks.png')
# Uncheck "all"
item = self._widget.BlockPlugin.BlockSelector.StandardItemModel.item(0)
item.setCheckState(QtCore.Qt.Unchecked)
self._widget.BlockPlugin.BlockSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), None)
self.assertImage('testBlocksEmpty.png')
# Check "all"
item = self._widget.BlockPlugin.BlockSelector.StandardItemModel.item(0)
item.setCheckState(QtCore.Qt.Checked)
self._widget.BlockPlugin.BlockSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), ['1', '76'])
self.assertImage('testBlocksAll.png')
def testSidesets(self):
"""
Test the sidesets selection.
"""
# By default no sidesets should be selected
self.assertEqual(self._widget.BlockPlugin.SidesetSelector.getBlocks(), None)
# Uncheck block and select "all" the sidesets
item = self._widget.BlockPlugin.BlockSelector.StandardItemModel.item(0)
item.setCheckState(QtCore.Qt.Unchecked)
self._widget.BlockPlugin.BlockSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), None)
item = self._widget.BlockPlugin.SidesetSelector.StandardItemModel.item(0)
item.setCheckState(QtCore.Qt.Checked)
self._widget.BlockPlugin.SidesetSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.SidesetSelector.getBlocks(), sorted(['top', 'bottom']))
self.assertImage('testSidesetsAll.png')
# Uncheck a sideset
item = self._widget.BlockPlugin.SidesetSelector.StandardItemModel.item(1)
item.setCheckState(QtCore.Qt.Unchecked)
self._widget.BlockPlugin.SidesetSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.SidesetSelector.getBlocks(), ['top'])
self.assertImage('testSidesets.png')
# Uncheck "all"
item = self._widget.BlockPlugin.SidesetSelector.StandardItemModel.item(0)
item.setCheckState(QtCore.Qt.Unchecked)
self._widget.BlockPlugin.SidesetSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.SidesetSelector.getBlocks(), None)
self.assertImage('testBlocksEmpty.png')
# Check "all"
item = self._widget.BlockPlugin.SidesetSelector.StandardItemModel.item(0)
item.setCheckState(QtCore.Qt.Checked)
self._widget.BlockPlugin.SidesetSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.SidesetSelector.getBlocks(), sorted(['top', 'bottom']))
self.assertImage('testSidesetsAll.png')
def testNodesets(self):
"""
Test the nodesets selection.
"""
# By default no nodesets should be selected
self.assertEqual(self._widget.BlockPlugin.NodesetSelector.getBlocks(), None)
# Uncheck block and select "all" the nodesets
item = self._widget.BlockPlugin.BlockSelector.StandardItemModel.item(0)
item.setCheckState(QtCore.Qt.Unchecked)
self._widget.BlockPlugin.BlockSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), None)
item = self._widget.BlockPlugin.NodesetSelector.StandardItemModel.item(0)
item.setCheckState(QtCore.Qt.Checked)
self._widget.BlockPlugin.NodesetSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.NodesetSelector.getBlocks(), ['1', '2'])
self.assertImage('testNodesetsAll.png', allowed=0.97)
        # Uncheck a nodeset
item = self._widget.BlockPlugin.NodesetSelector.StandardItemModel.item(1)
item.setCheckState(QtCore.Qt.Unchecked)
self._widget.BlockPlugin.NodesetSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.NodesetSelector.getBlocks(), ['2'])
self.assertImage('testNodesets.png')
# Uncheck "all"
item = self._widget.BlockPlugin.NodesetSelector.StandardItemModel.item(0)
item.setCheckState(QtCore.Qt.Unchecked)
self._widget.BlockPlugin.NodesetSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.NodesetSelector.getBlocks(), None)
self.assertImage('testBlocksEmpty.png')
# Check "all"
item = self._widget.BlockPlugin.NodesetSelector.StandardItemModel.item(0)
item.setCheckState(QtCore.Qt.Checked)
self._widget.BlockPlugin.NodesetSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.NodesetSelector.getBlocks(), ['1', '2'])
self.assertImage('testNodesetsAll.png', allowed=0.97)
def testState(self):
"""
Test that state is stored with variable changes.
"""
# Initial state
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), ['1', '76'])
self.assertEqual(self._widget.BlockPlugin.NodesetSelector.getBlocks(), None)
self.assertEqual(self._widget.BlockPlugin.SidesetSelector.getBlocks(), None)
# Disable a block
item = self._widget.BlockPlugin.BlockSelector.StandardItemModel.item(1)
item.setCheckState(QtCore.Qt.Unchecked)
self._widget.BlockPlugin.BlockSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), ['76'])
self.assertEqual(self._widget.BlockPlugin.NodesetSelector.getBlocks(), None)
self.assertEqual(self._widget.BlockPlugin.SidesetSelector.getBlocks(), None)
# Change to 'convected' and check that all blocks are selected
self._widget.FilePlugin.VariableList.setCurrentIndex(1)
self._widget.FilePlugin.VariableList.currentIndexChanged.emit(1)
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), ['1', '76'])
self.assertEqual(self._widget.BlockPlugin.NodesetSelector.getBlocks(), None)
self.assertEqual(self._widget.BlockPlugin.SidesetSelector.getBlocks(), None)
# Disable a block and select a sideset
item = self._widget.BlockPlugin.BlockSelector.StandardItemModel.item(2)
item.setCheckState(QtCore.Qt.Unchecked)
self._widget.BlockPlugin.BlockSelector.StandardItemModel.itemChanged.emit(item)
item = self._widget.BlockPlugin.SidesetSelector.StandardItemModel.item(2)
item.setCheckState(QtCore.Qt.Checked)
self._widget.BlockPlugin.SidesetSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), ['1'])
self.assertEqual(self._widget.BlockPlugin.SidesetSelector.getBlocks(), ['top'])
self.assertEqual(self._widget.BlockPlugin.NodesetSelector.getBlocks(), None)
# Go back to first item
self._widget.FilePlugin.VariableList.setCurrentIndex(2)
self._widget.FilePlugin.VariableList.currentIndexChanged.emit(2)
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), ['76'])
self.assertEqual(self._widget.BlockPlugin.NodesetSelector.getBlocks(), None)
self.assertEqual(self._widget.BlockPlugin.SidesetSelector.getBlocks(), None)
# Back to convected
self._widget.FilePlugin.VariableList.setCurrentIndex(1)
self._widget.FilePlugin.VariableList.currentIndexChanged.emit(1)
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), ['1'])
self.assertEqual(self._widget.BlockPlugin.SidesetSelector.getBlocks(), ['top'])
self.assertEqual(self._widget.BlockPlugin.NodesetSelector.getBlocks(), None)
def testState2(self):
"""
Test state change with changing filename.
"""
# Initial state
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), ['1', '76'])
self.assertEqual(self._widget.BlockPlugin.NodesetSelector.getBlocks(), None)
self.assertEqual(self._widget.BlockPlugin.SidesetSelector.getBlocks(), None)
# Disable a block
item = self._widget.BlockPlugin.BlockSelector.StandardItemModel.item(1)
item.setCheckState(QtCore.Qt.Unchecked)
self._widget.BlockPlugin.BlockSelector.StandardItemModel.itemChanged.emit(item)
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), ['76'])
self.assertEqual(self._widget.BlockPlugin.NodesetSelector.getBlocks(), None)
self.assertEqual(self._widget.BlockPlugin.SidesetSelector.getBlocks(), None)
# Change files
self._widget.FilePlugin.FileList.setCurrentIndex(1)
self._widget.FilePlugin.FileList.currentIndexChanged.emit(1)
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), ['0'])
self.assertEqual(self._widget.BlockPlugin.SidesetSelector.getBlocks(), None)
self.assertEqual(self._widget.BlockPlugin.NodesetSelector.getBlocks(), None)
# Change files back
self._widget.FilePlugin.FileList.setCurrentIndex(0)
self._widget.FilePlugin.FileList.currentIndexChanged.emit(0)
self.assertEqual(self._widget.BlockPlugin.BlockSelector.getBlocks(), ['76'])
self.assertEqual(self._widget.BlockPlugin.NodesetSelector.getBlocks(), None)
self.assertEqual(self._widget.BlockPlugin.SidesetSelector.getBlocks(), None)
def testElementalVariable(self):
"""
Test that elemental variables disable boundary/nodeset
"""
self.assertTrue(self._widget.BlockPlugin.BlockSelector.isEnabled())
self.assertTrue(self._widget.BlockPlugin.SidesetSelector.isEnabled())
self.assertTrue(self._widget.BlockPlugin.NodesetSelector.isEnabled())
self._widget.FilePlugin.VariableList.setCurrentIndex(0)
self._widget.FilePlugin.VariableList.currentIndexChanged.emit(0)
self.assertTrue(self._widget.BlockPlugin.BlockSelector.isEnabled())
self.assertFalse(self._widget.BlockPlugin.SidesetSelector.isEnabled())
self.assertFalse(self._widget.BlockPlugin.NodesetSelector.isEnabled())
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
| nuclear-wizard/moose | python/peacock/tests/exodus_tab/test_BlockPlugin.py | Python | lgpl-2.1 | 12,514 | ["MOOSE", "VTK"] | b2fef808836ee10e75ec8455007c061068849526f5c18e59808afec8b1b99000 |
"""
===========
gaussfitter
===========
.. codeauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com> 3/17/08
Latest version available at <http://code.google.com/p/agpy/source/browse/trunk/agpy/gaussfitter.py>
As of January 30, 2014, gaussfitter has its own code repo on github:
https://github.com/keflavich/gaussfitter
"""
import numpy as np
from numpy.ma import median
from numpy import pi
#from scipy import optimize,stats,pi
from .mpfit import mpfit
"""
Note about mpfit/leastsq:
I switched everything over to the Markwardt mpfit routine for a few reasons,
but foremost being the ability to set limits on parameters, not just force them
to be fixed. As far as I can tell, leastsq does not have that capability.
The version of mpfit I use can be found here:
http://code.google.com/p/agpy/source/browse/trunk/mpfit
Alternative: lmfit
.. todo::
-turn into a class instead of a collection of objects
-implement WCS-based gaussian fitting with correct coordinates
"""
def moments(data,circle,rotate,vheight,estimator=median,**kwargs):
"""Returns (height, amplitude, x, y, width_x, width_y, rotation angle)
the gaussian parameters of a 2D distribution by calculating its
moments. Depending on the input parameters, will only output
a subset of the above.
If using masked arrays, pass estimator=np.ma.median
"""
total = np.abs(data).sum()
Y, X = np.indices(data.shape) # python convention: reverse x,y np.indices
y = np.argmax((X*np.abs(data)).sum(axis=1)/total)
x = np.argmax((Y*np.abs(data)).sum(axis=0)/total)
col = data[int(y),:]
# FIRST moment, not second!
width_x = np.sqrt(np.abs((np.arange(col.size)-y)*col).sum()/np.abs(col).sum())
row = data[:, int(x)]
width_y = np.sqrt(np.abs((np.arange(row.size)-x)*row).sum()/np.abs(row).sum())
width = ( width_x + width_y ) / 2.
height = estimator(data.ravel())
amplitude = data.max()-height
mylist = [amplitude,x,y]
if np.isnan(width_y) or np.isnan(width_x) or np.isnan(height) or np.isnan(amplitude):
raise ValueError("something is nan")
if vheight==1:
mylist = [height] + mylist
if circle==0:
mylist = mylist + [width_x,width_y]
if rotate==1:
mylist = mylist + [0.] #rotation "moment" is just zero...
# also, circles don't rotate.
else:
mylist = mylist + [width]
return mylist
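# Sketch of a typical call matching the positional signature above
# (img is any 2-D array; the flags are circle, rotate, vheight):
#   init_params = moments(img, 0, 1, 1)  # [height, amp, x, y, wx, wy, rota]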
def twodgaussian(inpars, circle=False, rotate=True, vheight=True, shape=None):
"""Returns a 2d gaussian function of the form:
x' = np.cos(rota) * x - np.sin(rota) * y
y' = np.sin(rota) * x + np.cos(rota) * y
(rota should be in degrees)
g = b + a * np.exp ( - ( ((x-center_x)/width_x)**2 +
((y-center_y)/width_y)**2 ) / 2 )
inpars = [b,a,center_x,center_y,width_x,width_y,rota]
(b is background height, a is peak amplitude)
where x and y are the input parameters of the returned function,
and all other parameters are specified by this function
However, the above values are passed by list. The list should be:
inpars = (height,amplitude,center_x,center_y,width_x,width_y,rota)
You can choose to ignore / neglect some of the above input parameters
unp.sing the following options:
circle=0 - default is an elliptical gaussian (different x, y
widths), but can reduce the input by one parameter if it's a
circular gaussian
rotate=1 - default allows rotation of the gaussian ellipse. Can
remove last parameter by setting rotate=0
vheight=1 - default allows a variable height-above-zero, i.e. an
additive constant for the Gaussian function. Can remove first
parameter by setting this to 0
shape=None - if shape is set (to a 2-parameter list) then returns
an image with the gaussian defined by inpars
"""
inpars_old = inpars
inpars = list(inpars)
if vheight == 1:
height = inpars.pop(0)
height = float(height)
else:
height = float(0)
amplitude, center_y, center_x = inpars.pop(0),inpars.pop(0),inpars.pop(0)
amplitude = float(amplitude)
center_x = float(center_x)
center_y = float(center_y)
if circle == 1:
width = inpars.pop(0)
width_x = float(width)
width_y = float(width)
rotate = 0
else:
width_x, width_y = inpars.pop(0),inpars.pop(0)
width_x = float(width_x)
width_y = float(width_y)
if rotate == 1:
rota = inpars.pop(0)
rota = pi/180. * float(rota)
rcen_x = center_x * np.cos(rota) - center_y * np.sin(rota)
rcen_y = center_x * np.sin(rota) + center_y * np.cos(rota)
else:
rcen_x = center_x
rcen_y = center_y
if len(inpars) > 0:
raise ValueError("There are still input parameters:" + str(inpars) + \
" and you've input: " + str(inpars_old) + \
" circle=%d, rotate=%d, vheight=%d" % (circle,rotate,vheight) )
def rotgauss(x,y):
if rotate==1:
xp = x * np.cos(rota) - y * np.sin(rota)
yp = x * np.sin(rota) + y * np.cos(rota)
else:
xp = x
yp = y
g = height+amplitude*np.exp(
-(((rcen_x-xp)/width_x)**2+
((rcen_y-yp)/width_y)**2)/2.)
return g
if shape is not None:
return rotgauss(*np.indices(shape))
else:
return rotgauss
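# Usage sketch: with `shape` set, twodgaussian returns an image rather than a
# function. Parameters are (height, amplitude, center_x, center_y,
# width_x, width_y, rota):
#   img = twodgaussian((0., 1., 32., 32., 5., 8., 30.), shape=(64, 64))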
def gaussfit(data,err=None,params=(),autoderiv=True,return_all=False,circle=False,
fixed=np.repeat(False,7),limitedmin=[False,False,False,False,True,True,True],
limitedmax=[False,False,False,False,False,False,True],
usemoment=np.array([],dtype='bool'),
minpars=np.repeat(0,7),maxpars=[0,0,0,0,0,0,360],
rotate=1,vheight=1,quiet=True,returnmp=False,
returnfitimage=False,**kwargs):
"""
Gaussian fitter with the ability to fit a variety of different forms of
2-dimensional gaussian.
Input Parameters:
data - 2-dimensional data array
err=None - error array with same size as data array
params=[] - initial input parameters for Gaussian function.
(height, amplitude, x, y, width_x, width_y, rota)
if not input, these will be determined from the moments of the system,
assuming no rotation
    autoderiv=1 - use the autoderiv provided in the lmder.f function (the
        alternative is to use an analytic derivative with lmdif.f: this method
        is less robust)
return_all=0 - Default is to return only the Gaussian parameters.
1 - fit params, fit error
returnfitimage - returns (best fit params,best fit image)
returnmp - returns the full mpfit struct
circle=0 - default is an elliptical gaussian (different x, y widths),
but can reduce the input by one parameter if it's a circular gaussian
    rotate=1 - default allows rotation of the gaussian ellipse. Can remove
        last parameter by setting rotate=0. Expects angle in DEGREES
vheight=1 - default allows a variable height-above-zero, i.e. an
additive constant for the Gaussian function. Can remove first
parameter by setting this to 0
usemoment - can choose which parameters to use a moment estimation for.
Other parameters will be taken from params. Needs to be a boolean
array.
Output:
Default output is a set of Gaussian parameters with the same shape as
the input parameters
    If returnfitimage=True returns a numpy array of a gaussian
        constructed using the best fit parameters.
If returnmp=True returns a `mpfit` object. This object contains
a `covar` attribute which is the 7x7 covariance array
generated by the mpfit class in the `mpfit_custom.py`
module. It contains a `param` attribute that contains a
list of the best fit parameters in the same order as the
optional input parameter `params`.
Warning: Does NOT necessarily output a rotation angle between 0 and 360 degrees.
"""
usemoment=np.array(usemoment,dtype='bool')
params=np.array(params,dtype='float')
if usemoment.any() and len(params)==len(usemoment):
moment = np.array(moments(data,circle,rotate,vheight,**kwargs),dtype='float')
params[usemoment] = moment[usemoment]
    elif len(params) == 0:
params = (moments(data,circle,rotate,vheight,**kwargs))
if vheight==0:
vheight=1
params = np.concatenate([[0],params])
fixed[0] = 1
# mpfit will fail if it is given a start parameter outside the allowed range:
for i in xrange(len(params)):
if params[i] > maxpars[i] and limitedmax[i]: params[i] = maxpars[i]
if params[i] < minpars[i] and limitedmin[i]: params[i] = minpars[i]
if err is None:
errorfunction = lambda p: np.ravel((twodgaussian(p,circle,rotate,vheight)\
(*np.indices(data.shape)) - data))
else:
errorfunction = lambda p: np.ravel((twodgaussian(p,circle,rotate,vheight)\
(*np.indices(data.shape)) - data)/err)
def mpfitfun(data,err):
if err is None:
def f(p,fjac=None): return [0,np.ravel(data-twodgaussian(p,circle,rotate,vheight)\
(*np.indices(data.shape)))]
else:
def f(p,fjac=None): return [0,np.ravel((data-twodgaussian(p,circle,rotate,vheight)\
(*np.indices(data.shape)))/err)]
return f
parinfo = [
{'n':1,'value':params[1],'limits':[minpars[1],maxpars[1]],'limited':[limitedmin[1],limitedmax[1]],'fixed':fixed[1],'parname':"AMPLITUDE",'error':0},
{'n':2,'value':params[2],'limits':[minpars[2],maxpars[2]],'limited':[limitedmin[2],limitedmax[2]],'fixed':fixed[2],'parname':"XSHIFT",'error':0},
{'n':3,'value':params[3],'limits':[minpars[3],maxpars[3]],'limited':[limitedmin[3],limitedmax[3]],'fixed':fixed[3],'parname':"YSHIFT",'error':0},
{'n':4,'value':params[4],'limits':[minpars[4],maxpars[4]],'limited':[limitedmin[4],limitedmax[4]],'fixed':fixed[4],'parname':"XWIDTH",'error':0} ]
if vheight == 1:
parinfo.insert(0,{'n':0,'value':params[0],'limits':[minpars[0],maxpars[0]],'limited':[limitedmin[0],limitedmax[0]],'fixed':fixed[0],'parname':"HEIGHT",'error':0})
if circle == 0:
parinfo.append({'n':5,'value':params[5],'limits':[minpars[5],maxpars[5]],'limited':[limitedmin[5],limitedmax[5]],'fixed':fixed[5],'parname':"YWIDTH",'error':0})
if rotate == 1:
parinfo.append({'n':6,'value':params[6],'limits':[minpars[6],maxpars[6]],'limited':[limitedmin[6],limitedmax[6]],'fixed':fixed[6],'parname':"ROTATION",'error':0})
if autoderiv == 0:
# the analytic derivative, while not terribly difficult, is less
# efficient and useful. I only bothered putting it here because I was
# instructed to do so for a class project - please ask if you would
# like this feature implemented
raise ValueError("I'm sorry, I haven't implemented this feature yet.")
else:
# p, cov, infodict, errmsg, success = optimize.leastsq(errorfunction,\
# params, full_output=1)
mp = mpfit(mpfitfun(data,err),parinfo=parinfo,quiet=quiet)
if returnmp:
returns = (mp)
elif return_all == 0:
returns = mp.params
elif return_all == 1:
returns = mp.params,mp.perror
if returnfitimage:
fitimage = twodgaussian(mp.params,circle,rotate,vheight)(*np.indices(data.shape))
returns = (returns,fitimage)
return returns
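# Illustrative usage sketch (hypothetical values): fit a synthetic image,
# letting the moments provide the initial guesses, and optionally recover the
# best-fit model image:
#
#     >>> img = twodgaussian([1., 5., 20., 25., 4., 2., 10.], shape=(50, 50))
#     >>> pars = gaussfit(img)
#     >>> pars, fitimg = gaussfit(img, returnfitimage=True)
#
# With the defaults (vheight=1, circle=0, rotate=1), `pars` is ordered
# (height, amplitude, x, y, width_x, width_y, rota).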
def onedmoments(Xax,data,vheight=True,estimator=median,negamp=None,
veryverbose=False, **kwargs):
"""Returns (height, amplitude, x, width_x)
the gaussian parameters of a 1D distribution by calculating its
moments. Depending on the input parameters, will only output
a subset of the above.
If using masked arrays, pass estimator=np.ma.median
'estimator' is used to measure the background level (height)
negamp can be used to force the peak negative (True), positive (False),
or it will be "autodetected" (negamp=None)
"""
dx = np.mean(Xax[1:] - Xax[:-1]) # assume a regular grid
integral = (data*dx).sum()
height = estimator(data)
# try to figure out whether pos or neg based on the minimum width of the pos/neg peaks
Lpeakintegral = integral - height*len(Xax)*dx - (data[data>height]*dx).sum()
Lamplitude = data.min()-height
Lwidth_x = 0.5*(np.abs(Lpeakintegral / Lamplitude))
Hpeakintegral = integral - height*len(Xax)*dx - (data[data<height]*dx).sum()
Hamplitude = data.max()-height
Hwidth_x = 0.5*(np.abs(Hpeakintegral / Hamplitude))
Lstddev = Xax[data<data.mean()].std()
Hstddev = Xax[data>data.mean()].std()
#print "Lstddev: %10.3g Hstddev: %10.3g" % (Lstddev,Hstddev)
#print "Lwidth_x: %10.3g Hwidth_x: %10.3g" % (Lwidth_x,Hwidth_x)
if negamp: # can force the guess to be negative
xcen,amplitude,width_x = Xax[np.argmin(data)],Lamplitude,Lwidth_x
elif negamp is None:
if Hstddev < Lstddev:
xcen,amplitude,width_x, = Xax[np.argmax(data)],Hamplitude,Hwidth_x
else:
xcen,amplitude,width_x, = Xax[np.argmin(data)],Lamplitude,Lwidth_x
else: # if negamp==False, make positive
xcen,amplitude,width_x = Xax[np.argmax(data)],Hamplitude,Hwidth_x
if veryverbose:
print "negamp: %s amp,width,cen Lower: %g, %g Upper: %g, %g Center: %g" %\
(negamp,Lamplitude,Lwidth_x,Hamplitude,Hwidth_x,xcen)
mylist = [amplitude,xcen,width_x]
if np.isnan(width_x) or np.isnan(height) or np.isnan(amplitude):
raise ValueError("something is nan")
if vheight:
mylist = [height] + mylist
return mylist
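# Illustrative usage sketch (hypothetical data): moment-based starting
# parameters for a positive peak on a flat background:
#
#     >>> x = np.linspace(-5, 5, 101)
#     >>> y = 2. + 3. * np.exp(-x**2 / (2. * 0.5**2))
#     >>> height, amplitude, xcen, width = onedmoments(x, y)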
def onedgaussian(x,H,A,dx,w):
"""
Returns a 1-dimensional gaussian of form
H+A*np.exp(-(x-dx)**2/(2*w**2))
"""
return H+A*np.exp(-(x-dx)**2/(2*w**2))
def onedgaussfit(xax, data, err=None,
params=[0,1,0,1],fixed=[False,False,False,False],
limitedmin=[False,False,False,True],
limitedmax=[False,False,False,False], minpars=[0,0,0,0],
maxpars=[0,0,0,0], quiet=True, shh=True,
veryverbose=False,
vheight=True, negamp=False,
usemoments=False):
"""
Inputs:
xax - x axis
data - y axis
err - error corresponding to data
params - Fit parameters: Height of background, Amplitude, Shift, Width
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0)
limitedmax/maxpars - set upper limits on each parameter
quiet - should MPFIT output each iteration?
shh - output final parameters?
usemoments - replace default parameters with moments
Returns:
Fit parameters
Model
Fit errors
chi2
"""
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-onedgaussian(x,*p))]
else:
def f(p,fjac=None): return [0,(y-onedgaussian(x,*p))/err]
return f
    if xax is None:
xax = np.arange(len(data))
if vheight is False:
height = params[0]
fixed[0] = True
if usemoments:
params = onedmoments(xax,data,vheight=vheight,negamp=negamp, veryverbose=veryverbose)
if vheight is False: params = [height]+params
if veryverbose: print "OneD moments: h: %g a: %g c: %g w: %g" % tuple(params)
parinfo = [ {'n':0,'value':params[0],'limits':[minpars[0],maxpars[0]],'limited':[limitedmin[0],limitedmax[0]],'fixed':fixed[0],'parname':"HEIGHT",'error':0} ,
{'n':1,'value':params[1],'limits':[minpars[1],maxpars[1]],'limited':[limitedmin[1],limitedmax[1]],'fixed':fixed[1],'parname':"AMPLITUDE",'error':0},
{'n':2,'value':params[2],'limits':[minpars[2],maxpars[2]],'limited':[limitedmin[2],limitedmax[2]],'fixed':fixed[2],'parname':"SHIFT",'error':0},
{'n':3,'value':params[3],'limits':[minpars[3],maxpars[3]],'limited':[limitedmin[3],limitedmax[3]],'fixed':fixed[3],'parname':"WIDTH",'error':0}]
mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet)
mpp = mp.params
mpperr = mp.perror
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
if (not shh) or veryverbose:
print "Fit status: ",mp.status
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
print parinfo[i]['parname'],p," +/- ",mpperr[i]
print "Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp)
return mpp,onedgaussian(xax,*mpp),mpperr,chi2
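# Illustrative usage sketch (hypothetical data): fit a 1D gaussian, seeding
# the fit from the moments:
#
#     >>> x = np.linspace(-5, 5, 101)
#     >>> y = 1. + 4. * np.exp(-(x - 0.5)**2 / (2. * 0.8**2))
#     >>> mpp, model, mpperr, chi2 = onedgaussfit(x, y, usemoments=True)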
def n_gaussian(pars=None,a=None,dx=None,sigma=None):
"""
Returns a function that sums over N gaussians, where N is the length of
a,dx,sigma *OR* N = len(pars) / 3
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
pars - a list with len(pars) = 3n, assuming a,dx,sigma repeated
dx - offset (velocity center) values
sigma - line widths
a - amplitudes
"""
if len(pars) % 3 == 0:
a = [pars[ii] for ii in xrange(0,len(pars),3)]
dx = [pars[ii] for ii in xrange(1,len(pars),3)]
sigma = [pars[ii] for ii in xrange(2,len(pars),3)]
elif not(len(dx) == len(sigma) == len(a)):
raise ValueError("Wrong array lengths! dx: %i sigma: %i a: %i" % (len(dx),len(sigma),len(a)))
def g(x):
v = np.zeros(len(x))
for i in range(len(dx)):
v += a[i] * np.exp( - ( x - dx[i] )**2 / (2.0*sigma[i]**2) )
return v
return g
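# Illustrative usage sketch: evaluate the sum of two gaussians, with pars
# packed as [a1, dx1, sigma1, a2, dx2, sigma2]:
#
#     >>> x = np.linspace(-10, 10, 201)
#     >>> y = n_gaussian(pars=[1., -2., 0.5, 2., 3., 1.])(x)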
def multigaussfit(xax, data, ngauss=1, err=None, params=[1,0,1],
fixed=[False,False,False], limitedmin=[False,False,True],
limitedmax=[False,False,False], minpars=[0,0,0], maxpars=[0,0,0],
quiet=True, shh=True, veryverbose=False):
"""
An improvement on onedgaussfit. Lets you fit multiple gaussians.
Inputs:
xax - x axis
data - y axis
ngauss - How many gaussians to fit? Default 1 (this could supersede onedgaussfit)
err - error corresponding to data
These parameters need to have length = 3*ngauss. If ngauss > 1 and length = 3, they will
be replicated ngauss times, otherwise they will be reset to defaults:
params - Fit parameters: [amplitude, offset, width] * ngauss
If len(params) % 3 == 0, ngauss will be set to len(params) / 3
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0)
limitedmax/maxpars - set upper limits on each parameter
quiet - should MPFIT output each iteration?
shh - output final parameters?
Returns:
Fit parameters
Model
Fit errors
chi2
"""
if len(params) != ngauss and (len(params) / 3) > ngauss:
ngauss = len(params) / 3
if isinstance(params,np.ndarray): params=params.tolist()
# make sure all various things are the right length; if they're not, fix them using the defaults
for parlist in (params,fixed,limitedmin,limitedmax,minpars,maxpars):
if len(parlist) != 3*ngauss:
            # if you leave the defaults, or enter a length-3 list, it will
            # just be replicated ngauss times to reach the right length
if len(parlist) == 3:
parlist *= ngauss
elif parlist==params:
parlist[:] = [1,0,1] * ngauss
elif parlist==fixed or parlist==limitedmax:
parlist[:] = [False,False,False] * ngauss
elif parlist==limitedmin:
parlist[:] = [False,False,True] * ngauss
elif parlist==minpars or parlist==maxpars:
parlist[:] = [0,0,0] * ngauss
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-n_gaussian(pars=p)(x))]
else:
def f(p,fjac=None): return [0,(y-n_gaussian(pars=p)(x))/err]
return f
    if xax is None:
xax = np.arange(len(data))
parnames = {0:"AMPLITUDE",1:"SHIFT",2:"WIDTH"}
parinfo = [ {'n':ii, 'value':params[ii],
'limits':[minpars[ii],maxpars[ii]],
'limited':[limitedmin[ii],limitedmax[ii]], 'fixed':fixed[ii],
'parname':parnames[ii%3]+str(ii%3), 'error':ii}
for ii in xrange(len(params)) ]
if veryverbose:
print "GUESSES: "
print "\n".join(["%s: %s" % (p['parname'],p['value']) for p in parinfo])
mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet)
mpp = mp.params
mpperr = mp.perror
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
if not shh:
print "Final fit values: "
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
print parinfo[i]['parname'],p," +/- ",mpperr[i]
print "Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp)
return mpp,n_gaussian(pars=mpp)(xax),mpperr,chi2
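# Illustrative usage sketch (hypothetical guesses): fit two gaussians at
# once; params are [amplitude, offset, width] repeated once per component:
#
#     >>> x = np.linspace(-10, 10, 201)
#     >>> y = n_gaussian(pars=[1., -2., 0.5, 2., 3., 1.])(x)
#     >>> mpp, model, mpperr, chi2 = multigaussfit(x, y, ngauss=2,
#     ...     params=[1., -2., 1., 2., 3., 1.])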
def collapse_gaussfit(cube,xax=None,axis=2,negamp=False,usemoments=True,nsigcut=1.0,mppsigcut=1.0,
return_errors=False, **kwargs):
import time
std_coll = cube.std(axis=axis)
std_coll[std_coll==0] = np.nan # must eliminate all-zero spectra
mean_std = median(std_coll[std_coll==std_coll])
if axis > 0:
cube = cube.swapaxes(0,axis)
width_arr = np.zeros(cube.shape[1:]) + np.nan
amp_arr = np.zeros(cube.shape[1:]) + np.nan
chi2_arr = np.zeros(cube.shape[1:]) + np.nan
offset_arr = np.zeros(cube.shape[1:]) + np.nan
width_err = np.zeros(cube.shape[1:]) + np.nan
amp_err = np.zeros(cube.shape[1:]) + np.nan
offset_err = np.zeros(cube.shape[1:]) + np.nan
if xax is None:
xax = np.arange(cube.shape[0])
starttime = time.time()
print "Cube shape: ",cube.shape
if negamp: extremum=np.min
else: extremum=np.max
print "Fitting a total of %i spectra with peak signal above %f" % ((np.abs(extremum(cube,axis=0)) > (mean_std*nsigcut)).sum(),mean_std*nsigcut)
for i in xrange(cube.shape[1]):
t0 = time.time()
nspec = (np.abs(extremum(cube[:,i,:],axis=0)) > (mean_std*nsigcut)).sum()
print "Working on row %d with %d spectra to fit" % (i,nspec) ,
for j in xrange(cube.shape[2]):
if np.abs(extremum(cube[:,i,j])) > (mean_std*nsigcut):
mpp,gfit,mpperr,chi2 = onedgaussfit(xax,cube[:,i,j],err=np.ones(cube.shape[0])*mean_std,negamp=negamp,usemoments=usemoments,**kwargs)
if np.abs(mpp[1]) > (mpperr[1]*mppsigcut):
width_arr[i,j] = mpp[3]
offset_arr[i,j] = mpp[2]
chi2_arr[i,j] = chi2
amp_arr[i,j] = mpp[1]
width_err[i,j] = mpperr[3]
offset_err[i,j] = mpperr[2]
amp_err[i,j] = mpperr[1]
dt = time.time()-t0
if nspec > 0:
print "in %f seconds (average: %f)" % (dt,dt/float(nspec))
else:
print "in %f seconds" % (dt)
print "Total time %f seconds" % (time.time()-starttime)
if return_errors:
return width_arr,offset_arr,amp_arr,width_err,offset_err,amp_err,chi2_arr
else:
return width_arr,offset_arr,amp_arr,chi2_arr
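# Illustrative usage sketch (hypothetical cube): fit every spectrum brighter
# than the noise cut in a cube whose spectral axis is axis 2:
#
#     >>> width, offset, amp, chi2 = collapse_gaussfit(cube, axis=2)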
|
ufoym/agpy
|
agpy/gaussfitter.py
|
Python
|
mit
| 23,846
|
[
"Gaussian"
] |
d27e56a21faa2187c9089eaea1ba43fae372f91282977eb1718105bbaedd51e8
|
#!/usr/bin/python
# (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# most of it copied from AWX's scan_packages module
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: package_facts
short_description: Package information as facts
description:
- Return information about installed packages as facts.
options:
manager:
description:
- The package manager used by the system so we can query the package information.
- Since 2.8 this is a list and can support multiple package managers per system.
- The 'portage' and 'pkg' options were added in version 2.8.
- The 'apk' option was added in version 2.11.
default: ['auto']
choices: ['auto', 'rpm', 'apt', 'portage', 'pkg', 'pacman', 'apk']
type: list
elements: str
strategy:
description:
- This option controls how the module queries the package managers on the system.
C(first) means it will return only information for the first supported package manager available.
C(all) will return information for all supported and available package managers on the system.
choices: ['first', 'all']
default: 'first'
type: str
version_added: "2.8"
version_added: "2.5"
requirements:
- For 'portage' support it requires the C(qlist) utility, which is part of 'app-portage/portage-utils'.
- For Debian-based systems C(python-apt) package must be installed on targeted hosts.
author:
- Matthew Jones (@matburt)
- Brian Coca (@bcoca)
- Adam Miller (@maxamillion)
notes:
- Supports C(check_mode).
'''
EXAMPLES = '''
- name: Gather the package facts
ansible.builtin.package_facts:
manager: auto
- name: Print the package facts
ansible.builtin.debug:
var: ansible_facts.packages
- name: Check whether a package called foobar is installed
ansible.builtin.debug:
msg: "{{ ansible_facts.packages['foobar'] | length }} versions of foobar are installed!"
when: "'foobar' in ansible_facts.packages"
'''
RETURN = '''
ansible_facts:
description: Facts to add to ansible_facts.
returned: always
type: complex
contains:
packages:
description:
- Maps the package name to a non-empty list of dicts with package information.
- Every dict in the list corresponds to one installed version of the package.
- The fields described below are present for all package managers. Depending on the
package manager, there might be more fields for a package.
      returned: when an operating system level package manager is specified or auto detected
type: dict
contains:
name:
description: The package's name.
returned: always
type: str
version:
description: The package's version.
returned: always
type: str
source:
description: Where information on the package came from.
returned: always
type: str
sample: |-
{
"packages": {
"kernel": [
{
"name": "kernel",
"source": "rpm",
"version": "3.10.0",
...
},
{
"name": "kernel",
"source": "rpm",
"version": "3.10.0",
...
},
...
],
"kernel-tools": [
{
"name": "kernel-tools",
"source": "rpm",
"version": "3.10.0",
...
}
],
...
}
}
# Sample rpm
{
"packages": {
"kernel": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.26.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.16.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.10.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.21.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools-libs": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools-libs",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
}
}
# Sample deb
{
"packages": {
"libbz2-1.0": [
{
"version": "1.0.6-5",
"source": "apt",
"arch": "amd64",
"name": "libbz2-1.0"
}
],
"patch": [
{
"version": "2.7.1-4ubuntu1",
"source": "apt",
"arch": "amd64",
"name": "patch"
}
],
}
}
'''
import re
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils.facts.packages import LibMgr, CLIMgr, get_all_pkg_managers
class RPM(LibMgr):
LIB = 'rpm'
def list_installed(self):
return self._lib.TransactionSet().dbMatch()
def get_package_details(self, package):
return dict(name=package[self._lib.RPMTAG_NAME],
version=package[self._lib.RPMTAG_VERSION],
release=package[self._lib.RPMTAG_RELEASE],
epoch=package[self._lib.RPMTAG_EPOCH],
arch=package[self._lib.RPMTAG_ARCH],)
def is_available(self):
        ''' we expect the python bindings to be installed, but this warns if they are missing while the rpm CLI is present'''
we_have_lib = super(RPM, self).is_available()
try:
get_bin_path('rpm')
if not we_have_lib and not has_respawned():
# try to locate an interpreter with the necessary lib
interpreters = ['/usr/libexec/platform-python',
'/usr/bin/python3',
'/usr/bin/python2']
interpreter_path = probe_interpreters_for_module(interpreters, self.LIB)
if interpreter_path:
respawn_module(interpreter_path)
# end of the line for this process; this module will exit when the respawned copy completes
if not we_have_lib:
module.warn('Found "rpm" but %s' % (missing_required_lib(self.LIB)))
except ValueError:
pass
return we_have_lib
class APT(LibMgr):
LIB = 'apt'
def __init__(self):
self._cache = None
super(APT, self).__init__()
@property
def pkg_cache(self):
if self._cache is not None:
return self._cache
self._cache = self._lib.Cache()
return self._cache
def is_available(self):
        ''' we expect the python bindings to be installed, but if apt/apt-get/aptitude is present, warn about the missing bindings'''
we_have_lib = super(APT, self).is_available()
if not we_have_lib:
for exe in ('apt', 'apt-get', 'aptitude'):
try:
get_bin_path(exe)
except ValueError:
continue
else:
if not has_respawned():
# try to locate an interpreter with the necessary lib
interpreters = ['/usr/bin/python3',
'/usr/bin/python2']
interpreter_path = probe_interpreters_for_module(interpreters, self.LIB)
if interpreter_path:
respawn_module(interpreter_path)
# end of the line for this process; this module will exit here when respawned copy completes
module.warn('Found "%s" but %s' % (exe, missing_required_lib('apt')))
break
return we_have_lib
def list_installed(self):
# Store the cache to avoid running pkg_cache() for each item in the comprehension, which is very slow
cache = self.pkg_cache
return [pk for pk in cache.keys() if cache[pk].is_installed]
def get_package_details(self, package):
ac_pkg = self.pkg_cache[package].installed
return dict(name=package, version=ac_pkg.version, arch=ac_pkg.architecture, category=ac_pkg.section, origin=ac_pkg.origins[0].origin)
class PACMAN(CLIMgr):
CLI = 'pacman'
def list_installed(self):
locale = get_best_parsable_locale(module)
rc, out, err = module.run_command([self._cli, '-Qi'], environ_update=dict(LC_ALL=locale))
if rc != 0 or err:
raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
return out.split("\n\n")[:-1]
def get_package_details(self, package):
# parse values of details that might extend over several lines
raw_pkg_details = {}
last_detail = None
for line in package.splitlines():
m = re.match(r"([\w ]*[\w]) +: (.*)", line)
if m:
last_detail = m.group(1)
raw_pkg_details[last_detail] = m.group(2)
else:
# append value to previous detail
raw_pkg_details[last_detail] = raw_pkg_details[last_detail] + " " + line.lstrip()
provides = None
if raw_pkg_details['Provides'] != 'None':
provides = [
p.split('=')[0]
for p in raw_pkg_details['Provides'].split(' ')
]
return {
'name': raw_pkg_details['Name'],
'version': raw_pkg_details['Version'],
'arch': raw_pkg_details['Architecture'],
'provides': provides,
}
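# Illustrative sketch of the parsing above (stanza text hypothetical): a
# single `pacman -Qi` block such as
#
#     Name         : bash
#     Version      : 5.1.016-1
#     Architecture : x86_64
#     Provides     : sh
#
# yields {'name': 'bash', 'version': '5.1.016-1', 'arch': 'x86_64',
# 'provides': ['sh']}.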
class PKG(CLIMgr):
CLI = 'pkg'
atoms = ['name', 'version', 'origin', 'installed', 'automatic', 'arch', 'category', 'prefix', 'vital']
def list_installed(self):
rc, out, err = module.run_command([self._cli, 'query', "%%%s" % '\t%'.join(['n', 'v', 'R', 't', 'a', 'q', 'o', 'p', 'V'])])
if rc != 0 or err:
raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
return out.splitlines()
def get_package_details(self, package):
pkg = dict(zip(self.atoms, package.split('\t')))
if 'arch' in pkg:
try:
pkg['arch'] = pkg['arch'].split(':')[2]
except IndexError:
pass
if 'automatic' in pkg:
pkg['automatic'] = bool(int(pkg['automatic']))
if 'category' in pkg:
pkg['category'] = pkg['category'].split('/', 1)[0]
if 'version' in pkg:
if ',' in pkg['version']:
pkg['version'], pkg['port_epoch'] = pkg['version'].split(',', 1)
else:
pkg['port_epoch'] = 0
if '_' in pkg['version']:
pkg['version'], pkg['revision'] = pkg['version'].split('_', 1)
else:
pkg['revision'] = '0'
if 'vital' in pkg:
pkg['vital'] = bool(int(pkg['vital']))
return pkg
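# Illustrative sketch (field values hypothetical): each line from the
# `pkg query` above is tab-separated in the order of `atoms`, e.g.
#
#     pkg\t1.17.5\tFreeBSD\t1637240000\t0\tFreeBSD:13:amd64\tports-mgmt\t/usr/local\t1
#
# parses to name='pkg', version='1.17.5' (port_epoch=0, revision='0'),
# automatic=False, arch='amd64', vital=True.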
class PORTAGE(CLIMgr):
CLI = 'qlist'
atoms = ['category', 'name', 'version', 'ebuild_revision', 'slots', 'prefixes', 'sufixes']
def list_installed(self):
rc, out, err = module.run_command(' '.join([self._cli, '-Iv', '|', 'xargs', '-n', '1024', 'qatom']), use_unsafe_shell=True)
if rc != 0:
raise RuntimeError("Unable to list packages rc=%s : %s" % (rc, to_native(err)))
return out.splitlines()
def get_package_details(self, package):
return dict(zip(self.atoms, package.split()))
class APK(CLIMgr):
CLI = 'apk'
def list_installed(self):
rc, out, err = module.run_command([self._cli, 'info', '-v'])
if rc != 0 or err:
raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
return out.splitlines()
def get_package_details(self, package):
raw_pkg_details = {'name': package, 'version': '', 'release': ''}
nvr = package.rsplit('-', 2)
try:
return {
'name': nvr[0],
'version': nvr[1],
'release': nvr[2],
}
except IndexError:
return raw_pkg_details
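# Illustrative sketch (package string hypothetical): the rsplit above turns
# 'musl-1.2.2-r3' into ['musl', '1.2.2', 'r3'], i.e.
# {'name': 'musl', 'version': '1.2.2', 'release': 'r3'}; strings with fewer
# than two '-' separators fall back to the raw defaults.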
def main():
# get supported pkg managers
PKG_MANAGERS = get_all_pkg_managers()
PKG_MANAGER_NAMES = [x.lower() for x in PKG_MANAGERS.keys()]
# start work
global module
module = AnsibleModule(argument_spec=dict(manager={'type': 'list', 'elements': 'str', 'default': ['auto']},
strategy={'choices': ['first', 'all'], 'default': 'first'}),
supports_check_mode=True)
packages = {}
results = {'ansible_facts': {}}
managers = [x.lower() for x in module.params['manager']]
strategy = module.params['strategy']
if 'auto' in managers:
# keep order from user, we do dedupe below
managers.extend(PKG_MANAGER_NAMES)
managers.remove('auto')
unsupported = set(managers).difference(PKG_MANAGER_NAMES)
if unsupported:
if 'auto' in module.params['manager']:
msg = 'Could not auto detect a usable package manager, check warnings for details.'
else:
msg = 'Unsupported package managers requested: %s' % (', '.join(unsupported))
module.fail_json(msg=msg)
found = 0
seen = set()
for pkgmgr in managers:
if found and strategy == 'first':
break
# dedupe as per above
if pkgmgr in seen:
continue
seen.add(pkgmgr)
try:
try:
# manager throws exception on init (calls self.test) if not usable.
manager = PKG_MANAGERS[pkgmgr]()
if manager.is_available():
found += 1
packages.update(manager.get_packages())
except Exception as e:
if pkgmgr in module.params['manager']:
module.warn('Requested package manager %s was not usable by this module: %s' % (pkgmgr, to_text(e)))
continue
except Exception as e:
if pkgmgr in module.params['manager']:
module.warn('Failed to retrieve packages with %s: %s' % (pkgmgr, to_text(e)))
if found == 0:
msg = ('Could not detect a supported package manager from the following list: %s, '
'or the required Python library is not installed. Check warnings for details.' % managers)
module.fail_json(msg=msg)
# Set the facts, this will override the facts in ansible_facts that might exist from previous runs
# when using operating system level or distribution package managers
results['ansible_facts']['packages'] = packages
module.exit_json(**results)
if __name__ == '__main__':
main()
|
thnee/ansible
|
lib/ansible/modules/package_facts.py
|
Python
|
gpl-3.0
| 16,572
|
[
"Brian"
] |
e7613590a239e858f7a852586dff520578e1cfec8922755e5b6792d1c6f77697
|
""" Affine image registration module consisting of the following classes:
AffineMap: encapsulates the necessary information to perform affine
transforms between two domains, defined by a `static` and a `moving`
image. The `domain` of the transform is the set of points in the
`static` image's grid, and the `codomain` is the set of points in
the `moving` image. When we call the `transform` method, `AffineMap`
maps each point `x` of the domain (`static` grid) to the codomain
(`moving` grid) and interpolates the `moving` image at that point
to obtain the intensity value to be placed at `x` in the resulting
grid. The `transform_inverse` method performs the opposite operation
mapping points in the codomain to points in the domain.
ParzenJointHistogram: computes the marginal and joint distributions of
intensities of a pair of images, using Parzen windows [Parzen62]
with a cubic spline kernel, as proposed by Mattes et al. [Mattes03].
It also computes the gradient of the joint histogram w.r.t. the
parameters of a given transform.
MutualInformationMetric: computes the value and gradient of the mutual
information metric the way `Optimizer` needs them. That is, given
a set of transform parameters, it will use `ParzenJointHistogram`
to compute the value and gradient of the joint intensity histogram
    evaluated at the given parameters, and evaluate the value and
    gradient of the histogram's mutual information.
AffineRegistration: it runs the multi-resolution registration, putting
all the pieces together. It needs to create the scale space of the
images and run the multi-resolution registration by using the Metric
and the Optimizer at each level of the Gaussian pyramid. At each
    level, it will set up the metric to compute the value and gradient of the
metric with the input images with different levels of smoothing.
References
----------
[Parzen62] E. Parzen. On the estimation of a probability density
function and the mode. Annals of Mathematical Statistics,
33(3), 1065-1076, 1962.
[Mattes03] Mattes, D., Haynor, D. R., Vesselle, H., Lewellen, T. K.,
& Eubank, W. PET-CT image registration in the chest using
free-form deformations. IEEE Transactions on Medical
Imaging, 22(1), 120-8, 2003.
"""
import numpy as np
import numpy.linalg as npl
import scipy.ndimage as ndimage
from ..core.optimize import Optimizer
from ..core.optimize import SCIPY_LESS_0_12
from . import vector_fields as vf
from . import VerbosityLevels
from .parzenhist import (ParzenJointHistogram,
sample_domain_regular,
compute_parzen_mi)
from .imwarp import (get_direction_and_spacings, ScaleSpace)
from .scalespace import IsotropicScaleSpace
from warnings import warn
_interp_options = ['nearest', 'linear']
_transform_method = {}
_transform_method[(2, 'nearest')] = vf.transform_2d_affine_nn
_transform_method[(3, 'nearest')] = vf.transform_3d_affine_nn
_transform_method[(2, 'linear')] = vf.transform_2d_affine
_transform_method[(3, 'linear')] = vf.transform_3d_affine
class AffineInversionError(Exception):
pass
class AffineMap(object):
def __init__(self, affine, domain_grid_shape=None, domain_grid2world=None,
codomain_grid_shape=None, codomain_grid2world=None):
""" AffineMap
Implements an affine transformation whose domain is given by
`domain_grid` and `domain_grid2world`, and whose co-domain is
given by `codomain_grid` and `codomain_grid2world`.
    The actual transform is represented by the `affine` matrix, which
    operates in world coordinates. Therefore, to transform a moving image
towards a static image, we first map each voxel (i,j,k) of the static
image to world coordinates (x,y,z) by applying `domain_grid2world`.
Then we apply the `affine` transform to (x,y,z) obtaining (x', y', z')
in moving image's world coordinates. Finally, (x', y', z') is mapped
to voxel coordinates (i', j', k') in the moving image by multiplying
(x', y', z') by the inverse of `codomain_grid2world`. The
`codomain_grid_shape` is used analogously to transform the static
image towards the moving image when calling `transform_inverse`.
If the domain/co-domain information is not provided (None) then the
sampling information needs to be specified each time the `transform`
or `transform_inverse` is called to transform images. Note that such
sampling information is not necessary to transform points defined in
physical space, such as stream lines.
Parameters
----------
affine : array, shape (dim + 1, dim + 1)
the matrix defining the affine transform, where `dim` is the
dimension of the space this map operates in (2 for 2D images,
3 for 3D images). If None, then `self` represents the identity
transformation.
domain_grid_shape : sequence, shape (dim,), optional
the shape of the default domain sampling grid. When `transform`
is called to transform an image, the resulting image will have
this shape, unless a different sampling information is provided.
If None, then the sampling grid shape must be specified each time
the `transform` method is called.
domain_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with the domain grid.
If None (the default), then the grid-to-world transform is assumed
to be the identity.
codomain_grid_shape : sequence of integers, shape (dim,)
the shape of the default co-domain sampling grid. When
`transform_inverse` is called to transform an image, the resulting
image will have this shape, unless a different sampling
information is provided. If None (the default), then the sampling
grid shape must be specified each time the `transform_inverse`
method is called.
codomain_grid2world : array, shape (dim + 1, dim + 1)
the grid-to-world transform associated with the co-domain grid.
If None (the default), then the grid-to-world transform is assumed
to be the identity.
"""
self.set_affine(affine)
self.domain_shape = domain_grid_shape
self.domain_grid2world = domain_grid2world
self.codomain_shape = codomain_grid_shape
self.codomain_grid2world = codomain_grid2world
def set_affine(self, affine):
""" Sets the affine transform (operating in physical space)
Parameters
----------
affine : array, shape (dim + 1, dim + 1)
the matrix representing the affine transform operating in
physical space. The domain and co-domain information
remains unchanged. If None, then `self` represents the identity
transformation.
"""
self.affine = affine
if self.affine is None:
self.affine_inv = None
return
if np.any(np.isnan(affine)):
raise AffineInversionError('Affine contains invalid elements')
try:
self.affine_inv = npl.inv(affine)
except npl.LinAlgError:
raise AffineInversionError('Affine cannot be inverted')
def _apply_transform(self, image, interp='linear', image_grid2world=None,
sampling_grid_shape=None, sampling_grid2world=None,
resample_only=False, apply_inverse=False):
""" Transforms the input image applying this affine transform
This is a generic function to transform images using either this
(direct) transform or its inverse.
If applying the direct transform (`apply_inverse=False`):
by default, the transformed image is sampled at a grid defined by
`self.domain_shape` and `self.domain_grid2world`.
If applying the inverse transform (`apply_inverse=True`):
by default, the transformed image is sampled at a grid defined by
`self.codomain_shape` and `self.codomain_grid2world`.
If the sampling information was not provided at initialization of this
transform then `sampling_grid_shape` is mandatory.
Parameters
----------
image : array, shape (X, Y) or (X, Y, Z)
the image to be transformed
interp : string, either 'linear' or 'nearest'
the type of interpolation to be used, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with `image`.
If None (the default), then the grid-to-world transform is assumed
to be the identity.
sampling_grid_shape : sequence, shape (dim,), optional
the shape of the grid where the transformed image must be sampled.
If None (the default), then `self.domain_shape` is used instead
(which must have been set at initialization, otherwise an exception
will be raised).
sampling_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with the sampling grid
(specified by `sampling_grid_shape`, or by default
`self.domain_shape`). If None (the default), then the
grid-to-world transform is assumed to be the identity.
resample_only : Boolean, optional
If False (the default) the affine transform is applied normally.
If True, then the affine transform is not applied, and the input
image is just re-sampled on the domain grid of this transform.
apply_inverse : Boolean, optional
If False (the default) the image is transformed from the codomain
of this transform to its domain using the (direct) affine
transform. Otherwise, the image is transformed from the domain
of this transform to its codomain using the (inverse) affine
transform.
Returns
-------
transformed : array, shape `sampling_grid_shape` or `self.domain_shape`
the transformed image, sampled at the requested grid
"""
# Verify valid interpolation requested
if interp not in _interp_options:
raise ValueError('Unknown interpolation method: %s' % (interp,))
# Obtain sampling grid
if sampling_grid_shape is None:
if apply_inverse:
sampling_grid_shape = self.codomain_shape
else:
sampling_grid_shape = self.domain_shape
if sampling_grid_shape is None:
msg = 'Unknown sampling info. Provide a valid sampling_grid_shape'
raise ValueError(msg)
dim = len(sampling_grid_shape)
shape = np.array(sampling_grid_shape, dtype=np.int32)
# Verify valid image dimension
if dim < 2 or dim > 3:
raise ValueError('Undefined transform for dimension: %d' % (dim,))
# Obtain grid-to-world transform for sampling grid
if sampling_grid2world is None:
if apply_inverse:
sampling_grid2world = self.codomain_grid2world
else:
sampling_grid2world = self.domain_grid2world
if sampling_grid2world is None:
sampling_grid2world = np.eye(dim + 1)
# Obtain world-to-grid transform for input image
if image_grid2world is None:
if apply_inverse:
image_grid2world = self.domain_grid2world
else:
image_grid2world = self.codomain_grid2world
if image_grid2world is None:
image_grid2world = np.eye(dim + 1)
image_world2grid = npl.inv(image_grid2world)
# Compute the transform from sampling grid to input image grid
if apply_inverse:
aff = self.affine_inv
else:
aff = self.affine
if (aff is None) or resample_only:
comp = image_world2grid.dot(sampling_grid2world)
else:
comp = image_world2grid.dot(aff.dot(sampling_grid2world))
# Transform the input image
if interp == 'linear':
image = image.astype(np.float64)
transformed = _transform_method[(dim, interp)](image, shape, comp)
return transformed
def transform(self, image, interp='linear', image_grid2world=None,
sampling_grid_shape=None, sampling_grid2world=None,
resample_only=False):
""" Transforms the input image from co-domain to domain space
By default, the transformed image is sampled at a grid defined by
`self.domain_shape` and `self.domain_grid2world`. If such
information was not provided then `sampling_grid_shape` is mandatory.
Parameters
----------
image : array, shape (X, Y) or (X, Y, Z)
the image to be transformed
interp : string, either 'linear' or 'nearest'
the type of interpolation to be used, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with `image`.
If None (the default), then the grid-to-world transform is assumed
to be the identity.
sampling_grid_shape : sequence, shape (dim,), optional
the shape of the grid where the transformed image must be sampled.
            If None (the default), then `self.domain_shape` is used instead
(which must have been set at initialization, otherwise an exception
will be raised).
sampling_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with the sampling grid
(specified by `sampling_grid_shape`, or by default
            `self.domain_shape`). If None (the default), then the
grid-to-world transform is assumed to be the identity.
resample_only : Boolean, optional
If False (the default) the affine transform is applied normally.
If True, then the affine transform is not applied, and the input
image is just re-sampled on the domain grid of this transform.
Returns
-------
        transformed : array, shape `sampling_grid_shape` or
            `self.domain_shape`
the transformed image, sampled at the requested grid
"""
transformed = self._apply_transform(image, interp, image_grid2world,
sampling_grid_shape,
sampling_grid2world,
resample_only,
apply_inverse=False)
return np.array(transformed)
def transform_inverse(self, image, interp='linear', image_grid2world=None,
sampling_grid_shape=None, sampling_grid2world=None,
resample_only=False):
""" Transforms the input image from domain to co-domain space
By default, the transformed image is sampled at a grid defined by
`self.codomain_shape` and `self.codomain_grid2world`. If such
information was not provided then `sampling_grid_shape` is mandatory.
Parameters
----------
image : array, shape (X, Y) or (X, Y, Z)
the image to be transformed
interp : string, either 'linear' or 'nearest'
the type of interpolation to be used, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with `image`.
If None (the default), then the grid-to-world transform is assumed
to be the identity.
sampling_grid_shape : sequence, shape (dim,), optional
the shape of the grid where the transformed image must be sampled.
If None (the default), then `self.codomain_shape` is used instead
(which must have been set at initialization, otherwise an exception
will be raised).
sampling_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with the sampling grid
(specified by `sampling_grid_shape`, or by default
`self.codomain_shape`). If None (the default), then the
grid-to-world transform is assumed to be the identity.
resample_only : Boolean, optional
If False (the default) the affine transform is applied normally.
If True, then the affine transform is not applied, and the input
image is just re-sampled on the domain grid of this transform.
Returns
-------
transformed : array, shape `sampling_grid_shape` or
`self.codomain_shape`
the transformed image, sampled at the requested grid
"""
transformed = self._apply_transform(image, interp, image_grid2world,
sampling_grid_shape,
sampling_grid2world,
resample_only,
apply_inverse=True)
return np.array(transformed)
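# Illustrative usage sketch (arrays hypothetical; `static` and `moving` are
# assumed to be pre-loaded 3D images): resample `moving` onto the static grid
# with a pure world-space translation:
#
#     >>> affine = np.array([[1., 0., 0., 2.5],
#     ...                    [0., 1., 0., -1.0],
#     ...                    [0., 0., 1., 0.0],
#     ...                    [0., 0., 0., 1.0]])
#     >>> affine_map = AffineMap(affine, static.shape, np.eye(4),
#     ...                        moving.shape, np.eye(4))
#     >>> resampled = affine_map.transform(moving)
#     >>> restored = affine_map.transform_inverse(resampled)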
class MutualInformationMetric(object):
def __init__(self, nbins=32, sampling_proportion=None):
r""" Initializes an instance of the Mutual Information metric
This class implements the methods required by Optimizer to drive the
registration process.
Parameters
----------
nbins : int, optional
the number of bins to be used for computing the intensity
histograms. The default is 32.
sampling_proportion : None or float in interval (0, 1], optional
There are two types of sampling: dense and sparse. Dense sampling
uses all voxels for estimating the (joint and marginal) intensity
histograms, while sparse sampling uses a subset of them. If
`sampling_proportion` is None, then dense sampling is
used. If `sampling_proportion` is a floating point value in (0,1]
then sparse sampling is used, where `sampling_proportion`
specifies the proportion of voxels to be used. The default is
None.
Notes
-----
Since we use linear interpolation, images are not, in general,
differentiable at exact voxel coordinates, but they are differentiable
between voxel coordinates. When using sparse sampling, selected voxels
are slightly moved by adding a small random displacement within one
voxel to prevent sampling points from being located exactly at voxel
coordinates. When using dense sampling, this random displacement is
not applied.
"""
self.histogram = ParzenJointHistogram(nbins)
self.sampling_proportion = sampling_proportion
self.metric_val = None
self.metric_grad = None
def setup(self, transform, static, moving, static_grid2world=None,
moving_grid2world=None, starting_affine=None):
r""" Prepares the metric to compute intensity densities and gradients
The histograms will be setup to compute probability densities of
intensities within the minimum and maximum values of `static` and
`moving`
Parameters
----------
transform: instance of Transform
the transformation with respect to whose parameters the gradient
must be computed
static : array, shape (S, R, C) or (R, C)
static image
moving : array, shape (S', R', C') or (R', C')
moving image. The dimensions of the static (S, R, C) and moving
(S', R', C') images do not need to be the same.
static_grid2world : array (dim+1, dim+1), optional
the grid-to-space transform of the static image. The default is
None, implying the transform is the identity.
moving_grid2world : array (dim+1, dim+1)
the grid-to-space transform of the moving image. The default is
None, implying the spacing along all axes is 1.
starting_affine : array, shape (dim+1, dim+1), optional
the pre-aligning matrix (an affine transform) that roughly aligns
the moving image towards the static image. If None, no
pre-alignment is performed. If a pre-alignment matrix is available,
it is recommended to provide this matrix as `starting_affine`
instead of manually transforming the moving image to reduce
interpolation artifacts. The default is None, implying no
pre-alignment is performed.
"""
self.dim = len(static.shape)
if moving_grid2world is None:
moving_grid2world = np.eye(self.dim + 1)
if static_grid2world is None:
static_grid2world = np.eye(self.dim + 1)
self.transform = transform
self.static = np.array(static).astype(np.float64)
self.moving = np.array(moving).astype(np.float64)
self.static_grid2world = static_grid2world
self.static_world2grid = npl.inv(static_grid2world)
self.moving_grid2world = moving_grid2world
self.moving_world2grid = npl.inv(moving_grid2world)
self.static_direction, self.static_spacing = \
get_direction_and_spacings(static_grid2world, self.dim)
self.moving_direction, self.moving_spacing = \
get_direction_and_spacings(moving_grid2world, self.dim)
self.starting_affine = starting_affine
P = np.eye(self.dim + 1)
if self.starting_affine is not None:
P = self.starting_affine
self.affine_map = AffineMap(P, static.shape, static_grid2world,
moving.shape, moving_grid2world)
if self.dim == 2:
self.interp_method = vf.interpolate_scalar_2d
else:
self.interp_method = vf.interpolate_scalar_3d
if self.sampling_proportion is None:
self.samples = None
self.ns = 0
else:
k = int(np.ceil(1.0 / self.sampling_proportion))
shape = np.array(static.shape, dtype=np.int32)
self.samples = sample_domain_regular(k, shape, static_grid2world)
self.samples = np.array(self.samples)
self.ns = self.samples.shape[0]
# Add a column of ones (homogeneous coordinates)
self.samples = np.hstack((self.samples, np.ones(self.ns)[:, None]))
if self.starting_affine is None:
self.samples_prealigned = self.samples
else:
self.samples_prealigned =\
self.starting_affine.dot(self.samples.T).T
# Sample the static image
static_p = self.static_world2grid.dot(self.samples.T).T
static_p = static_p[..., :self.dim]
self.static_vals, inside = self.interp_method(static, static_p)
self.static_vals = np.array(self.static_vals, dtype=np.float64)
self.histogram.setup(self.static, self.moving)
def _update_histogram(self):
r""" Updates the histogram according to the current affine transform
The current affine transform is given by `self.affine_map`, which
must be set before calling this method.
Returns
-------
static_values: array, shape(n,) if sparse sampling is being used,
array, shape(S, R, C) or (R, C) if dense sampling
the intensity values corresponding to the static image used to
update the histogram. If sparse sampling is being used, then
it is simply a sequence of scalars, obtained by sampling the static
image at the `n` sampling points. If dense sampling is being used,
then the intensities are given directly by the static image,
whose shape is (S, R, C) in the 3D case or (R, C) in the 2D case.
moving_values: array, shape(n,) if sparse sampling is being used,
array, shape(S, R, C) or (R, C) if dense sampling
the intensity values corresponding to the moving image used to
update the histogram. If sparse sampling is being used, then
it is simply a sequence of scalars, obtained by sampling the moving
image at the `n` sampling points (mapped to the moving space by the
current affine transform). If dense sampling is being used,
            then the intensities are given by the moving image linearly
transformed towards the static image by the current affine, which
results in an image of the same shape as the static image.
"""
static_values = None
moving_values = None
if self.sampling_proportion is None: # Dense case
static_values = self.static
moving_values = self.affine_map.transform(self.moving)
self.histogram.update_pdfs_dense(static_values, moving_values)
else: # Sparse case
sp_to_moving = self.moving_world2grid.dot(self.affine_map.affine)
pts = sp_to_moving.dot(self.samples.T).T # Points on moving grid
pts = pts[..., :self.dim]
self.moving_vals, inside = self.interp_method(self.moving, pts)
self.moving_vals = np.array(self.moving_vals)
static_values = self.static_vals
moving_values = self.moving_vals
self.histogram.update_pdfs_sparse(static_values, moving_values)
return static_values, moving_values
def _update_mutual_information(self, params, update_gradient=True):
r""" Updates marginal and joint distributions and the joint gradient
The distributions are updated according to the static and transformed
images. The transformed image is precisely the moving image after
transforming it by the transform defined by the `params` parameters.
The gradient of the joint PDF is computed only if update_gradient
is True.
Parameters
----------
params : array, shape (n,)
the parameter vector of the transform currently used by the metric
(the transform name is provided when self.setup is called), n is
the number of parameters of the transform
update_gradient : Boolean, optional
if True, the gradient of the joint PDF will also be computed,
otherwise, only the marginal and joint PDFs will be computed.
The default is True.
"""
# Get the matrix associated with the `params` parameter vector
current_affine = self.transform.param_to_matrix(params)
# Get the static-to-prealigned matrix (only needed for the MI gradient)
static2prealigned = self.static_grid2world
if self.starting_affine is not None:
current_affine = current_affine.dot(self.starting_affine)
static2prealigned = self.starting_affine.dot(static2prealigned)
self.affine_map.set_affine(current_affine)
# Update the histogram with the current joint intensities
static_values, moving_values = self._update_histogram()
H = self.histogram # Shortcut to `self.histogram`
grad = None # Buffer to write the MI gradient into (if needed)
if update_gradient:
# Re-allocate buffer for the gradient, if needed
n = params.shape[0] # Number of parameters
if (self.metric_grad is None) or (self.metric_grad.shape[0] != n):
self.metric_grad = np.empty(n)
grad = self.metric_grad
# Compute the gradient of the joint PDF w.r.t. parameters
if self.sampling_proportion is None: # Dense case
# Compute the gradient of moving img. at physical points
# associated with the >>static image's grid<< cells
# The image gradient must be eval. at current moved points
grid_to_world = current_affine.dot(self.static_grid2world)
mgrad, inside = vf.gradient(self.moving,
self.moving_world2grid,
self.moving_spacing,
self.static.shape,
grid_to_world)
# The Jacobian must be evaluated at the pre-aligned points
H.update_gradient_dense(params, self.transform, static_values,
moving_values, static2prealigned, mgrad)
else: # Sparse case
# Compute the gradient of moving at the sampling points
# which are already given in physical space coordinates
pts = current_affine.dot(self.samples.T).T # Moved points
mgrad, inside = vf.sparse_gradient(self.moving,
self.moving_world2grid,
self.moving_spacing,
pts)
# The Jacobian must be evaluated at the pre-aligned points
pts = self.samples_prealigned[..., :self.dim]
H.update_gradient_sparse(params, self.transform, static_values,
moving_values, pts, mgrad)
# Call the cythonized MI computation with self.histogram fields
self.metric_val = compute_parzen_mi(H.joint, H.joint_grad,
H.smarginal, H.mmarginal,
grad)
def distance(self, params):
r""" Numeric value of the negative Mutual Information
We need to change the sign so we can use standard minimization
algorithms.
Parameters
----------
params : array, shape (n,)
the parameter vector of the transform currently used by the metric
(the transform name is provided when self.setup is called), n is
the number of parameters of the transform
Returns
-------
neg_mi : float
the negative mutual information of the input images after
transforming the moving image by the currently set transform
with `params` parameters
"""
try:
self._update_mutual_information(params, False)
except AffineInversionError:
return np.inf
return -1 * self.metric_val
def gradient(self, params):
r""" Numeric value of the metric's gradient at the given parameters
Parameters
----------
params : array, shape (n,)
the parameter vector of the transform currently used by the metric
(the transform name is provided when self.setup is called), n is
the number of parameters of the transform
Returns
-------
grad : array, shape (n,)
the gradient of the negative Mutual Information
"""
try:
self._update_mutual_information(params, True)
except AffineInversionError:
return 0 * self.metric_grad
return -1 * self.metric_grad
def distance_and_gradient(self, params):
r""" Numeric value of the metric and its gradient at given parameters
Parameters
----------
params : array, shape (n,)
the parameter vector of the transform currently used by the metric
(the transform name is provided when self.setup is called), n is
the number of parameters of the transform
Returns
-------
neg_mi : float
the negative mutual information of the input images after
transforming the moving image by the currently set transform
with `params` parameters
neg_mi_grad : array, shape (n,)
the gradient of the negative Mutual Information
"""
try:
self._update_mutual_information(params, True)
except AffineInversionError:
return np.inf, 0 * self.metric_grad
return -1 * self.metric_val, -1 * self.metric_grad
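# Illustrative sketch (transform, images and params hypothetical): after
# `setup` has been called, an optimizer can query this metric via
#
#     >>> neg_mi = metric.distance(params)
#     >>> neg_mi, neg_mi_grad = metric.distance_and_gradient(params)
#
# where `params` is the current parameter vector of the transform.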
class AffineRegistration(object):
def __init__(self,
metric=None,
level_iters=None,
sigmas=None,
factors=None,
method='L-BFGS-B',
ss_sigma_factor=None,
options=None):
r""" Initializes an instance of the AffineRegistration class
Parameters
----------
metric : None or object, optional
an instance of a metric. The default is None, implying
the Mutual Information metric with default settings.
level_iters : sequence, optional
the number of iterations at each scale of the scale space.
            `level_iters[0]` corresponds to the coarsest scale and
            `level_iters[-1]` to the finest. By default, a 3-level scale
            space with iterations sequence equal to [10000, 1000, 100]
            will be used.
sigmas : sequence of floats, optional
custom smoothing parameter to build the scale space (one parameter
for each scale). By default, the sequence of sigmas will be
[3, 1, 0].
factors : sequence of floats, optional
custom scale factors to build the scale space (one factor for each
scale). By default, the sequence of factors will be [4, 2, 1].
method : string, optional
optimization method to be used. If Scipy version < 0.12, then
only L-BFGS-B is available. Otherwise, `method` can be any
            gradient-based method available in `dipy.core.optimize`: CG, BFGS,
Newton-CG, dogleg or trust-ncg.
The default is 'L-BFGS-B'.
ss_sigma_factor : float, optional
If None, this parameter is not used and an isotropic scale
space with the given `factors` and `sigmas` will be built.
If not None, an anisotropic scale space will be used by
automatically selecting the smoothing sigmas along each axis
according to the voxel dimensions of the given image.
The `ss_sigma_factor` is used to scale the automatically computed
sigmas. For example, in the isotropic case, the sigma of the
kernel will be $factor * (2 ^ i)$ where
$i = 1, 2, ..., n_scales - 1$ is the scale (the finest resolution
image $i=0$ is never smoothed). The default is None.
options : dict, optional
extra optimization options. The default is None, implying
no extra options are passed to the optimizer.
"""
self.metric = metric
if self.metric is None:
self.metric = MutualInformationMetric()
if level_iters is None:
level_iters = [10000, 1000, 100]
self.level_iters = level_iters
self.levels = len(level_iters)
if self.levels == 0:
raise ValueError('The iterations sequence cannot be empty')
self.options = options
self.method = method
if ss_sigma_factor is not None:
self.use_isotropic = False
self.ss_sigma_factor = ss_sigma_factor
else:
self.use_isotropic = True
if factors is None:
factors = [4, 2, 1]
if sigmas is None:
sigmas = [3, 1, 0]
self.factors = factors
self.sigmas = sigmas
self.verbosity = VerbosityLevels.STATUS
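    # Illustrative construction sketch (values hypothetical): a sparse-sampled
    # MI metric with the default 3-level scale space:
    #
    #     >>> metric = MutualInformationMetric(nbins=32,
    #     ...                                  sampling_proportion=0.25)
    #     >>> affreg = AffineRegistration(metric=metric,
    #     ...                             level_iters=[10000, 1000, 100],
    #     ...                             sigmas=[3.0, 1.0, 0.0],
    #     ...                             factors=[4, 2, 1])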
def _init_optimizer(self, static, moving, transform, params0,
static_grid2world, moving_grid2world,
starting_affine):
r"""Initializes the registration optimizer
Initializes the optimizer by computing the scale space of the input
images
Parameters
----------
static : array, shape (S, R, C) or (R, C)
the image to be used as reference during optimization.
moving : array, shape (S', R', C') or (R', C')
the image to be used as "moving" during optimization. The
dimensions of the static (S, R, C) and moving (S', R', C') images
do not need to be the same.
transform : instance of Transform
the transformation with respect to whose parameters the gradient
must be computed
params0 : array, shape (n,)
parameters from which to start the optimization. If None, the
optimization will start at the identity transform. n is the
number of parameters of the specified transformation.
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation associated with the static image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation associated with the moving image
starting_affine : string, or matrix, or None
If string:
'mass': align centers of gravity
'voxel-origin': align physical coordinates of voxel (0,0,0)
'centers': align physical coordinates of central voxels
If matrix:
array, shape (dim+1, dim+1)
If None:
Start from identity
"""
self.dim = len(static.shape)
self.transform = transform
n = transform.get_number_of_parameters()
self.nparams = n
if params0 is None:
params0 = self.transform.get_identity_parameters()
self.params0 = params0
if starting_affine is None:
self.starting_affine = np.eye(self.dim + 1)
elif starting_affine == 'mass':
affine_map = transform_centers_of_mass(static,
static_grid2world,
moving,
moving_grid2world)
self.starting_affine = affine_map.affine
elif starting_affine == 'voxel-origin':
affine_map = transform_origins(static, static_grid2world,
moving, moving_grid2world)
self.starting_affine = affine_map.affine
elif starting_affine == 'centers':
affine_map = transform_geometric_centers(static,
static_grid2world,
moving,
moving_grid2world)
self.starting_affine = affine_map.affine
elif (isinstance(starting_affine, np.ndarray) and
starting_affine.shape >= (self.dim, self.dim + 1)):
self.starting_affine = starting_affine
else:
raise ValueError('Invalid starting_affine matrix')
# Extract information from affine matrices to create the scale space
static_direction, static_spacing = \
get_direction_and_spacings(static_grid2world, self.dim)
moving_direction, moving_spacing = \
get_direction_and_spacings(moving_grid2world, self.dim)
static = ((static.astype(np.float64) - static.min()) /
(static.max() - static.min()))
moving = ((moving.astype(np.float64) - moving.min()) /
(moving.max() - moving.min()))
# Build the scale space of the input images
if self.use_isotropic:
self.moving_ss = IsotropicScaleSpace(moving, self.factors,
self.sigmas,
moving_grid2world,
moving_spacing, False)
self.static_ss = IsotropicScaleSpace(static, self.factors,
self.sigmas,
static_grid2world,
static_spacing, False)
else:
self.moving_ss = ScaleSpace(moving, self.levels, moving_grid2world,
moving_spacing, self.ss_sigma_factor,
False)
self.static_ss = ScaleSpace(static, self.levels, static_grid2world,
static_spacing, self.ss_sigma_factor,
False)
def optimize(self, static, moving, transform, params0,
static_grid2world=None, moving_grid2world=None,
starting_affine=None):
r''' Starts the optimization process
Parameters
----------
static : array, shape (S, R, C) or (R, C)
the image to be used as reference during optimization.
moving : array, shape (S', R', C') or (R', C')
the image to be used as "moving" during optimization. It is
necessary to pre-align the moving image to ensure its domain
lies inside the domain of the deformation fields. This is assumed
to be accomplished by "pre-aligning" the moving image towards the
static using an affine transformation given by the
'starting_affine' matrix
transform : instance of Transform
the transformation with respect to whose parameters the gradient
must be computed
params0 : array, shape (n,)
parameters from which to start the optimization. If None, the
optimization will start at the identity transform. n is the
number of parameters of the specified transformation.
static_grid2world : array, shape (dim+1, dim+1), optional
the voxel-to-space transformation associated with the static
image. The default is None, implying the transform is the
identity.
moving_grid2world : array, shape (dim+1, dim+1), optional
the voxel-to-space transformation associated with the moving
image. The default is None, implying the transform is the
identity.
starting_affine : string, or matrix, or None, optional
If string:
'mass': align centers of gravity
'voxel-origin': align physical coordinates of voxel (0,0,0)
'centers': align physical coordinates of central voxels
If matrix:
array, shape (dim+1, dim+1).
If None:
Start from identity.
The default is None.
Returns
-------
affine_map : instance of AffineMap
the resulting affine transformation
'''
self._init_optimizer(static, moving, transform, params0,
static_grid2world, moving_grid2world,
starting_affine)
del starting_affine # Now we must refer to self.starting_affine
# Multi-resolution iterations
original_static_shape = self.static_ss.get_image(0).shape
original_static_grid2world = self.static_ss.get_affine(0)
original_moving_shape = self.moving_ss.get_image(0).shape
original_moving_grid2world = self.moving_ss.get_affine(0)
affine_map = AffineMap(None,
original_static_shape,
original_static_grid2world,
original_moving_shape,
original_moving_grid2world)
for level in range(self.levels - 1, -1, -1):
self.current_level = level
max_iter = self.level_iters[-1 - level]
if self.verbosity >= VerbosityLevels.STATUS:
print('Optimizing level %d [max iter: %d]' % (level, max_iter))
# Resample the smooth static image to the shape of this level
smooth_static = self.static_ss.get_image(level)
current_static_shape = self.static_ss.get_domain_shape(level)
current_static_grid2world = self.static_ss.get_affine(level)
current_affine_map = AffineMap(None,
current_static_shape,
current_static_grid2world,
original_static_shape,
original_static_grid2world)
current_static = current_affine_map.transform(smooth_static)
# The moving image is full resolution
current_moving_grid2world = original_moving_grid2world
current_moving = self.moving_ss.get_image(level)
# Prepare the metric for iterations at this resolution
self.metric.setup(transform, current_static, current_moving,
current_static_grid2world,
current_moving_grid2world, self.starting_affine)
# Optimize this level
if self.options is None:
self.options = {'gtol': 1e-4,
'disp': False}
if self.method == 'L-BFGS-B':
self.options['maxfun'] = max_iter
else:
self.options['maxiter'] = max_iter
if SCIPY_LESS_0_12:
# Older versions don't expect value and gradient from
# the same function
opt = Optimizer(self.metric.distance, self.params0,
method=self.method, jac=self.metric.gradient,
options=self.options)
else:
opt = Optimizer(self.metric.distance_and_gradient, self.params0,
method=self.method, jac=True,
options=self.options)
params = opt.xopt
# Update starting_affine matrix with optimal parameters
T = self.transform.param_to_matrix(params)
self.starting_affine = T.dot(self.starting_affine)
# Start next iteration at identity
self.params0 = self.transform.get_identity_parameters()
affine_map.set_affine(self.starting_affine)
return affine_map
def align_centers_of_mass(static, static_grid2world,
moving, moving_grid2world):
msg = "This function is deprecated please use"
msg += " dipy.align.imaffine.transform_centers_of_mass instead."
warn(msg)
return transform_centers_of_mass(static, static_grid2world,
moving, moving_grid2world)
def align_geometric_centers(static, static_grid2world,
moving, moving_grid2world):
msg = "This function is deprecated please use"
msg += " dipy.align.imaffine.transform_geometric_centers instead."
warn(msg)
return transform_geometric_centers(static, static_grid2world,
moving, moving_grid2world)
def align_origins(static, static_grid2world,
moving, moving_grid2world):
msg = "This function is deprecated please use"
msg += " dipy.align.imaffine.transform_origins instead."
warn(msg)
return transform_origins(static, static_grid2world,
moving, moving_grid2world)
def transform_centers_of_mass(static, static_grid2world,
moving, moving_grid2world):
r""" Transformation to align the center of mass of the input images
Parameters
----------
static : array, shape (S, R, C)
static image
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the static image
moving : array, shape (S, R, C)
moving image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the moving image
Returns
-------
affine_map : instance of AffineMap
the affine transformation (translation only, in this case) aligning
the center of mass of the moving image towards the one of the static
image
"""
dim = len(static.shape)
if static_grid2world is None:
static_grid2world = np.eye(dim + 1)
if moving_grid2world is None:
moving_grid2world = np.eye(dim + 1)
c_static = ndimage.measurements.center_of_mass(np.array(static))
c_static = static_grid2world.dot(c_static+(1,))
c_moving = ndimage.measurements.center_of_mass(np.array(moving))
c_moving = moving_grid2world.dot(c_moving+(1,))
transform = np.eye(dim + 1)
transform[:dim, dim] = (c_moving - c_static)[:dim]
affine_map = AffineMap(transform,
static.shape, static_grid2world,
moving.shape, moving_grid2world)
return affine_map
def transform_geometric_centers(static, static_grid2world,
moving, moving_grid2world):
r""" Transformation to align the geometric center of the input images
With "geometric center" of a volume we mean the physical coordinates of
its central voxel
Parameters
----------
static : array, shape (S, R, C)
static image
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the static image
moving : array, shape (S, R, C)
moving image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the moving image
Returns
-------
affine_map : instance of AffineMap
the affine transformation (translation only, in this case) aligning
the geometric center of the moving image towards the one of the static
image
"""
dim = len(static.shape)
if static_grid2world is None:
static_grid2world = np.eye(dim + 1)
if moving_grid2world is None:
moving_grid2world = np.eye(dim + 1)
c_static = tuple((np.array(static.shape, dtype=np.float64)) * 0.5)
c_static = static_grid2world.dot(c_static+(1,))
c_moving = tuple((np.array(moving.shape, dtype=np.float64)) * 0.5)
c_moving = moving_grid2world.dot(c_moving+(1,))
transform = np.eye(dim + 1)
transform[:dim, dim] = (c_moving - c_static)[:dim]
affine_map = AffineMap(transform,
static.shape, static_grid2world,
moving.shape, moving_grid2world)
return affine_map
def transform_origins(static, static_grid2world,
moving, moving_grid2world):
r""" Transformation to align the origins of the input images
With "origin" of a volume we mean the physical coordinates of
voxel (0,0,0)
Parameters
----------
static : array, shape (S, R, C)
static image
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the static image
moving : array, shape (S, R, C)
moving image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the moving image
Returns
-------
affine_map : instance of AffineMap
the affine transformation (translation only, in this case) aligning
the origin of the moving image towards the one of the static
image
"""
dim = len(static.shape)
if static_grid2world is None:
static_grid2world = np.eye(dim + 1)
if moving_grid2world is None:
moving_grid2world = np.eye(dim + 1)
c_static = static_grid2world[:dim, dim]
c_moving = moving_grid2world[:dim, dim]
transform = np.eye(dim + 1)
transform[:dim, dim] = (c_moving - c_static)[:dim]
affine_map = AffineMap(transform,
static.shape, static_grid2world,
moving.shape, moving_grid2world)
return affine_map
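# --- Editor's note: a minimal usage sketch of the registration pipeline above.
# It is not part of the original module; `static`/`moving` and their
# grid-to-world matrices are assumed to be user-provided 3D data, and
# TranslationTransform3D is one of the transforms shipped in
# dipy.align.transforms.
#
# from dipy.align.imaffine import (AffineRegistration, MutualInformationMetric,
#                                  transform_centers_of_mass)
# from dipy.align.transforms import TranslationTransform3D
#
# metric = MutualInformationMetric(nbins=32, sampling_proportion=None)
# affreg = AffineRegistration(metric=metric,
#                             level_iters=[10000, 1000, 100],
#                             sigmas=[3.0, 1.0, 0.0],
#                             factors=[4, 2, 1])
# # seed the optimization by aligning the centers of mass (see above)
# c_of_mass = transform_centers_of_mass(static, static_grid2world,
#                                       moving, moving_grid2world)
# translation = affreg.optimize(static, moving, TranslationTransform3D(), None,
#                               static_grid2world, moving_grid2world,
#                               starting_affine=c_of_mass.affine)
# resampled = translation.transform(moving)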
|
sinkpoint/dipy
|
dipy/align/imaffine.py
|
Python
|
bsd-3-clause
| 52,930
|
[
"Gaussian"
] |
390455c253aa4aa178a794a4686902f7e4d629d3f436c983141e75f10cab18c8
|
"""
Module focused on the implementation of the Radial Basis Functions interpolation technique.
This technique is still based on the use of a set of parameters, the so-called control points,
as for FFD, but RBF is interpolatory. Another key point of the RBF strategy lies in the
way we can locate the control points: unlike FFD, where control points need to be
placed inside a regular lattice, with RBF we have no such limitation. So we have the possibility
to perform localized control point refinements.
The module is analogous to the freeform one.
:Theoretical Insight:
As reference please consult M. D. Buhmann. Radial Basis Functions, volume 12 of Cambridge
monographs on applied and computational mathematics. Cambridge University Press, UK, 2003.
RBF shape parametrization technique is based on the definition of a map,
:math:`\\mathcal{M}(\\boldsymbol{x}) : \\mathbb{R}^n \\rightarrow \\mathbb{R}^n`, that allows the
possibility of transferring data across non-matching grids and facing the dynamic mesh handling.
The map introduced is defined as follows
.. math::
\\mathcal{M}(\\boldsymbol{x}) = p(\\boldsymbol{x}) + \\sum_{i=1}^{\\mathcal{N}_C} \\gamma_i
\\varphi(\\| \\boldsymbol{x} - \\boldsymbol{x_{C_i}} \\|)
where :math:`p(\\boldsymbol{x})` is a low-degree polynomial term, :math:`\\gamma_i` is the weight,
corresponding to the a-priori selected :math:`\\mathcal{N}_C` control points, associated to the
:math:`i`-th basis function, and :math:`\\varphi(\\| \\boldsymbol{x} - \\boldsymbol{x_{C_i}} \\|)`
a radial function based on the Euclidean distance between the control points position
:math:`\\boldsymbol{x_{C_i}}` and :math:`\\boldsymbol{x}`. A radial basis function, generally, is
a real-valued function whose value depends only on the distance from the origin, so that
:math:`\\varphi(\\boldsymbol{x}) = \\tilde{\\varphi}(\\| \\boldsymbol{x} \\|)`.
The matrix version of the formula above is:
.. math::
\\mathcal{M}(\\boldsymbol{x}) = \\boldsymbol{c} + \\boldsymbol{Q}\\boldsymbol{x} +
\\boldsymbol{W^T}\\boldsymbol{d}(\\boldsymbol{x})
where :math:`\\boldsymbol{d}(\\boldsymbol{x})` is the vector of the basis functions evaluated at
the distances between :math:`\\boldsymbol{x}` and the control points.
The idea is that after the computation of the weights and the polynomial terms from the coordinates
of the control points before and after the deformation, we can deform all the points of the mesh
accordingly.
Among the most commonly used radial basis functions for modelling 2D and 3D shapes, we consider
Gaussian splines, Multi-quadratic biharmonic splines, Inverted multi-quadratic biharmonic splines,
Thin-plate splines and Beckert and Wendland :math:`C^2` basis all defined and implemented below.
"""
import numpy as np
class RBF(object):
"""
Class that handles the Radial Basis Functions interpolation on the mesh points.
:param RBFParameters rbf_parameters: parameters of the RBF.
:param numpy.ndarray original_mesh_points: coordinates of the original points of the mesh.
:cvar RBFParameters parameters: parameters of the RBF.
:cvar numpy.ndarray original_mesh_points: coordinates of the original points of the mesh.
The shape is `n_points`-by-3.
:cvar numpy.ndarray modified_mesh_points: coordinates of the points of the deformed mesh.
The shape is `n_points`-by-3.
:cvar dict bases: a dictionary that associates the names of the basis functions
implemented to the actual implementation.
:cvar numpy.matrix weights: the matrix formed by the weights corresponding to the a-priori
selected N control points, associated to the basis functions and c and Q terms that
describe the polynomial of order one p(x) = c + Qx. The shape is
(n_control_points+1+3)-by-3. It is computed internally.
:Example:
>>> import pygem.radial as rbf
>>> import pygem.params as rbfp
>>> import numpy as np
>>> rbf_parameters = rbfp.RBFParameters()
>>> rbf_parameters.read_parameters('tests/test_datasets/parameters_rbf_cube.prm')
>>> nx, ny, nz = (20, 20, 20)
>>> mesh = np.zeros((nx * ny * nz, 3))
>>> xv = np.linspace(0, 1, nx)
>>> yv = np.linspace(0, 1, ny)
>>> zv = np.linspace(0, 1, nz)
>>> z, y, x = np.meshgrid(zv, yv, xv)
>>> mesh = np.array([x.ravel(), y.ravel(), z.ravel()])
>>> original_mesh_points = mesh.T
>>> radial_trans = rbf.RBF(rbf_parameters, original_mesh_points)
>>> radial_trans.perform()
>>> new_mesh_points = radial_trans.modified_mesh_points
"""
def __init__(self, rbf_parameters, original_mesh_points):
self.parameters = rbf_parameters
self.original_mesh_points = original_mesh_points
self.modified_mesh_points = None
self.bases = {'gaussian_spline': self.gaussian_spline, \
'multi_quadratic_biharmonic_spline': self.multi_quadratic_biharmonic_spline, \
'inv_multi_quadratic_biharmonic_spline': self.inv_multi_quadratic_biharmonic_spline, \
'thin_plate_spline': self.thin_plate_spline, \
'beckert_wendland_c2_basis': self.beckert_wendland_c2_basis}
# to make the str callable we have to use a dictionary with all the implemented
# radial basis functions
if self.parameters.basis in self.bases:
self.basis = self.bases[self.parameters.basis]
else:
raise NameError('The name of the basis function in the parameters file is not correct ' + \
'or not implemented. Check the documentation for all the available functions.')
self.weights = self._get_weights(self.parameters.original_control_points, \
self.parameters.deformed_control_points)
@staticmethod
def gaussian_spline(X, r):
"""
It implements the following formula:
.. math::
\\varphi(\\| \\boldsymbol{x} \\|) = e^{-\\frac{\\| \\boldsymbol{x} \\|^2}{r^2}}
:param numpy.ndarray X: the vector x in the formula above.
:param float r: the parameter r in the formula above.
:return: result: the result of the formula above.
:rtype: float
"""
norm = np.linalg.norm(X)
result = np.exp(-(norm * norm) / (r * r))
return result
@staticmethod
def multi_quadratic_biharmonic_spline(X, r):
"""
It implements the following formula:
.. math::
\\varphi(\\| \\boldsymbol{x} \\|) = \\sqrt{\\| \\boldsymbol{x} \\|^2 + r^2}
:param numpy.ndarray X: the vector x in the formula above.
:param float r: the parameter r in the formula above.
:return: result: the result of the formula above.
:rtype: float
"""
norm = np.linalg.norm(X)
result = np.sqrt((norm * norm) + (r * r))
return result
@staticmethod
def inv_multi_quadratic_biharmonic_spline(X, r):
"""
It implements the following formula:
.. math::
\\varphi(\\| \\boldsymbol{x} \\|) = (\\| \\boldsymbol{x} \\|^2 + r^2 )^{-\\frac{1}{2}}
:param numpy.ndarray X: the vector x in the formula above.
:param float r: the parameter r in the formula above.
:return: result: the result of the formula above.
:rtype: float
"""
norm = np.linalg.norm(X)
result = 1.0 / (np.sqrt((norm * norm) + (r * r)))
return result
@staticmethod
def thin_plate_spline(X, r):
"""
It implements the following formula:
.. math::
\\varphi(\\| \\boldsymbol{x} \\|) = \\left\\| \\frac{\\boldsymbol{x} }{r} \\right\\|^2
\\ln \\left\\| \\frac{\\boldsymbol{x} }{r} \\right\\|
:param numpy.ndarray X: the vector x in the formula above.
:param float r: the parameter r in the formula above.
:return: result: the result of the formula above.
:rtype: float
"""
arg = X/r
norm = np.linalg.norm(arg)
result = norm * norm
if norm > 0:
result *= np.log(norm)
return result
@staticmethod
def beckert_wendland_c2_basis(X, r):
"""
It implements the following formula:
.. math::
\\varphi(\\| \\boldsymbol{x} \\|) = \\left( 1 - \\frac{\\| \\boldsymbol{x} \\|}{r} \\right)^4_+
\\left( 4 \\frac{\\| \\boldsymbol{x} \\|}{r} + 1 \\right)
:param numpy.ndarray X: the vector x in the formula above.
:param float r: the parameter r in the formula above.
:return: result: the result of the formula above.
:rtype: float
"""
norm = np.linalg.norm(X)
arg = norm / r
first = 0
if (1 - arg) > 0:
first = np.power((1 - arg), 4)
second = (4 * arg) + 1
result = first * second
return result
def _distance_matrix(self, X1, X2):
"""
This private method returns the following matrix:
:math:`\\boldsymbol{D_{ij}} = \\varphi(\\| \\boldsymbol{x_i} - \\boldsymbol{y_j} \\|)`
:param numpy.ndarray X1: the vector x in the formula above.
:param numpy.ndarray X2: the vector y in the formula above.
:return: matrix: the matrix D.
:rtype: numpy.ndarray
"""
m, n = X1.shape[0], X2.shape[0]
matrix = np.zeros(shape=(m, n))
for i in range(0, m):
for j in range(0, n):
matrix[i][j] = self.basis(X1[i] - X2[j], self.parameters.radius)
return matrix
def _get_weights(self, X, Y):
"""
This private method, given the original control points and the deformed ones, returns the matrix
with the weights and the polynomial terms, that is :math:`W`, :math:`c^T` and :math:`Q^T`.
The shape is (n_control_points+1+3)-by-3.
:param numpy.ndarray X: it is an n_control_points-by-3 array with the
coordinates of the original interpolation control points before the deformation.
:param numpy.ndarray Y: it is an n_control_points-by-3 array with the
coordinates of the interpolation control points after the deformation.
:return: weights: the matrix with the weights and the polynomial terms.
:rtype: numpy.matrix
"""
n_points = X.shape[0]
dim = X.shape[1]
identity = np.ones(n_points).reshape(n_points, 1)
dist = self._distance_matrix(X, X)
H = np.bmat([[dist, identity, X], [identity.T, np.zeros((1, 1)), np.zeros((1, dim))], \
[X.T, np.zeros((dim, 1)), np.zeros((dim, dim))]])
rhs = np.bmat([[Y], [np.zeros((1, dim))], [np.zeros((dim, dim))]])
inv_H = np.linalg.inv(H)
weights = np.dot(inv_H, rhs)
return weights
def perform(self):
"""
This method performs the deformation of the mesh points. After the execution
it sets `self.modified_mesh_points`.
"""
n_points = self.original_mesh_points.shape[0]
dist = self._distance_matrix(self.original_mesh_points, self.parameters.original_control_points)
identity = np.ones(n_points).reshape(n_points, 1)
H = np.bmat([[dist, identity, self.original_mesh_points]])
self.modified_mesh_points = np.asarray(np.dot(H, self.weights))
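# --- Editor's note: a self-contained numeric sketch of the linear system that
# _get_weights and perform solve above. All data here are hypothetical; only
# numpy is required.
#
# import numpy as np
# X = np.array([[0., 0., 0.], [1., 0., 0.],
#               [0., 1., 0.], [0., 0., 1.]])     # original control points
# Y = X + 0.1                                    # deformed control points
# r = 0.5                                        # radius of the basis
# phi = lambda v: np.exp(-np.dot(v, v) / r**2)   # gaussian_spline, as above
# D = np.array([[phi(xi - xj) for xj in X] for xi in X])
# ones = np.ones((4, 1))
# H = np.block([[D, ones, X],
#               [ones.T, np.zeros((1, 1)), np.zeros((1, 3))],
#               [X.T, np.zeros((3, 1)), np.zeros((3, 3))]])
# rhs = np.vstack([Y, np.zeros((1, 3)), np.zeros((3, 3))])
# weights = np.linalg.solve(H, rhs)              # rows: [W; c^T; Q^T]
# # deform an arbitrary point x via M(x) = c + Qx + W^T d(x); for this toy
# # rigid shift the result is x + 0.1
# x = np.array([0.2, 0.3, 0.4])
# d = np.array([phi(x - xc) for xc in X])
# new_x = np.hstack([d, [1.0], x]).dot(weights)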
|
fsalmoir/PyGeM
|
pygem/radial.py
|
Python
|
mit
| 10,169
|
[
"Gaussian"
] |
7378d2068a068ab5749487d059bc34fe88fa35b2918f5447e8b37a33ce7afe91
|
#!/usr/bin/env python
"""
Predict data with CNN trained using the Lasagne library:
https://github.com/Lasagne
"""
from __future__ import print_function
import argparse
import sys
import os
import time
import csv
import numpy as np
from scipy.io import netcdf
from scipy.stats import pearsonr
from sklearn.metrics import roc_auc_score
import theano
import theano.tensor as T
import lasagne
import data_io_func
import NN_func
################################################################################
# PARSE COMMANDLINE OPTIONS
################################################################################
parser = argparse.ArgumentParser()
parser.add_argument('-data', '--datafile', help="file with data to be predicted")
parser.add_argument('-data_aa', '--aafile', help="file with AA sequences of the data to be predicted")
parser.add_argument('-ensemblelist', '--ensemblelist', help="text file containing list of hyper parameters and weight files")
parser.add_argument('-out', '--outfile', help="file to store output table")
parser.add_argument('-max_pep_seq_length', '--max_pep_seq_length', help="maximal peptide sequence length, default = -1", default=-1)
args = parser.parse_args()
# get data file:
if args.datafile != None:
datafile = args.datafile
else:
sys.stderr.write("Please specify data file!\n")
sys.exit(2)
# get data file with AA sequences:
if args.aafile != None:
aafile = args.aafile
else:
sys.stderr.write("Please specify data file with AA sequences!\n")
sys.exit(2)
# get ensemble list:
if args.ensemblelist != None:
ensemblelist = args.ensemblelist
else:
sys.stderr.write("Please specify data file with hyper parameters and weight files!\n")
sys.exit(2)
# get outputfile:
if args.outfile != None:
outfilename = args.outfile
else:
sys.stderr.write("Please specify output file!\n")
sys.exit(2)
try:
MAX_PEP_SEQ_LEN=int(args.max_pep_seq_length)
except:
sys.stderr.write("Problem with max. peptide sequence length specification (option -max_pep_seq_length)!\n")
sys.exit(2)
################################################################################
# READ ENSEMBLE FILE
################################################################################
# read list of ensembles:
ensembles=[]
with open(ensemblelist, 'rb') as infile:
ensembles = list(csv.reader(infile, delimiter='\t'))
ensembles=filter(None,ensembles)
################################################################################
# LOAD DATA
################################################################################
print("# Loading data...")
# read in data as a list of numpy ndarrays:
X_pep,X_mhc,y = data_io_func.netcdf2pep(datafile)
# get number of features:
N_FEATURES = X_pep[0].shape[1]
# get MHC pseudo sequence length (assumes they all have the same length):
MHC_SEQ_LEN = X_mhc[0].shape[0]
# get target length:
T_LEN = y[0].shape[0]
# find max peptide sequence length (if not specified)
if MAX_PEP_SEQ_LEN == -1:
MAX_PEP_SEQ_LEN = len(max(X_pep, key=len))
# save sequences as np.ndarray instead of list of np.ndarrays:
X_lstm,X_lstm_mask = data_io_func.pad_pep_mhc_mask_final(X_pep, X_mhc, MAX_PEP_SEQ_LEN, MHC_SEQ_LEN)
y = data_io_func.pad_seqs(y, T_LEN)
# save Amino Acid sequences and MHC receptors:
pep_aa,mhc_molecule = data_io_func.get_pep_aa_mhc(aafile, MAX_PEP_SEQ_LEN)
################################################################################
# PREDICT SINGLE NETWORKS
################################################################################
# variable to store predictions:
all_pred = np.zeros(( len(ensembles),len(X_pep) ))
# go through each net and predict:
count=0
old_hyper_params=''
for l in ensembles:
paramfile=l[0]
# LOAD PARAMETERS:----------------------------------------------------------
# load parameters of best model:
best_params = np.load(paramfile)['arr_0']
ARCHITECTURE = np.load(paramfile)['arr_1']
hyper_params = np.load(paramfile)['arr_2']
# BUILD NETWORK AND COMPILE TRAINING FUNCTION:------------------------------
if set(hyper_params) != set(old_hyper_params):
if ARCHITECTURE == "lstm":
N_FEATURES=int(hyper_params[0])
N_LSTM=int(hyper_params[1])
ACTIVATION=hyper_params[2]
DROPOUT=float(hyper_params[3])
N_HID=int(hyper_params[4])
W_INIT=hyper_params[5]
network,in_pep_mhc,in_pep_mhc_mask = NN_func.build_lstm(n_features=N_FEATURES,
n_lstm=N_LSTM,
activation=ACTIVATION,
dropout=DROPOUT,
n_hid=N_HID,
w_init=W_INIT)
else:
sys.stderr.write("Unknown architecture specified (option -architecture)!\n")
sys.exit(2)
# COMPILE PREDICTION FUNCTION-----------------------------------------------
prediction = lasagne.layers.get_output(network, deterministic=True)
# compile validation function:
pred_fn = theano.function([in_pep_mhc.input_var, in_pep_mhc_mask.input_var], prediction, on_unused_input='warn')
# SET WEIGHTS---------------------------------------------------------------
# get current parameters:
params = lasagne.layers.get_all_param_values(network)
# check if dimensions match:
assert len(best_params) == len(params)
for j in range(0,len(best_params)):
assert best_params[j].shape == params[j].shape
# set parameters in network:
lasagne.layers.set_all_param_values(network, best_params)
# RUN FORWARD PASS----------------------------------------------------------
# predict validation set:
if ARCHITECTURE == "lstm":
all_pred[count] = pred_fn(X_lstm, X_lstm_mask).flatten()
else:
sys.stderr.write("Unknown data encoding in ensemble list!\n")
sys.exit(2)
old_hyper_params=hyper_params
count +=1
# calculate mean predictions:
pred = np.mean(all_pred, axis=0)
################################################################################
# PRINT RESULTS TABLE
################################################################################
print("# Printing results...")
assert pred.shape[0] == y.shape[0] == len(pep_aa) == len(mhc_molecule)
outfile = open(outfilename, "w")
outfile.write("peptide\tmhc\tprediction\ttarget\n")
y=y.flatten()
for i in range(0,len(pep_aa)):
outfile.write(pep_aa[i] + "\t" + mhc_molecule[i] + "\t" + str(pred[i]) + "\t" + str(y[i]) + "\n")
# calculate PCC:
pcc,pval = pearsonr(pred.flatten(), y.flatten())
# calculate AUC:
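# (editor's note: 0.42562 presumably encodes the standard 500 nM binding
# threshold under the 1 - log(IC50)/log(50000) transform common for MHC data)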
y_binary = np.where(y>=0.42562, 1,0)
auc = roc_auc_score(y_binary.flatten(), pred.flatten())
outfile.write("# PCC: " + str(pcc) + " p-value: " + str(pval) + " AUC: " + str(auc) + "\n")
|
vanessajurtz/lasagne4bio
|
peptide_MHCII/scripts/lstm_ensemble.py
|
Python
|
gpl-3.0
| 6,895
|
[
"NetCDF"
] |
3b4e4659cf388b6e1eb6ac111ff81e0f1d678085538867a965935eebb93049a0
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import os
def test_failure():
"""Fail if the MDA_FAILURE_TEST environment variable is set.
"""
# Have a file open to trigger an output from the open_files plugin.
f = open('./failure.txt', 'w')
if u'MDA_FAILURE_TEST' in os.environ:
assert False
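# Editor's note: presumably exercised by setting the environment variable
# checked above, e.g.:
#   MDA_FAILURE_TEST=1 pytest testsuite/MDAnalysisTests/test_failure.py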
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/test_failure.py
|
Python
|
gpl-2.0
| 1,313
|
[
"MDAnalysis"
] |
00aae7dc49a082cfc947dd92a629260387b50718863d1e67d446642a809c572c
|
"""
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
# Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..exceptions import ConvergenceWarning
from ..utils import check_array, as_float_array, check_random_state
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
__all__ = ['fastica', 'FastICA']
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W.
Parameters
----------
w : ndarray of shape (n,)
Array to be orthogonalized
W : ndarray of shape (p, n)
Null space definition
j : int < p
The no of (from the first) rows of Null space W wrt which w is
orthogonalized.
Notes
-----
Assumes that W is orthogonal
w changed in place
"""
w -= np.linalg.multi_dot([w, W[:j].T, W[:j]])
return w
def _sym_decorrelation(W):
""" Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
s, u = linalg.eigh(np.dot(W, W.T))
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
return np.linalg.multi_dot([u * (1. / np.sqrt(s)), u.T, W])
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
"""Deflationary FastICA using fun approx to neg-entropy function
Used internally by FastICA.
"""
n_components = w_init.shape[0]
W = np.zeros((n_components, n_components), dtype=X.dtype)
n_iter = []
# j is the index of the extracted component
for j in range(n_components):
w = w_init[j, :].copy()
w /= np.sqrt((w ** 2).sum())
for i in range(max_iter):
gwtx, g_wtx = g(np.dot(w.T, X), fun_args)
w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
_gs_decorrelation(w1, W, j)
w1 /= np.sqrt((w1 ** 2).sum())
lim = np.abs(np.abs((w1 * w).sum()) - 1)
w = w1
if lim < tol:
break
n_iter.append(i + 1)
W[j, :] = w
return W, max(n_iter)
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
"""Parallel FastICA.
Used internally by FastICA --main loop
"""
W = _sym_decorrelation(w_init)
del w_init
p_ = float(X.shape[1])
for ii in range(max_iter):
gwtx, g_wtx = g(np.dot(W, X), fun_args)
W1 = _sym_decorrelation(np.dot(gwtx, X.T) / p_
- g_wtx[:, np.newaxis] * W)
del gwtx, g_wtx
# builtin max, abs are faster than numpy counterparts.
lim = max(abs(abs(np.diag(np.dot(W1, W.T))) - 1))
W = W1
if lim < tol:
break
else:
warnings.warn('FastICA did not converge. Consider increasing '
'tolerance or the maximum number of iterations.',
ConvergenceWarning)
return W, ii + 1
# Some standard non-linear functions.
# XXX: these should be optimized, as they can be a bottleneck.
def _logcosh(x, fun_args=None):
alpha = fun_args.get('alpha', 1.0) # comment it out?
x *= alpha
gx = np.tanh(x, x) # apply the tanh inplace
g_x = np.empty(x.shape[0])
# XXX compute in chunks to avoid extra allocation
for i, gx_i in enumerate(gx): # please don't vectorize.
g_x[i] = (alpha * (1 - gx_i ** 2)).mean()
return gx, g_x
def _exp(x, fun_args):
exp = np.exp(-(x ** 2) / 2)
gx = x * exp
g_x = (1 - x ** 2) * exp
return gx, g_x.mean(axis=-1)
def _cube(x, fun_args):
return x ** 3, (3 * x ** 2).mean(axis=-1)
def fastica(X, n_components=None, *, algorithm="parallel", whiten=True,
fun="logcosh", fun_args=None, max_iter=200, tol=1e-04, w_init=None,
random_state=None, return_X_mean=False, compute_sources=True,
return_n_iter=False):
"""Perform Fast Independent Component Analysis.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
n_components : int, default=None
Number of components to extract. If None no dimension reduction
is performed.
algorithm : {'parallel', 'deflation'}, default='parallel'
Apply a parallel or deflational FASTICA algorithm.
whiten : bool, default=True
If True perform an initial whitening of the data.
If False, the data is assumed to have already been
preprocessed: it should be centered, normed and white.
Otherwise you will get incorrect results.
In this case the parameter n_components will be ignored.
fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. The derivative should be averaged along its last dimension.
Example:
def my_g(x):
return x ** 3, np.mean(3 * x ** 2, axis=-1)
fun_args : dict, default=None
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
max_iter : int, default=200
Maximum number of iterations to perform.
tol : float, default=1e-04
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : ndarray of shape (n_components, n_components), default=None
Initial un-mixing array of dimension (n.comp,n.comp).
If None (default) then an array of normal r.v.'s is used.
random_state : int, RandomState instance or None, default=None
Used to initialize ``w_init`` when not specified, with a
normal distribution. Pass an int, for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
return_X_mean : bool, default=False
If True, X_mean is returned too.
compute_sources : bool, default=True
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
K : ndarray of shape (n_components, n_features) or None
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : ndarray of shape (n_components, n_components)
The square matrix that unmixes the data after whitening.
The mixing matrix is the pseudo-inverse of matrix ``W K``
if K is not None, else it is the inverse of W.
S : ndarray of shape (n_samples, n_components) or None
Estimated source matrix
X_mean : ndarray of shape (n_features,)
The mean over features. Returned only if return_X_mean is True.
n_iter : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge. This is
returned only when return_n_iter is set to `True`.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to 'un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
While FastICA was proposed to estimate as many sources
as features, it is possible to estimate less by setting
n_components < n_features. In this case K is not a square matrix
and the estimated A is the pseudo-inverse of ``W K``.
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
Implemented using FastICA:
*A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430*
"""
est = FastICA(n_components=n_components, algorithm=algorithm,
whiten=whiten, fun=fun, fun_args=fun_args,
max_iter=max_iter, tol=tol, w_init=w_init,
random_state=random_state)
sources = est._fit(X, compute_sources=compute_sources)
if whiten:
if return_X_mean:
if return_n_iter:
return (est.whitening_, est._unmixing, sources, est.mean_,
est.n_iter_)
else:
return est.whitening_, est._unmixing, sources, est.mean_
else:
if return_n_iter:
return est.whitening_, est._unmixing, sources, est.n_iter_
else:
return est.whitening_, est._unmixing, sources
else:
if return_X_mean:
if return_n_iter:
return None, est._unmixing, sources, None, est.n_iter_
else:
return None, est._unmixing, sources, None
else:
if return_n_iter:
return None, est._unmixing, sources, est.n_iter_
else:
return None, est._unmixing, sources
class FastICA(TransformerMixin, BaseEstimator):
"""FastICA: a fast algorithm for Independent Component Analysis.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
n_components : int, default=None
Number of components to use. If None is passed, all are used.
algorithm : {'parallel', 'deflation'}, default='parallel'
Apply parallel or deflational algorithm for FastICA.
whiten : bool, default=True
If whiten is false, the data is already considered to be
whitened, and no whitening is performed.
fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. Example::
def my_g(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
fun_args : dict, default=None
Arguments to send to the functional form.
If empty and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}.
max_iter : int, default=200
Maximum number of iterations during fit.
tol : float, default=1e-4
Tolerance on update at each iteration.
w_init : ndarray of shape (n_components, n_components), default=None
The mixing matrix to be used to initialize the algorithm.
random_state : int, RandomState instance or None, default=None
Used to initialize ``w_init`` when not specified, with a
normal distribution. Pass an int, for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
The linear operator to apply to the data to get the independent
sources. This is equal to the unmixing matrix when ``whiten`` is
False, and equal to ``np.dot(unmixing_matrix, self.whitening_)`` when
``whiten`` is True.
mixing_ : ndarray of shape (n_features, n_components)
The pseudo-inverse of ``components_``. It is the linear operator
that maps independent sources to the data.
mean_ : ndarray of shape(n_features,)
The mean over features. Only set if `self.whiten` is True.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
n_iter_ : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge.
whitening_ : ndarray of shape (n_components, n_features)
Only set if whiten is 'True'. This is the pre-whitening matrix
that projects data onto the first `n_components` principal components.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.decomposition import FastICA
>>> X, _ = load_digits(return_X_y=True)
>>> transformer = FastICA(n_components=7,
... random_state=0)
>>> X_transformed = transformer.fit_transform(X)
>>> X_transformed.shape
(1797, 7)
Notes
-----
Implementation based on
*A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430*
"""
def __init__(self, n_components=None, *, algorithm='parallel', whiten=True,
fun='logcosh', fun_args=None, max_iter=200, tol=1e-4,
w_init=None, random_state=None):
super().__init__()
if max_iter < 1:
raise ValueError("max_iter should be greater than 1, got "
"(max_iter={})".format(max_iter))
self.n_components = n_components
self.algorithm = algorithm
self.whiten = whiten
self.fun = fun
self.fun_args = fun_args
self.max_iter = max_iter
self.tol = tol
self.w_init = w_init
self.random_state = random_state
def _fit(self, X, compute_sources=False):
"""Fit the model
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
compute_sources : bool, default=False
If False, sources are not computed, only the rotation matrix.
This can save memory when working with big data. Defaults to False.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
"""
XT = self._validate_data(X, copy=self.whiten, dtype=FLOAT_DTYPES,
ensure_min_samples=2).T
fun_args = {} if self.fun_args is None else self.fun_args
random_state = check_random_state(self.random_state)
alpha = fun_args.get('alpha', 1.0)
if not 1 <= alpha <= 2:
raise ValueError('alpha must be in [1,2]')
if self.fun == 'logcosh':
g = _logcosh
elif self.fun == 'exp':
g = _exp
elif self.fun == 'cube':
g = _cube
elif callable(self.fun):
def g(x, fun_args):
return self.fun(x, **fun_args)
else:
exc = ValueError if isinstance(self.fun, str) else TypeError
raise exc(
"Unknown function %r;"
" should be one of 'logcosh', 'exp', 'cube' or callable"
% self.fun
)
n_features, n_samples = XT.shape
n_components = self.n_components
if not self.whiten and n_components is not None:
n_components = None
warnings.warn('Ignoring n_components with whiten=False.')
if n_components is None:
n_components = min(n_samples, n_features)
if (n_components > min(n_samples, n_features)):
n_components = min(n_samples, n_features)
warnings.warn(
'n_components is too large: it will be set to %s'
% n_components
)
if self.whiten:
# Centering the features of X
X_mean = XT.mean(axis=-1)
XT -= X_mean[:, np.newaxis]
# Whitening and preprocessing by PCA
u, d, _ = linalg.svd(XT, full_matrices=False, check_finite=False)
del _
K = (u / d).T[:n_components] # see (6.33) p.140
del u, d
X1 = np.dot(K, XT)
# see (13.6) p.267 Here X1 is white and data
# in X has been projected onto a subspace by PCA
X1 *= np.sqrt(n_samples)
else:
# X must be cast to floats to avoid typing issues with numpy
# 2.0 and the line below
X1 = as_float_array(XT, copy=False) # copy has been taken care of
w_init = self.w_init
if w_init is None:
w_init = np.asarray(random_state.normal(
size=(n_components, n_components)), dtype=X1.dtype)
else:
w_init = np.asarray(w_init)
if w_init.shape != (n_components, n_components):
raise ValueError(
'w_init has invalid shape -- should be %(shape)s'
% {'shape': (n_components, n_components)})
kwargs = {'tol': self.tol,
'g': g,
'fun_args': fun_args,
'max_iter': self.max_iter,
'w_init': w_init}
if self.algorithm == 'parallel':
W, n_iter = _ica_par(X1, **kwargs)
elif self.algorithm == 'deflation':
W, n_iter = _ica_def(X1, **kwargs)
else:
raise ValueError('Invalid algorithm: must be either `parallel` or'
' `deflation`.')
del X1
if compute_sources:
if self.whiten:
S = np.linalg.multi_dot([W, K, XT]).T
else:
S = np.dot(W, XT).T
else:
S = None
self.n_iter_ = n_iter
if self.whiten:
self.components_ = np.dot(W, K)
self.mean_ = X_mean
self.whitening_ = K
else:
self.components_ = W
self.mixing_ = linalg.pinv(self.components_, check_finite=False)
self._unmixing = W
return S
def fit_transform(self, X, y=None):
"""Fit the model and recover the sources from X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
"""
return self._fit(X, compute_sources=True)
def fit(self, X, y=None):
"""Fit the model to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
self
"""
self._fit(X, compute_sources=False)
return self
def transform(self, X, copy=True):
"""Recover the sources from X (apply the unmixing matrix).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to transform, where n_samples is the number of samples
and n_features is the number of features.
copy : bool, default=True
If False, data passed to fit can be overwritten. Defaults to True.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
"""
check_is_fitted(self)
X = self._validate_data(X, copy=(copy and self.whiten),
dtype=FLOAT_DTYPES, reset=False)
if self.whiten:
X -= self.mean_
return np.dot(X, self.components_.T)
def inverse_transform(self, X, copy=True):
"""Transform the sources back to the mixed data (apply mixing matrix).
Parameters
----------
X : array-like of shape (n_samples, n_components)
Sources, where n_samples is the number of samples
and n_components is the number of components.
copy : bool, default=True
If False, data passed to fit are overwritten. Defaults to True.
Returns
-------
X_new : ndarray of shape (n_samples, n_features)
"""
check_is_fitted(self)
X = check_array(X, copy=(copy and self.whiten), dtype=FLOAT_DTYPES)
X = np.dot(X, self.mixing_.T)
if self.whiten:
X += self.mean_
return X
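# --- Editor's note: a minimal blind source separation sketch using the
# estimator defined above (synthetic signals; not part of the original module).
#
# import numpy as np
# from sklearn.decomposition import FastICA
# t = np.linspace(0, 8, 2000)
# S = np.c_[np.sin(2 * t), np.sign(np.sin(3 * t))]  # two independent sources
# A = np.array([[1.0, 1.0], [0.5, 2.0]])            # mixing matrix
# X = S @ A.T                                       # observed mixtures
# ica = FastICA(n_components=2, random_state=0)
# S_hat = ica.fit_transform(X)   # estimated sources (up to scale and order)
# X_hat = ica.inverse_transform(S_hat)              # reconstructs the mixtures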
|
kevin-intel/scikit-learn
|
sklearn/decomposition/_fastica.py
|
Python
|
bsd-3-clause
| 21,028
|
[
"Gaussian"
] |
e3de0b5f6ce70d8c66bfca2ccfc1e3ff45233d9204eb25fe4c8c0df850eaebdd
|
import json
import random
dialogosf=open("dialogos.txt",'r')
frasesf=open("fraces.txt",'r')
dialogos=json.load(dialogosf)
frases=json.load(frasesf)
dialogosf.close()
frasesf.close()
def frase(rules):
posibles = dialogos[rules["personaje"]]
validos = []
tamano = 0
for dialogo in posibles:
if(esValida(dialogo,rules) and len(dialogo)>tamano):
validos.append(dialogo)
if len(validos)>0:
tamano = len(validos[0])
better = validos[0]
for dialogo in validos:
if len(dialogo)>tamano:
tamano = len(dialogo)
better = dialogo
if len(dialogo)==tamano:
if(random.randint(0,2) == 1):
better = dialogo
print frases[better["id"]]
def esValida(reglasDialogo,query):
for regla in reglasDialogo:
if((regla!="personaje" and regla!="id") and (regla not in query or reglasDialogo[regla]!=query[regla])):
return False
return True
query = {"personaje":"brian","ubicacion":"pantano","hambre":"50"}
frase(query)
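# --- Editor's note: the JSON layout the two input files presumably follow,
# inferred from the lookups above (contents are hypothetical):
#
# dialogos.txt:
#   {"brian": [{"id": "d1", "ubicacion": "pantano", "hambre": "50"},
#              {"id": "d2", "ubicacion": "pantano"}]}
# fraces.txt:
#   {"d1": "This swamp again...", "d2": "I am hungry."}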
|
cangothic/practica-de-complejidad
|
practica brian y carlos/desserializa.py
|
Python
|
mit
| 1,069
|
[
"Brian"
] |
4317487bf407306c83d9eb9fd6711497367996e7cb9971721764dace1fa4e1ce
|
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
#
import argparse
import os
import re
import mx
from datetime import datetime, date, timedelta
def _format_datetime(dt):
def _fmt(num, unit):
return "{:.0f} {}".format(num, unit)
diff = datetime.now() - dt
num = diff.total_seconds()
for unit, top, max_num in [
('seconds', 60, 120),
('minutes', 60, 120),
('hours', 24, 48),
('days', 365, 365),
]:
if num < max_num:
return _fmt(num, unit)
num /= top
unit = "years"
return _fmt(num, unit)
def _format_bytes(num):
def _fmt(num, unit):
return "{:.0f} {}".format(num, unit)
for unit in ['Byte', 'KiB', 'MiB', 'GiB']:
if num < 1024.0:
return _fmt(num, unit)
num /= 1024.0
unit = 'TiB'
return _fmt(num, unit)
_has_scandir = 'scandir' in dir(os)
def _get_size_in_bytes(path, isdir=None):
if isdir is None:
if not os.path.exists(path) or os.path.islink(path):
return 0
if isdir or os.path.isdir(path):
if not _has_scandir:
return sum(_get_size_in_bytes(os.path.join(path, f)) for f in os.listdir(path))
s = 0
with os.scandir(path) as it:
for e in it:
if not e.is_symlink():
if e.is_dir(follow_symlinks=False):
s += _get_size_in_bytes(e.path, isdir=True)
else:
s += e.stat(follow_symlinks=False).st_size
return s
return os.path.getsize(path)
def _listdir(path):
if os.path.isdir(path):
return [p for p in os.listdir(path) if not os.path.islink(p)]
return []
class TimeAction(argparse.Action):
pattern = re.compile(r'^(?:(?P<year>\d\d\d\d)-(?P<month>\d\d)-(?P<day>\d\d))?T?(?:(?P<hour>\d\d):(?P<minute>\d\d)(?::(?P<second>\d\d))?)?$')
rel_pattern = re.compile(r'^(?P<value>\d+)(?P<unit>min?u?t?e?|da?y?|we?e?k?|mon?t?h?|ye?a?r?)s?$')
fmt = r'%Y-%m-%dT%H:%M:%S or [0-9]+(minutes|days|weeks|months|years)'
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(TimeAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
m = TimeAction.pattern.match(values)
if m:
# default values: today 00:00
today = datetime.combine(date.today(), datetime.min.time())
date_dict = {k: int(v or getattr(today, k)) for k, v in m.groupdict().items()}
td = datetime(**date_dict)
setattr(namespace, self.dest, td)
else:
m = TimeAction.rel_pattern.match(values)
if not m:
raise ValueError('argument {}: value {} does not match format {}'.format(option_string, values, TimeAction.fmt))
minutes_per_day = 24 * 60
unit = m.group('unit')
value = int(m.group('value'))
if unit.startswith('y'):
minutes = value * 365 * minutes_per_day
elif unit.startswith('mo'):
minutes = value * 30 * minutes_per_day
elif unit.startswith('w'):
minutes = value * 7 * minutes_per_day
elif unit.startswith('d'):
minutes = value * minutes_per_day
elif unit.startswith('mi'):
minutes = value
else:
raise ValueError('argument {}: Unexpected unit: {}'.format(option_string, unit))
td = datetime.today() - timedelta(minutes=minutes)
setattr(namespace, self.dest, td)
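# Editor's note: formats accepted by TimeAction, per the patterns above
# (values are illustrative):
#   absolute: 2021-03-01T12:30, 2021-03-01, 12:30, 12:30:15
#   relative: 90minutes, 3days, 2weeks, 6months, 1year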
@mx.command('mx', 'gc-dists')
def gc_dists(args):
""" Garbage collect mx distributions."""
parser = argparse.ArgumentParser(prog='mx gc-dists', description='''Garbage collect layout distributions.
By default, it collects all found layout distributions that are *not* part of the current configuration (see `--keep-current`).
This command respects mx level suite filtering (e.g., `mx --suite my-suite gc-dists`).
''', epilog='''If the environment variable `MX_GC_AFTER_BUILD` is set, %(prog)s will be executed after `mx build`
using the content of the environment variable as parameters.''')
# mutually exclusive groups do not support title and description - wrapping in another group as a workaround
action_group_desc = parser.add_argument_group('actions', 'What to do with the result. One of the following arguments is required.')
action_group = action_group_desc.add_mutually_exclusive_group(required=True)
action_group.add_argument('-f', '--force', action='store_true', help='remove layout distributions without further questions')
action_group.add_argument('-n', '--dry-run', action='store_true', help='show what would be removed without actually doing anything')
action_group.add_argument('-i', '--interactive', action='store_true', help='ask for every layout distributions whether it should be removed')
keep_current_group_desc = parser.add_argument_group('current configuration handling', description='How to deal with the current configuration, i.e., what `mx build` would rebuild.')
keep_current_group = keep_current_group_desc.add_mutually_exclusive_group()
keep_current_group.add_argument('--keep-current', action='store_true', default=True, help='keep layout distributions of the current configuration (default)')
keep_current_group.add_argument('--no-keep-current', action='store_false', dest='keep_current', help='remove layout distributions of the current configuration')
filter_group = parser.add_argument_group('result filters', description='Filter can be combined.')
filter_group.add_argument('--reverse', action='store_true', help='reverse the result')
filter_group.add_argument('--older-than', action=TimeAction, help='only show results older than the specified point in time (format: {})'.format(TimeAction.fmt.replace('%', '%%')))
try:
parsed_args = parser.parse_args(args)
except ValueError as ve:
parser.error(str(ve))
suites = mx.suites(opt_limit_to_suite=True, includeBinary=False, include_mx=False)
c = []
for s in suites:
c += _gc_layout_dists(s, parsed_args)
if not c:
mx.log("Nothing to do!")
return
if parsed_args.older_than:
c = [x for x in c if x[1] < parsed_args.older_than]
# sort by mod date
c = sorted(c, key=lambda x: x[1], reverse=parsed_args.reverse)
# calculate max sizes
max_path = 0
max_mod_time = 0
max_size = 0
for path, mod_time, size in c:
max_path = max(len(path), max_path)
max_mod_time = max(len(_format_datetime(mod_time)), max_mod_time)
max_size = max(len(_format_bytes(size)), max_size)
msg_fmt = '{0:<' + str(max_path) + '} modified {1:<' + str(max_mod_time + len(' ago')) +'} {2:<' + str(max_size) + '}'
size_sum = 0
for path, mod_time, size in c:
if parsed_args.dry_run:
mx.log(msg_fmt.format(path, _format_datetime(mod_time) + ' ago', _format_bytes(size)))
size_sum += size
else:
msg = '{0} (modified {1} ago, size {2})'.format(path, _format_datetime(mod_time), _format_bytes(size))
if parsed_args.force or (parsed_args.interactive and mx.ask_yes_no('Delete ' + msg)):
mx.log('rm ' + path)
mx.rmtree(path)
size_sum += size
if parsed_args.dry_run:
mx.log('Would free ' + _format_bytes(size_sum))
else:
mx.log('Freed ' + _format_bytes(size_sum))
def _gc_layout_dists(suite, parsed_args):
"""Returns a list of collected layout distributions as a tuples of form (path, modification time, size in bytes)."""
mx.logv("GC layout distributions of suite " + suite.name)
known_dists = [d.name for d in suite.dists if d.isLayoutDistribution()] if parsed_args.keep_current else []
def _to_archive_name(d):
return d.lower().replace("_", "-")
# distribution name -> modification date
found_dists = {}
# We use 'savedLayouts' to identify layout distributions. Whenever mx builds a layout distribution, this file is updated.
for dirpath, _, filenames in os.walk(suite.get_output_root(platformDependent=False, jdkDependent=False)):
if os.path.basename(dirpath) == "savedLayouts":
for filename in filenames:
abs_filename = os.path.join(dirpath, filename)
if os.path.isfile(abs_filename) and not os.path.islink(abs_filename):
# we use modification time of the saved layouts file since that is the canonical modified time
found_dists[filename] = datetime.fromtimestamp(os.path.getmtime(abs_filename))
# distribution name -> modification date
unknown_dists = {distname: moddate for distname, moddate in found_dists.items() if distname not in known_dists}
# full artifact path -> dist
candidates = {}
# search for the layout distribution folder as well as for the archive, platform/jdk dependent and independent
for jdkDependent in [True, False]:
for platformDependent in [True, False]:
dist_dir = suite.get_output_root(platformDependent=platformDependent, jdkDependent=jdkDependent)
candidates.update({os.path.join(dist_dir, x): x for x in _listdir(dist_dir) if x in unknown_dists.keys()})
for ext in [".tar", ".zip"]:
unknown_archives = {_to_archive_name(d) + ext: d for d in unknown_dists.keys()}
archive_dir = os.path.join(dist_dir, "dists")
candidates.update({os.path.join(archive_dir, x): unknown_archives.get(x) for x in _listdir(archive_dir) if x in unknown_archives.keys()})
return [(full_path, unknown_dists.get(dist), _get_size_in_bytes(full_path)) for full_path, dist in candidates.items()]
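# --- Editor's note: illustrative invocations of the command defined above
# (flags from the argparse definition; the suite name is hypothetical):
#
#   mx gc-dists --dry-run
#   mx --suite my-suite gc-dists --force --older-than 2weeks
#   MX_GC_AFTER_BUILD="--force --older-than 4weeks" mx build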
|
graalvm/mx
|
mx_gc.py
|
Python
|
gpl-2.0
| 11,168
|
[
"VisIt"
] |
493dbc1e882457afa5b226fbb97263b8db067db46a0ac033fdfc0f4347143c45
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
Test loss: 0.08240645187353703
Test accuracy: 0.983
"""
# Based on provided Keras example
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import RMSprop
batch_size = 128
num_classes = 10
epochs = 20
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print("Training samples: {}".format(x_train.shape[0]))
print("Test samples: {}".format(x_test.shape[0]))
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Dense(512, activation='sigmoid', input_shape=(784,)))
model.add(Dense(512, activation='sigmoid'))
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss: {}'.format(score[0]))
print('Test accuracy: {}'.format(score[1]))
|
jeffheaton/aifh
|
vol3/vol3-python-examples/examples/example_mnist_sigmoid.py
|
Python
|
apache-2.0
| 2,485
|
[
"VisIt"
] |
99e3b2d3f9e6bacdcd67267ccc6d6260d03025796e002a7b6205d920ed381311
|